| prompt (string, 19 – 1.03M chars) | completion (string, 4 – 2.12k chars) | api (string, 8 – 90 chars) |
---|---|---|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
import sys
import pandas as pd
from datetime import timedelta, datetime, date, time
import API
from fredapi import Fred  # assumed import for the Fred client used below
# if __name__ == '__main__':
# main(sys.argv[1])
# Global constants
DATA_TYPE = 'Adj Close'
DAYS_LOOK_BACK = 5110 # look-back window expressed in days (~14 years)
HOW_RECENT = 0
MAIN_FOLDER = 'C:/Users/champ/Python_proj/base_financial_repo/'
ECONO_DATA_REPO = 'Econometric_data_repo/'
META_FILE_FOLDER = 'meta_data/'
GIT_IGNORE = 'C:/Users/champ/Python_proj/git_ignore/'
SECURITIES_FILE = 'Securities research.csv'
HOLIDAYS_FILE = 'Federal_holidays.csv'
SECURITY_ARRAY = ['sectors','inx']
FRED_API = FRED_API_KEY # your FRED API key goes here (define FRED_API_KEY or paste the key string)
FIGURE_WIDTH = 12
FIGURE_HEIGHT = 6
LINE_WIDTH = 2
GRAPH_FONT_SIZE = 12
INX = 'spy'
# Global variables
#end_date = pd.Timestamp('today')
end_date = pd.to_datetime('today')
#end_date = pd.to_datetime('now').tz_localize("GMT").tz_convert('America/Los_Angeles')
end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
start_date = end_date - timedelta(days=DAYS_LOOK_BACK)
fred = Fred(FRED_API)
META_FILE_FOLDER_LOCATION = MAIN_FOLDER + ECONO_DATA_REPO + META_FILE_FOLDER
securities_file_location = META_FILE_FOLDER_LOCATION + SECURITIES_FILE
Securities_file_import_from_csv = pd.read_csv(securities_file_location, sep=';')
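# Hedged usage sketch (not part of the original script): with a valid FRED API
# key configured above, the client can pull a series over the same look-back
# window. 'SP500' is only an illustrative series id.
sp500_example = fred.get_series('SP500', observation_start=start_date,
observation_end=end_date)
print(sp500_example.tail())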
import gc
import logging
import traceback
from collections import defaultdict
from datetime import datetime, timedelta
from multiprocessing import Process, Queue
import numpy as np
import pandas as pd
import xarray as xr
from typhon.geodesy import great_circle_distance
from typhon.geographical import GeoIndex
from typhon.utils import add_xarray_groups, get_xarray_groups
from typhon.utils.timeutils import to_datetime, to_timedelta, Timer
__all__ = [
"Collocator",
"check_collocation_data"
]
logger = logging.getLogger(__name__)
# The names for the processes. This started as an easter egg, but it actually
# helps to identify different processes during debugging.
PROCESS_NAMES = [
'Newton', 'Einstein', 'Bohr', 'Darwin', 'Pasteur', 'Freud', 'Galilei',
'Lavoisier', 'Kepler', 'Copernicus', 'Faraday', 'Maxwell', 'Bernard',
'Boas', 'Heisenberg', 'Pauling', 'Virchow', 'Schrodinger', 'Rutherford',
'Dirac', 'Vesalius', 'Brahe', 'Buffon', 'Boltzmann', 'Planck', 'Curie',
'Herschel', 'Lyell', 'Laplace', 'Hubble', 'Thomson', 'Born', 'Crick',
'Fermi', 'Euler', 'Liebig', 'Eddington', 'Harvey', 'Malpighi', 'Huygens',
'Gauss', 'Haller', 'Kekule', 'Koch', 'Gell-Mann', 'Fischer', 'Mendeleev',
'Glashow', 'Watson', 'Bardeen', 'Neumann', 'Feynman', 'Wegener', 'Hawking',
'Leeuwenhoek', 'Laue', 'Kirchhoff', 'Bethe', 'Euclid', 'Mendel', 'Onnes',
'Morgan', 'Helmholtz', 'Ehrlich', 'Mayr', 'Sherrington', 'Dobzhansky',
'Delbruck', 'Lamarck', 'Bayliss', 'Chomsky', 'Sanger', 'Lucretius',
'Dalton', 'Broglie', 'Linnaeus', 'Piaget', 'Simpson', 'Levi-Strauss',
'Margulis', 'Landsteiner', 'Lorenz', 'Wilson', 'Hopkins', 'Elion', 'Selye',
'Oppenheimer', 'Teller', 'Libby', 'Haeckel', 'Salk', 'Kraepelin',
'Lysenko', 'Galton', 'Binet', 'Kinsey', 'Fleming', 'Skinner', 'Wundt',
'Archimedes'
]
class ProcessCrashed(Exception):
"""Helper exception for crashed processes"""
pass
class Collocator:
def __init__(
self, threads=None, name=None, #log_dir=None
):
"""Initialize a collocator object that can find collocations
Args:
threads: Finding collocations can be parallelized in threads. Give
here the maximum number of threads that you want to use. Which
number of threads is the best, may be machine-dependent. So
this is a parameter that you can use to fine-tune the
performance. Note: Not yet implemented due to GIL usage of
sklearn BallTree.
name: The name of this collocator, will be used in log statements.
"""
self.empty = None # xr.Dataset()
self.index = None
self.index_with_primary = False
self.threads = threads
# These optimization parameters will be overwritten in collocate
self.bin_factor = None
self.magnitude_factor = None
self.tunnel_limit = None
self.leaf_size = None
self.name = name if name is not None else "Collocator"
# If no collocations are found, this will be returned. We need empty
# arrays to concatenate the results without problems:
@property
def no_pairs(self):
return np.array([[], []])
@property
def no_intervals(self):
return np.array([], dtype='timedelta64[ns]')
@property
def no_distances(self):
return np.array([])
def __call__(self, *args, **kwargs):
return self.collocate(*args, **kwargs)
def _debug(self, msg):
logger.debug(f"[{self.name}] {msg}")
def _info(self, msg):
logger.info(f"[{self.name}] {msg}")
def _error(self, msg):
logger.error(f"[{self.name}] {msg}")
def collocate_filesets(
self, filesets, start=None, end=None, processes=None, output=None,
bundle=None, skip_file_errors=False, post_processor=None,
post_processor_kwargs=None, **kwargs
):
"""Find collocation between the data of two filesets
If you want to save the collocations directly to disk, it may be easier
to use :meth:`~typhon.collocations.Collocations.search` directly.
Args:
filesets: A list of two :class:`FileSet` objects, the primary and
the secondary fileset. Can be also
:class:`~typhon.collocations.common.Collocations` objects with
`read_mode=collapse`. The order of the filesets is irrelevant
for the results of the collocation search but files from the
secondary fileset might be read multiple times if using
parallel processing (`processes` is greater than one). The
number of output files could be different (see also the option
`bundle`).
start: Start date either as datetime object or as string
("YYYY-MM-DD hh:mm:ss"). Year, month and day are required.
Hours, minutes and seconds are optional. If not given, it is
datetime.min per default.
end: End date. Same format as "start". If not given, it is
datetime.max per default.
processes: Collocating can be parallelized which improves the
performance significantly. Pass here the number of processes to
use.
output: Fileset object where the collocated data should be stored.
bundle: Set this to *primary* if you want to bundle the output
files by their collocated primaries, i.e. there will be only
one output file per primary. *daily* is also possible, then all
files from one day are bundled together. Per default, all
collocations for each file match will be saved separately.
This might lead to a high number of output files.
Note: *daily* means one process bundles all collocations from
one day into one output file. If using multiple processes, this
could still produce several daily output files per day.
skip_file_errors: If this is *True* and a file could not be read,
the file and its match will be skipped and a warning will be
printed. Otherwise the program will stop (default).
post_processor: A function for post-processing the collocated data
before saving it to `output`. Must accept two parameters: a
xarray.Dataset with the collocated data and a dictionary with
the path attributes from the collocated files.
post_processor_kwargs: A dictionary with keyword arguments that
should be passed to `post_processor`.
**kwargs: Further keyword arguments that are allowed for
:meth:`collocate`.
Yields:
A xarray.Dataset with the collocated data if `output` is not set.
If `output` is set to a FileSet-like object, only the filename of
the stored collocations is yielded. The results are not ordered if
you use more than one process. For more information about the
yielded xarray.Dataset have a look at :meth:`collocate`.
Examples:
.. code-block:: python
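# Minimal sketch (an assumption, not from the original docs):
# `primary`, `secondary` and `collocated` are typhon FileSet
# objects; `collocated` receives the output files.
collocator = Collocator(name="demo")
filenames = list(collocator.collocate_filesets(
[primary, secondary], processes=2, output=collocated,
max_interval="1h", max_distance="300km",
))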
"""
timer = Timer().start()
if len(filesets) != 2:
raise ValueError("Only collocating two filesets at once is allowed"
"at the moment!")
# Check the max_interval argument because we need it later
max_interval = kwargs.get("max_interval", None)
if max_interval is None:
raise ValueError("Collocating filesets without max_interval is"
" not yet implemented!")
if start is None:
start = datetime.min
else:
start = to_datetime(start)
if end is None:
end = datetime.max
else:
end = to_datetime(end)
self._info(f"Collocate from {start} to {end}")
# Find the files from both filesets which overlap temporally.
matches = list(filesets[0].match(
filesets[1], start=start, end=end, max_interval=max_interval,
))
if processes is None:
processes = 1
# Make sure that there are never more processes than matches
processes = min(processes, len(matches))
total_matches = sum(len(match[1]) for match in matches)
self._info(f"using {processes} process(es) on {total_matches} matches")
# MAGIC with processes
# Each process gets a list with matches. Important: the matches should
# be continuous to guarantee a good performance. After finishing one
# match, the process pushes its results to the result queue. If errors
# are raised during collocating, they are pushed to the error queue.
matches_chunks = np.array_split(matches, processes)
# This queue collects all results:
results = Queue(maxsize=processes)
# This queue collects all error exceptions
errors = Queue()
# Extend the keyword arguments that we are going to pass to
# _collocate_files:
kwargs.update({
"start": start,
"end": end,
"filesets": filesets,
"output": output,
"bundle": bundle,
"skip_file_errors": skip_file_errors,
"post_processor": post_processor,
"post_processor_kwargs": post_processor_kwargs,
})
# This list contains all running processes
process_list = [
Process(
target=Collocator._process_caller,
args=(
self, results, errors, PROCESS_NAMES[i],
),
kwargs={**kwargs, "matches": matches_chunk},
daemon=True,
)
for i, matches_chunk in enumerate(matches_chunks)
]
# We want to keep track of the progress of the collocation search since
# it may take a while.
process_progress = {
name: 0. # Each process is at 0 percent at the beginning
for name in PROCESS_NAMES[:processes]
}
# Start all processes:
for process in process_list:
process.start()
# As long as some processes are still running, wait for their results:
running = process_list.copy()
processed_matches = 0
# The main process has two tasks while its child processes are
# collocating:
# 1) Collect their results and yield them to the user
# 2) Display the progress and estimate the remaining processing time
while running:
# Filter out all processes that are dead: they either crashed or
# completed their task
running = [
process for process in running if process.is_alive()
]
# Get all results from the result queue
while not results.empty():
process, progress, result = results.get()
# The process might have crashed. To keep the remaining time
# estimation useful, we exclude the crashed process from the
# calculation.
if result is ProcessCrashed:
del process_progress[process]
else:
process_progress[process] = progress
try:
nerrors = errors.qsize()
except NotImplementedError:
nerrors = 'unknown'
self._print_progress(
timer.elapsed, process_progress, len(running), nerrors)
if result is not None:
yield result
# Explicit free up memory:
gc.collect()
for process in process_list:
process.join()
if not errors.empty():
self._error("Some processes terminated due to errors:")
while not errors.empty():
error = errors.get()
msg = '\n'.join([
"-"*79,
error[2],
"".join(traceback.format_tb(error[1])),
"-" * 79 + "\n"
])
self._error(msg)
@staticmethod
def _print_progress(elapsed_time, process_progress, processes, errors):
elapsed_time -= timedelta(microseconds=elapsed_time.microseconds)
if len(process_progress) == 0:
msg = "-"*79 + "\n"
msg += f"100% | {elapsed_time} hours elapsed | " \
f"{errors} processes failed\n"
msg += "-"*79 + "\n"
logger.error(msg)
return
progress = sum(process_progress.values()) / len(process_progress)
try:
expected_time = elapsed_time * (100 / progress - 1)
expected_time -= timedelta(
microseconds=expected_time.microseconds)
except ZeroDivisionError:
expected_time = "unknown"
msg = "-"*79 + "\n"
msg += f"{progress:.0f}% | {elapsed_time} hours elapsed, " \
f"{expected_time} hours left | {processes} proc running, " \
f"{errors} failed\n"
msg += "-"*79 + "\n"
logger.error(msg)
@staticmethod
def _process_caller(
self, results, errors, name, output, bundle, post_processor,
post_processor_kwargs, **kwargs):
"""Wrapper around _collocate_matches
This function is called for each process. It communicates with the main
process via the result and error queue.
Result Queue:
Adds for each collocated file match the process name, its progress
and the actual results.
Error Queue:
If an error is raised, the name of this process and the error
message are put into this queue.
"""
self.name = name
# We keep track of how many file pairs we have already processed to
# make the error debugging easier. We need the match in flat form:
matches = [
[match[0], secondary]
for match in kwargs['matches']
for secondary in match[1]
]
# If we want to bundle the output, we need to collect some contents.
# The current_bundle_tag stores a certain information for the current
# bundle (e.g. filename of primary or day of the year). If it changes,
# the bundle is stored to disk and a new bundle is created.
cached_data = []
cached_attributes = {}
current_bundle_tag = None
try:
processed = 0
collocated_matches = self._collocate_matches(**kwargs)
for collocations, attributes in collocated_matches:
match = matches[processed]
processed += 1
progress = 100 * processed / len(matches)
if collocations is None:
results.put([name, progress, None])
continue
# The user does not want to bundle anything therefore just save
# the current collocations
if bundle is None:
result = self._save_and_return(
collocations, attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
continue
# The user may want to bundle the collocations before writing
# them to disk, e.g. by their primaries.
save_cache = self._should_save_cache(
bundle, current_bundle_tag, match,
to_datetime(collocations.attrs["start_time"])
)
if save_cache:
result = self._save_and_return(
cached_data,
cached_attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
cached_data = []
cached_attributes = {}
# So far, we have not cached any collocations or we still need
# to wait before saving them to disk.
cached_data.append(collocations)
cached_attributes.update(**attributes)
if bundle == "primary":
current_bundle_tag = match[0].path
elif bundle == "daily":
current_bundle_tag = \
to_datetime(collocations.attrs["start_time"]).date()
# After all iterations, save last cached data to disk:
if cached_data:
result = self._save_and_return(
cached_data,
cached_attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
except Exception as exception:
# Tell the main process to stop considering this process for the
# remaining processing time:
results.put(
[name, 100., ProcessCrashed]
)
self._error("ERROR: I got a problem and terminate!")
# Build a message that contains all important information for
# debugging:
msg = f"Process {name} ({matches[0][0].times[0]} -" \
f"{matches[-1][0].times[1]}) failed\n" \
f"Failed to collocate {matches[processed]} with"\
f"{matches[processed]}\n"
# The main process needs to know about this exception!
error = [
name, exception.__traceback__,
msg + "ERROR: " + str(exception)
]
errors.put(error)
self._error(exception)
# Finally, raise the exception to terminate this process:
raise exception
self._info(f"Finished all {len(matches)} matches")
def _save_and_return(self, collocations, attributes, output,
post_processor, post_processor_kwargs):
"""Save collocations to disk or return them"""
if isinstance(collocations, list):
collocations = concat_collocations(
collocations
)
if output is None:
return collocations, attributes
else:
filename = output.get_filename(
[to_datetime(collocations.attrs["start_time"]),
to_datetime(collocations.attrs["end_time"])],
fill=attributes
)
# Apply a post processor function from the user
if post_processor is not None:
if post_processor_kwargs is None:
post_processor_kwargs = {}
collocations = post_processor(
collocations, attributes, **post_processor_kwargs
)
if collocations is None:
return None
self._info(f"Store collocations to\n{filename}")
# Write the data to the file.
output.write(collocations, filename)
return filename
@staticmethod
def _should_save_cache(bundle, current_bundle_tag, match, start_time):
"""Return true if the cache should be saved otherwise false
"""
if current_bundle_tag is None:
return False
elif bundle == "primary":
# Check whether the primary has changed since the last time:
return current_bundle_tag != match[0].path
elif bundle == "daily":
# Has the day changed since last time?
return current_bundle_tag != start_time.date()
# In all other cases, the bundle should not be saved yet:
return False
def _collocate_matches(
self, filesets, matches, skip_file_errors, **kwargs
):
"""Load file matches and collocate their content
Yields:
A tuple of two items: the first is always the current percentage
of progress. If output is True, the second is only the filename of
the saved collocations. Otherwise, it is a tuple of collocations
and their collected :class:`~typhon.files.handlers.common.FileInfo`
attributes as a dictionary.
"""
# Load all matches in a parallelized queue:
loaded_matches = filesets[0].align(
filesets[1], matches=matches, return_info=True, compact=False,
skip_errors=skip_file_errors,
)
for loaded_match in loaded_matches:
# The FileInfo objects of the matched files:
files = loaded_match[0][0], loaded_match[1][0]
# We copy the data from the matches since they might be used for
# other matches as well:
primary, secondary = \
loaded_match[0][1].copy(), loaded_match[1][1].copy()
self._debug(f"Collocate {files[0].path}\nwith {files[1].path}")
collocations = self.collocate(
(filesets[0].name, primary),
(filesets[1].name, secondary), **kwargs,
)
if collocations is None:
self._debug("Found no collocations!")
# At least, give the process caller a progress update:
yield None, None
continue
# Check whether the collocation data is compatible and was built
# correctly
check_collocation_data(collocations)
found = [
collocations[f"{filesets[0].name}/time"].size,
collocations[f"{filesets[1].name}/time"].size
]
self._debug(
f"Found {found[0]} ({filesets[0].name}) and "
f"{found[1]} ({filesets[1].name}) collocations"
)
# Add the names of the processed files:
for f in range(2):
if f"{filesets[f].name}/__file" in collocations.variables:
continue
collocations[f"{filesets[f].name}/__file"] = files[f].path
# Collect the attributes of the input files. The attributes get a
# prefix, primary or secondary, to allow non-unique names.
attributes = {
f"primary.{p}" if f == 0 else f"secondary.{p}": v
for f, file in enumerate(files)
for p, v in file.attr.items()
}
yield collocations, attributes
def collocate(
self, primary, secondary, max_interval=None, max_distance=None,
bin_factor=1, magnitude_factor=10, tunnel_limit=None, start=None,
end=None, leaf_size=40
):
"""Find collocations between two xarray.Dataset objects
Collocations are two or more data points that are located close to each
other in space and/or time.
Each xarray.Dataset must contain the variables *time*, *lat* and *lon*.
If they are coordinates, they must be unique; otherwise, their
coordinates must be unique, i.e. they cannot contain duplicated values.
*time* must be a 1-dimensional array with a *numpy.datetime64*-like
data type. *lat* and *lon* can be gridded, i.e. they can be multi-
dimensional. However, they must always share the first dimension with
*time*. *lat* must be latitudes between *-90* (south) and *90* (north)
and *lon* must be longitudes between *-180* (west) and *180* (east)
degrees. See below for examples.
The collocation search is performed with a fast ball tree
implementation by scikit-learn. The ball tree is cached and reused
whenever the data points from `primary` or `secondary` have not
changed.
If you want to find collocations between FileSet objects, use
:class:`collocate_filesets` instead.
Args:
primary: A tuple of a string with the dataset name and a
xarray.Dataset that fulfill the specifications from above. Can
be also a xarray.Dataset only, the name is then automatically
set to *primary*.
secondary: A tuple of a string with the dataset name and a
xarray.Dataset that fulfill the specifications from above. Can
be also a xarray.Dataset only, the name is then automatically
set to *secondary*.
max_interval: Either a number as a time interval in seconds, a
string containing a time with a unit (e.g. *100 minutes*) or a
timedelta object. This is the maximum time interval between two
data points. If this is None, the data will be searched for
spatial collocations only.
max_distance: Either a number as a length in kilometers or a string
containing a length with a unit (e.g. *100 meters*). This is
the maximum distance between two data points to meet the
collocation criteria. If this is None, the data will be
searched for temporal collocations only. Either `max_interval`
or *max_distance* must be given.
tunnel_limit: Maximum distance in kilometers at which to switch
from tunnel to haversine distance metric. Per default this
algorithm uses the tunnel metric, which simply transforms all
latitudes and longitudes to 3D-cartesian space and calculates
their Euclidean distance. This is faster than the haversine
metric but produces an error that grows with larger distances.
When searching for distances exceeding this limit
(`max_distance` is greater than this parameter), the haversine
metric is used, which is more accurate but takes more time.
Default is 1000 kilometers.
magnitude_factor: Since building new trees is expensive, this
algorithm tries to use the last tree when possible (e.g. for
data with fixed grid). However, building the tree with the
larger dataset and querying it with the smaller dataset is faster
than vice versa. Depending on which premise is followed, the
resulting performance may differ. This parameter is the factor by
which one dataset must be larger than the other before an
already-built ball tree is thrown away and rebuilt with the
larger dataset.
leaf_size: The size of one leaf in the Ball Tree. The higher the
leaf size, the faster the tree building but the slower the
tree query. The optimal leaf size is dataset-dependent. Default
is 40.
bin_factor: When using a temporal criterion via `max_interval`, the
data will be temporally binned to speed-up the search. The bin
size is `bin_factor` * `max_interval`. Which bin factor is the
best, may be dataset-dependent. So this is a parameter that you
can use to fine-tune the performance.
start: Limit the collocated data from this start date. Can be
either as datetime object or as string ("YYYY-MM-DD hh:mm:ss").
Year, month and day are required. Hours, minutes and seconds
are optional. If not given, it is datetime.min per default.
end: End date. Same format as "start". If not given, it is
datetime.max per default.
Returns:
None if no collocations were found. Otherwise,
a xarray.Dataset with the collocated data in *compact* form. It
consists of three groups (groups of variables containing */* in
their name): the *primary*, *secondary* and the *Collocations*
group. If you passed `primary` or `secondary` with own names,
they will be used in the output. The *Collocations* group contains
information about the found collocations. *Collocations/pairs* is
a 2xN array where N is the number of found collocations. It
contains the indices of the *primary* and *secondary* data points
which are collocations. The indices refer to the data points stored
in the *primary* or *secondary* group. *Collocations/interval* and
*Collocations/distance* are the intervals and distances between the
collocations in seconds and kilometers, respectively. Collocations
in *compact* form are efficient when saving them to disk but it
might be complicated to use them directly. Consider applying
:func:`~typhon.collocations.common.collapse` or
:func:`~typhon.collocations.common.expand` on them.
Examples:
.. code-block:: python
# TODO: Update this example!
import numpy as np
from typhon.collocations import Collocator
# Create the data. primary and secondary can also be
# xarray.Dataset objects:
primary = {
"time": np.arange(
"2018-01-01", "2018-01-02", dtype="datetime64[h]"
),
"lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24))+20,
"lon": np.linspace(0, 90, 24),
}
secondary = {
"time": np.arange(
"2018-01-01", "2018-01-02", dtype="datetime64[h]"
),
"lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24)+1.)+20,
"lon": np.linspace(0, 90, 24),
}
# Find collocations with a maximum distance of 300 kilometers
# and a maximum interval of 1 hour
collocator = Collocator()
collocated = collocator.collocate(
primary, secondary,
max_distance="300km", max_interval="1h"
)
print(collocated)
"""
if max_distance is None and max_interval is None:
raise ValueError(
"Either max_distance or max_interval must be given!"
)
if max_interval is not None:
max_interval = to_timedelta(max_interval, numbers_as="seconds")
# The user can give strings instead of datetime objects:
start = datetime.min if start is None else to_datetime(start)
end = datetime.max if end is None else to_datetime(end)
# Did the user give the datasets specific names?
primary_name, primary, secondary_name, secondary = self._get_names(
primary, secondary
)
# Select the common time period of both datasets and flatten them.
primary, secondary = self._prepare_data(
primary, secondary, max_interval, start, end
)
# Maybe there is no data left after selection?
if primary is None:
return self.empty
self.bin_factor = bin_factor
self.magnitude_factor = magnitude_factor
self.tunnel_limit = tunnel_limit
self.leaf_size = leaf_size
timer = Timer().start()
# We cannot allow NaNs in the time, lat or lon fields
not_nans1 = self._get_not_nans(primary)
not_nans2 = self._get_not_nans(secondary)
# Retrieve the important fields from the data. To avoid any overhead by
# xarray, we use the plain numpy.arrays and do not use the isel method
# (see https://github.com/pydata/xarray/issues/2227). We rather use
# index arrays that we use later to select the rest of the data
lat1 = primary.lat.values[not_nans1]
lon1 = primary.lon.values[not_nans1]
time1 = primary.time.values[not_nans1]
lat2 = secondary.lat.values[not_nans2]
lon2 = secondary.lon.values[not_nans2]
time2 = secondary.time.values[not_nans2]
original_indices = [
np.arange(primary.time.size)[not_nans1],
np.arange(secondary.time.size)[not_nans2]
]
self._debug(f"{timer} for filtering NaNs")
# We can search for spatial collocations (max_interval=None), temporal
# collocations (max_distance=None) or both.
if max_interval is None:
# Search for spatial collocations only:
pairs, distances = self.spatial_search(
lat1, lon1, lat2, lon2, max_distance,
)
intervals = self._get_intervals(
time1[pairs[0]], time2[pairs[1]]
)
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(pairs, original_indices),
intervals, distances,
max_interval, max_distance
)
elif max_distance is None:
# Search for temporal collocations only:
pairs, intervals = self.temporal_search(
time1, time2, max_interval
)
distances = self._get_distances(
lat1[pairs[0]], lon1[pairs[0]],
lat2[pairs[1]], lon2[pairs[1]],
)
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(pairs, original_indices),
intervals, distances,
max_interval, max_distance
)
# The user wants to use both criteria and search for spatial and
# temporal collocations. At first, we do a coarse temporal pre-binning
# so that we only search for collocations between points that might
# also be temporally collocated. Unfortunately, this also produces an
# overhead that is only negligible if we have a lot of data:
data_magnitude = time1.size * time2.size
if data_magnitude > 1_000_000:
# We have enough data, do temporal pre-binning!
pairs, distances = self.spatial_search_with_temporal_binning(
{"lat": lat1, "lon": lon1, "time": time1},
{"lat": lat2, "lon": lon2, "time": time2},
max_distance, max_interval
)
else:
# We do not have enough data to justify that whole pre-binning.
# Simply do it directly!
pairs, distances = self.spatial_search(
lat1, lon1, lat2, lon2, max_distance,
)
# Did we find any spatial collocations?
if not pairs.any():
return self.empty
# Check now whether the spatial collocations really pass the temporal
# condition:
passed_temporal_check, intervals = self._temporal_check(
time1[pairs[0]], time2[pairs[1]], max_interval
)
# Return only the values that passed the time check
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(
pairs[:, passed_temporal_check], original_indices),
intervals, distances[passed_temporal_check],
max_interval, max_distance
)
@staticmethod
def _to_original(pairs, original_indices):
return np.array([
original_indices[i][pair_array]
for i, pair_array in enumerate(pairs)
])
@staticmethod
def _get_names(primary, secondary):
# Check out whether the user gave the primary and secondary any name:
if isinstance(primary, (tuple, list)):
primary_name, primary = primary
else:
primary_name = "primary"
if isinstance(secondary, (tuple, list)):
secondary_name, secondary = secondary
else:
secondary_name = "secondary"
return primary_name, primary, secondary_name, secondary
def _prepare_data(self, primary, secondary, max_interval, start, end):
"""Prepare the data for the collocation search
This method selects the time period which should be searched for
collocations and flattens the input datasets if they have gridded
variables.
Returns:
The datasets constraint to the common time period, sorted by time
and flattened. If no common time period could be found, two None
objects are returned.
"""
if max_interval is not None:
timer = Timer().start()
# We do not have to collocate everything, just the common time
# period expanded by max_interval and limited by the global start
# and end parameter:
primary_period, secondary_period = self._get_common_time_period(
primary, secondary, max_interval, start, end
)
# Check whether something is left:
if not primary_period.size or not secondary_period.size:
return None, None
# We need everything sorted by time, otherwise xarray's stack
# method causes problems:
primary_period = primary_period.sortby(primary_period)
primary_dim = primary_period.dims[0]
secondary_period = secondary_period.sortby(secondary_period)
secondary_dim = secondary_period.dims[0]
# Select the common time period while using sorted indices:
primary = primary.sel(**{primary_dim: primary_period[primary_dim]})
secondary = secondary.sel(
**{secondary_dim: secondary_period[secondary_dim]}
)
# Check whether something is left:
if not primary_period.size or not secondary_period.size:
return None, None
self._debug(f"{timer} for selecting common time period")
# Flatten the data: for collocating, we need a flat data structure.
# Fortunately, xarray provides the very convenient stack method
# where we can flat multiple dimensions to one. Which dimensions do
# we have to stack together? We need the fields *time*, *lat* and
# *lon* to be flat. So we choose their dimensions to be stacked.
timer = Timer().start()
primary = self._flat_to_main_coord(primary)
secondary = self._flat_to_main_coord(secondary)
self._debug(f"{timer} for flatting data")
return primary, secondary
@staticmethod
def _get_common_time_period(
primary, secondary, max_interval, start, end):
max_interval = pd.Timedelta(max_interval)
# We want to select a common time window from both datasets,
# aligned to the primary's time coverage. Because xarray has a
# very annoying bug in time retrieving
# (https://github.com/pydata/xarray/issues/1240), this is a
# little bit cumbersome:
common_start = max(
start,
pd.Timestamp(primary.time.min().item(0)) - max_interval,
pd.Timestamp(secondary.time.min().item(0)) - max_interval
)
common_end = min(
end,
pd.Timestamp(primary.time.max().item(0)) + max_interval,
pd.Timestamp(secondary.time.max().item(0)) + max_interval
)
primary_period = primary.time.where(
(primary.time.values >= np.datetime64(common_start))
& (primary.time.values <= np.datetime64(common_end))
).dropna(primary.time.dims[0])
secondary_period = secondary.time.where(
(secondary.time.values >= np.datetime64(common_start))
& (secondary.time.values <= np.datetime64(common_end))
).dropna(secondary.time.dims[0])
return primary_period, secondary_period
@staticmethod
def _get_not_nans(dataset):
return dataset.lat.notnull().values & dataset.lon.notnull().values
@staticmethod
def _flat_to_main_coord(data):
"""Make the dataset flat despite of its original structure
We need a flat dataset structure for the collocation algorithms, i.e.
time, lat and lon are not allowed to be gridded, they must be
1-dimensional and share the same dimension (namely *collocation*).
There are three groups of original data structures that this method
can handle:
* linear (e.g. ship track measurements): time, lat and lon have the
same dimension and are all 1-dimensional. Fulfills all criteria
from above. No action has to be taken.
* gridded_coords (e.g. instruments on satellites with gridded swaths):
lat or lon are gridded (they have multiple dimensions). Their
coordinates are stacked together into a new shared dimension.
Args:
data: xr.Dataset object
Returns:
A xr.Dataset where time, lat and lon are aligned on one shared
dimension.
"""
# Flat:
shared_dims = list(
set(data.time.dims) | set(data.lat.dims) | set(data.lon.dims)
)
# Check whether the dataset is flat (time, lat and lon share the same
# dimension size and are 1-dimensional)
if len(shared_dims) == 1:
if shared_dims[0] in ("time", "lat", "lon"):
# One of the key variables is the main dimension! Change this:
data["collocation"] = shared_dims[0], np.arange(
data[shared_dims[0]].size)
data = data.swap_dims({shared_dims[0]: "collocation"})
data = data.reset_coords(shared_dims[0])
# So far, collocation is a coordinate. We want to make it a
# dimension, so drop its values:
return data.drop("collocation")
return data.rename({
shared_dims[0]: "collocation"
})
# The coordinates are gridded:
# Some field might be more deeply stacked than another. Choose the
# dimensions of the most deeply stacked variable:
dims = max(
data["time"].dims, data["lat"].dims, data["lon"].dims,
key=lambda x: len(x)
)
# We want to be able to retrieve additional fields after collocating.
# Therefore, we give each dimension that is not yet a coordinate a value
# so it can be used as indices later.
for dim in dims:
if dim not in data.coords:
data[dim] = dim, np.arange(data.dims[dim])
# We assume that coordinates must be unique! Otherwise, we would have
# to use this ugly work-around:
# Replace the former coordinates with new coordinates that have unique
# values.
# new_dims = []
# for dim in dims:
# new_dim = f"__replacement_{dim}"
# data[new_dim] = dim, np.arange(data.dims[dim])
# data = data.swap_dims({dim: new_dim})
# new_dims.append(new_dim)
return data.stack(collocation=dims)
def _create_return(
self, primary, secondary, primary_name, secondary_name,
original_pairs, intervals, distances,
max_interval, max_distance
):
if not original_pairs.any():
return self.empty
pairs = []
output = {}
names = [primary_name, secondary_name]
for i, dataset in enumerate([primary, secondary]):
# name of the current dataset (primary or secondary)
name = names[i]
# These are the indices of the points in the original data that
# have collocations. We remove the duplicates since we want to copy
# the required data only once. They are called original_indices
# because they are the indices in the original data array:
original_indices = pd.unique(original_pairs[i])
# After selecting the collocated data, the original indices cannot
# be applied any longer. We need new indices that indicate the
# pairs in the collocated data.
new_indices = np.empty(original_indices.max() + 1, dtype=int)
new_indices[original_indices] = np.arange(
original_indices.size
)
collocation_indices = new_indices[original_pairs[i]]
# Save the collocation indices in the metadata group:
pairs.append(collocation_indices)
output[names[i]] = dataset.isel(collocation=original_indices)
# We have to convert the MultiIndex to a normal index because we
# cannot store it to a file otherwise. We can convert it by simply
# setting it to new values, but we are losing the sub-level
# coordinates (the dimensions that we stacked to create the
# multi-index in the first place) with that step. Hence, we store
# the sub-level coordinates in an additional dataset to preserve them.
main_coord_is_multiindex = isinstance(
output[name].get_index("collocation"),
pd.core.indexes.multi.MultiIndex
)
if main_coord_is_multiindex:
stacked_dims_data = xr.merge([
xr.DataArray(
output[name][dim].values,
name=dim, dims=["collocation"]
)
for dim in output[name].get_index("collocation").names
])
# Okay, actually we want to get rid of the main coordinate. It
# should stay as a dimension name but without its own labels, i.e. we
# want to drop it. Because it may still be a MultiIndex, we cannot
# drop it directly; we have to set it to something different first.
output[name]["collocation"] = \
np.arange(output[name]["collocation"].size)
if main_coord_is_multiindex:
# Now, since we unstacked the multi-index, we can add the
# stacked dimensions back to the dataset:
output[name] = xr.merge(
[output[name], stacked_dims_data],
)
# For the flattening we might have created temporary variables;
# collect them to drop as well:
vars_to_drop = [
var for var in output[name].variables.keys()
if var.startswith("__replacement_")
]
output[name] = output[name].drop([
f"collocation", *vars_to_drop
])
# Merge all datasets into one:
output = add_xarray_groups(
xr.Dataset(), **output
)
# This holds the collocation information (pairs, intervals and
# distances):
metadata = xr.Dataset()
metadata["pairs"] = xr.DataArray(
np.array(pairs, dtype=int), dims=("group", "collocation"),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
}
)
metadata["interval"] = xr.DataArray(
intervals, dims=("collocation", ),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
}
)
metadata["distance"] = xr.DataArray(
distances, dims=("collocation",),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
"units": "kilometers",
}
)
metadata["group"] = xr.DataArray(
[primary_name, secondary_name], dims=("group",),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
}
)
output = add_xarray_groups(
output, Collocations=metadata
)
start = pd.Timestamp(
output[primary_name+"/time"].min().item(0)
)
end = pd.Timestamp(
output[primary_name+"/time"].max().item(0)
)
output.attrs = {
"start_time": str(start),
"end_time": str(end),
}
return output
@staticmethod
def get_meta_group():
return f"Collocations"
def spatial_search_with_temporal_binning(
self, primary, secondary, max_distance, max_interval
):
# For time-binning purposes, pandas DataFrame objects are a good choice
primary = pd.DataFrame(primary).set_index("time")
secondary = pd.DataFrame(secondary).set_index("time")
# Now let's split the two datasets along their time coordinate so
# we avoid searching for spatial collocations that do not fulfill
# the temporal condition in the first place. However, the overhead
# of the finding algorithm must be considered too (for example the
# BallTree creation time). This can be adjusted by the parameter
# bin_factor:
bin_duration = self.bin_factor * max_interval
# The binning is more efficient if we use the largest dataset as
# primary:
swapped_datasets = secondary.size > primary.size
if swapped_datasets:
primary, secondary = secondary, primary
# Let's bin the primaries along their time axis and search for the
# corresponding secondary bins:
bin_pairs = (
self._bin_pairs(start, chunk, primary, secondary, max_interval)
for start, chunk in primary.groupby(pd.Grouper(freq=bin_duration))
)
#!/bin/env python3
import pandas as pd
import numpy as np
import glob
import re
from tqdm.notebook import tqdm
from pathlib import Path
def read_conll(input_file, label_nr=3):
"""Reads a conllu file."""
ids = []
texts = []
tags = []
#
text = []
tag = []
idx = None
for line in open(input_file, encoding="utf-8"):
if line.startswith("# sent_id ="):
idx = line.strip().split()[-1]
ids.append(idx)
elif line.startswith("#"):
pass
elif line.strip() == "":
texts.append(text)
tags.append(tag)
text, tag = [], []
else:
try:
splits = line.strip().split("\t")
token = splits[1] # the token
label = splits[label_nr] # the UD Tag label
text.append(token)
tag.append(label)
except ValueError:
print(idx)
return ids, texts, tags
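# Usage sketch (hypothetical paths, not from the original source): read one UD
# treebank split and put it into a DataFrame for inspection.
#   ids, texts, tags = read_conll("data/ud/en/train.conllu", label_nr=3)
#   df = pd.DataFrame({"sent_id": ids, "tokens": texts, "tags": tags})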
def make_lang_code_dicts():
file_path = Path(__file__).parent / "../utils/lang_codes.tsv"
lang_codes = pd.read_csv(file_path, header=0, sep="\t")
return {"code_to_name": pd.Series(lang_codes["English name of Language"].values,
index=lang_codes["ISO 639-1 Code"]).to_dict(),
"name_to_code": pd.Series(lang_codes["ISO 639-1 Code"].values,
index=lang_codes["English name of Language"]).to_dict()}
def make_lang_group_dict():
file_path = Path(__file__).parent / "../utils/lang_groups.tsv"
return pd.read_csv(file_path, sep="\t").set_index("Language").to_dict()["Group"]
def order_table(table, experiment):
assert experiment in ["tfm", "acl", "acl_sentiment"], \
"Invalid experiment, must be 'tfm' 'acl' or 'acl_sentiment'"
# Make sure the path is correct even when importing this function from somewhere else
file_path = Path(__file__).parent / "../utils/{}_langs.tsv".format(experiment)
all_langs = pd.read_csv(file_path, sep="\t", header=None).values.flatten()
lang_colname = find_lang_column(table)
lang_order = [lang for lang in all_langs if lang in table[lang_colname].values]
if isinstance(table.columns, pd.MultiIndex): # Check for hierarchical columns
level = 0
else:
level = None
new_table = table.copy() # Make a copy so the original does not get modified
new_table.insert(0, "sort", table[lang_colname].apply(lambda x: lang_order.index(x)))
new_table = new_table.sort_values(by=["sort"]).drop("sort", axis=1, level=level).reset_index(
drop=True)
return new_table
def convert_table_to_latex(table, experiment):
assert experiment in ["tfm", "acl", "acl_sentiment"], \
"Invalid experiment, must be 'tfm', 'acl' or 'acl_sentiment'"
table = order_table(table, experiment) # In case it's not already in correct order
# Retrieve language groups in correct order and add them to table
table = add_lang_groups(table, "group")
# Latex output
print("\n".join([" & ".join(line) + r"\\" for line in table.astype(str).values]))
# Pandas output
return table
def run_through_data(data_path, f, table=None, **kwargs):
code_dicts = make_lang_code_dicts()
code_to_name = code_dicts["code_to_name"]
# Find all data files in path
data_files = glob.glob(data_path + "*/*.csv") + glob.glob(data_path + "*/*.conllu")
task = data_path.split("/")[-2]
assert task in ["ud", "sentiment"], data_path + " is not a valid data path."
for file_path in tqdm(data_files):
lang_code = file_path.split("\\")[1]
lang_name = code_to_name[lang_code]
match = re.findall(r"[a-z]+\.", file_path)
if match and match[0][:-1] in ["train", "dev", "test"]:
dataset = match[0][:-1]
else:
print(file_path, "is not a valid data path, skipping")
continue
table = f({"file_path": file_path,
"lang_name": lang_name,
"lang_code": lang_code,
"dataset": dataset},
table, **kwargs)
return table
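# Usage sketch (hypothetical layout <data_path>/<lang_code>/<split>.conllu):
# the callback receives a dict with file_path/lang_name/lang_code/dataset plus
# the running table and returns the updated table.
#   def count_sentences(info, table):
#       table = table if table is not None else []
#       _, texts, _ = read_conll(info["file_path"])
#       table.append({"language": info["lang_name"],
#                     "dataset": info["dataset"],
#                     "sentences": len(texts)})
#       return table
#   counts = pd.DataFrame(run_through_data("data/ud/", count_sentences))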
def find_lang_column(table):
r = re.compile(r".*[lL]ang")
if isinstance(table.columns, pd.MultiIndex): # Check for hierarchical columns
matches = list(filter(r.match, table.columns.levels[0])) # Check in top level
else:
matches = list(filter(r.match, table.columns))
if matches:
return matches[0]
else:
return None
def add_lang_groups(table, colname="group"):
# Retrieve language groups in correct order and add them to table in human readable format
lang_to_group = make_lang_group_dict()
lang_colname = find_lang_column(table)
table.insert(loc=0, column=colname, value=table[lang_colname].map(lang_to_group))
return table
def find_table(r, task="", by="colname"):
possible_tasks = ["", "pos", "sentiment"]
possible_by = ["colname", "path"]
assert task in possible_tasks, "Task must be one of " + str(possible_tasks)
assert by in possible_by, "'by' must be one of " + str(possible_by)
all_colnames = pd.read_csv(Path(__file__).parent / "../utils/all_colnames.tsv", sep="\t")
r = re.compile(r)
matches = list(
filter(r.match, all_colnames.loc[all_colnames["path"].apply(lambda x: task in x), by]))
if len(matches) == 0:
raise Exception("No match.")
if by == "colname":
paths = all_colnames.loc[
all_colnames["path"].apply(lambda x: task in x) & all_colnames["colname"].isin(
matches), "path"].values
if len(paths) == 0:
raise Exception("No match.")
print("\nMatched pairs: ", *enumerate(zip(paths, matches)), sep="\n")
i = int(input("Choose pair: "))
path = paths[i]
colname = matches[i]
else:
print("\nMatched paths", *enumerate(np.unique(matches)), sep="\n")
i = int(input("Choose path: "))
path = matches[i]
cols = pd.read_csv(path).columns
print("\nPossible columns", *enumerate( | pd.read_csv(path) | pandas.read_csv |
"""
Timing and Telemetry Data - :mod:`fastf1.core`
==============================================
The Fast-F1 core is a collection of functions and data objects for accessing
and analyzing F1 timing and telemetry data.
Data Objects
------------
All data is provided through the following data objects:
.. autosummary::
:nosignatures:
Weekend
Session
Laps
Lap
Telemetry
SessionResults
DriverResult
The :class:`Session` object is mainly used as an entry point for loading
timing data and telemetry data. The :class:`Session` can create a
:class:`Laps` object which contains all timing, track and session status
data for a whole session.
Usually you will be using :func:`get_session` to get a :class:`Session`
object.
The :class:`Laps` object holds detailed information about multiples laps.
The :class:`Lap` object holds the same information as :class:`Laps` but only
for one single lap. When selecting a single lap from a :class:`Laps` object,
an object of type :class:`Lap` will be returned.
Apart from only providing data, the :class:`Laps`, :class:`Lap` and
:class:`Telemetry` objects implement various methods for selecting and
analyzing specific parts of the data.
Functions
---------
.. autosummary::
:nosignatures:
get_session
get_round
"""
import collections
from functools import cached_property
import logging
import warnings
import numpy as np
import pandas as pd
import fastf1
from fastf1 import api, ergast
from fastf1.utils import recursive_dict_get, to_timedelta
logging.basicConfig(level=logging.INFO, style='{',
format="{module: <8} {levelname: >10} \t{message}")
D_LOOKUP = [[44, 'HAM', 'Mercedes'], [77, 'BOT', 'Mercedes'],
[55, 'SAI', 'Ferrari'], [16, 'LEC', 'Ferrari'],
[33, 'VER', 'Red Bull'], [11, 'PER', 'Red Bull'],
[3, 'RIC', 'McLaren'], [4, 'NOR', 'McLaren'],
[5, 'VET', 'Aston Martin'], [18, 'STR', 'Aston Martin'],
[14, 'ALO', 'Alpine'], [31, 'OCO', 'Alpine'],
[22, 'TSU', 'AlphaTauri'], [10, 'GAS', 'AlphaTauri'],
[47, 'MSC', 'Haas F1 Team'], [9, 'MAZ', 'Haas F1 Team'],
[7, 'RAI', 'Alfa Romeo'], [99, 'GIO', 'Alfa Romeo'],
[6, 'LAT', 'Williams'], [63, 'RUS', 'Williams']]
def get_session(*args, **kwargs):
"""
.. deprecated:: 2.2
replaced by :func:`fastf1.get_session`
"""
# TODO remove
warnings.warn("`fastf1.core.get_session` has been deprecated and will be"
"removed in a future version.\n"
"Use `fastf1.get_session` instead.", FutureWarning)
from fastf1 import events
return events.get_session(*args, **kwargs)
def get_round(year, match):
"""
.. deprecated:: 2.2
will be removed without replacement;
Use :func:`fastf1.get_event` instead to get an
:class:`~fastf1.events.Event` object which provides
information including the round number for the event.
"""
# TODO remove
warnings.warn("_func:`fastf1.core.get_round` has been deprecated and will "
"be removed without replacement in a future version.\n"
"Use :func:`fastf1.get_event` instead to get an "
":class:`~fastf1.events.Event` object which provides "
"information including the round number for the event.",
FutureWarning)
from fastf1 import events
event = events.get_event(year, match)
return event.RoundNumber
class Telemetry(pd.DataFrame):
"""Multi-channel time series telemetry data
The object can contain multiple telemetry channels. Multiple telemetry objects with different channels
can be merged on time. Each telemetry channel is one dataframe column.
Partial telemetry (e.g. for one lap only) can be obtained through various methods for slicing the data.
Additionally, methods for adding common computed data channels are available.
The following telemetry channels existed in the original API data:
- **Car data**:
- `Speed` (float): Car speed
- `RPM` (int): Car RPM
- `nGear` (int): Car gear number
- `Throttle` (float): 0-100 Throttle pedal pressure
- `Brake` (float): 0-100 Brake pedal pressure
- `DRS` (int): DRS indicator (See :meth:`car_data` for more info)
- **Position data**:
- `X` (float): X position
- `Y` (float): Y position
- `Z` (float): Z position
- `Status` (string): Flag - OffTrack/OnTrack
- **For both of the above**:
- `Time` (timedelta): Time (0 is start of the data slice)
- `SessionTime` (timedelta): Time elapsed since the start of the session
- `Date` (datetime): The full date + time at which this sample was created
- `Source` (str): Flag indicating how this sample was created:
- 'car': sample from original api car data
- 'pos': sample from original api position data
- 'interpolated': this sample was artificially created; all values are computed/interpolated
Example:
A sample's source is indicated as 'car'. It contains
values for speed, rpm and x, y, z coordinates.
Originally, this sample (with its timestamp) was received
when loading car data.
This means that the speed and rpm value are original
values as received from the api. The coordinates are
interpolated for this sample.
All methods of :class:`Telemetry` which resample or
interpolate data will preserve and adjust the source flag
correctly when modifying data.
Through merging/slicing it is possible to obtain any combination of telemetry channels!
The following additional computed data channels can be added:
- Distance driven between two samples:
:meth:`add_differential_distance`
- Distance driven since the first sample:
:meth:`add_distance`
- Relative distance driven since the first sample:
:meth:`add_relative_distance`
- Distance to driver ahead and car number of said driver:
:meth:`add_driver_ahead`
.. note:: See the separate explanation concerning the various definitions of 'Time' for more information on the
three date and time related channels: :ref:`time-explanation`
Slicing this class will return :class:`Telemetry` again for slices containing multiple rows. Single rows will be
returned as :class:`pandas.Series`.
Args:
*args (any): passed through to `pandas.DataFrame` superclass
session (:class:`Session`): Instance of associated session object. Required for full functionality!
driver (str): Driver number as string. Required for full functionality!
**kwargs (any): passed through to `pandas.DataFrame` superclass
"""
TELEMETRY_FREQUENCY = 'original'
"""Defines the frequency used when resampling the telemetry data. Either
the string ``'original'`` or an integer to specify a frequency in Hz."""
_CHANNELS = {
'X': {'type': 'continuous', 'missing': 'quadratic'},
'Y': {'type': 'continuous', 'missing': 'quadratic'},
'Z': {'type': 'continuous', 'missing': 'quadratic'},
'Status': {'type': 'discrete'},
'Speed': {'type': 'continuous', 'missing': 'linear'}, # linear is often required as quadratic overshoots
'RPM': {'type': 'continuous', 'missing': 'linear'}, # on sudden changes like sudden pedal application
'Throttle': {'type': 'continuous', 'missing': 'linear'},
'Brake': {'type': 'discrete'},
'DRS': {'type': 'discrete'},
'nGear': {'type': 'discrete'},
'Source': {'type': 'excluded'}, # special case, custom handling
'Date': {'type': 'excluded'}, # special case, used as the index during resampling
'Time': {'type': 'excluded'}, # special case, Time/SessionTime recalculated from 'Date'
'SessionTime': {'type': 'excluded'},
'Distance': {'type': 'continuous', 'missing': 'quadratic'},
'RelativeDistance': {'type': 'continuous', 'missing': 'quadratic'},
'DifferentialDistance': {'type': 'continuous', 'missing': 'quadratic'},
'DriverAhead': {'type': 'discrete'},
'DistanceToDriverAhead': {'type': 'continuous', 'missing': 'linear'}
}
"""Known telemetry channels which are supported by default"""
_metadata = ['session', 'driver']
def __init__(self, *args, session=None, driver=None, **kwargs):
super().__init__(*args, **kwargs)
self.session = session
self.driver = driver
@property
def _constructor(self):
def _new(*args, **kwargs):
return Telemetry(*args, **kwargs).__finalize__(self)
return _new
@property
def base_class_view(self):
"""For a nicer debugging experience; can view DataFrame through this property in various IDEs"""
return pd.DataFrame(self)
def join(self, *args, **kwargs):
"""Wraps :mod:`pandas.DataFrame.join` and adds metadata propagation.
When calling `self.join` metadata will be propagated from self to the joined dataframe.
"""
meta = dict()
for var in self._metadata:
meta[var] = getattr(self, var)
ret = super().join(*args, **kwargs)
for var, val in meta.items():
setattr(ret, var, val)
return ret
def merge(self, *args, **kwargs):
"""Wraps :mod:`pandas.DataFrame.merge` and adds metadata propagation.
When calling `self.merge` metadata will be propagated from self to the merged dataframe.
"""
meta = dict()
for var in self._metadata:
meta[var] = getattr(self, var)
ret = super().merge(*args, **kwargs)
for var, val in meta.items():
setattr(ret, var, val)
return ret
def slice_by_mask(self, mask, pad=0, pad_side='both'):
"""Slice self using a boolean array as a mask.
Args:
mask (array-like): Array of boolean values with the same length as self
pad (int): Number of samples used for padding the sliced data
pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
Returns:
:class:`Telemetry`
"""
if pad:
if pad_side in ('both', 'before'):
i_left_pad = max(0, np.min(np.where(mask)) - pad)
else:
i_left_pad = np.min(np.where(mask))
if pad_side in ('both', 'after'):
i_right_pad = min(len(mask), np.max(np.where(mask)) + pad)
else:
i_right_pad = np.max(np.where(mask))
mask[i_left_pad: i_right_pad + 1] = True
data_slice = self.loc[mask].copy()
return data_slice
def slice_by_lap(self, ref_laps, pad=0, pad_side='both', interpolate_edges=False):
"""Slice self to only include data from the provided lap or laps.
.. note:: Self needs to contain a 'SessionTime' column.
.. note:: When slicing with an instance of :class:`Laps` as a reference, the data will be sliced by first and
last lap. Missing laps in between will not be considered and data for these will still be included in
the sliced result.
Args:
ref_laps (Lap or Laps): The lap/laps by which to slice self
pad (int): Number of samples used for padding the sliced data
pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
interpolate_edges (bool): Add an interpolated sample at the beginning and end to exactly
match the provided time window.
Returns:
:class:`Telemetry`
"""
if isinstance(ref_laps, Laps) and len(ref_laps) > 1:
if 'DriverNumber' not in ref_laps.columns:
ValueError("Laps is missing 'DriverNumber'. Cannot return telemetry for unknown driver.")
if not len(ref_laps['DriverNumber'].unique()) <= 1:
raise ValueError("Cannot create telemetry for multiple drivers at once!")
end_time = ref_laps['Time'].max()
start_time = ref_laps['LapStartTime'].min()
elif isinstance(ref_laps, (Lap, Laps)):
if isinstance(ref_laps, Laps): # one lap in Laps
ref_laps = ref_laps.iloc[0] # needs to be handled as a single lap
if 'DriverNumber' not in ref_laps.index:
ValueError("Lap is missing 'DriverNumber'. Cannot return telemetry for unknown driver.")
end_time = ref_laps['Time']
start_time = ref_laps['LapStartTime']
else:
raise TypeError("Attribute 'ref_laps' needs to be an instance of `Lap` or `Laps`")
return self.slice_by_time(start_time, end_time, pad, pad_side, interpolate_edges)
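    # Illustrative usage sketch for `slice_by_lap` (`car_data` and `some_lap` are assumed
    # objects from the surrounding API, shown here only for orientation):
    #
    #     lap_tel = car_data.slice_by_lap(some_lap, pad=5, pad_side='before')
    #
    # Internally only `start_time`/`end_time` are derived from the lap(s); the actual
    # slicing is delegated to `slice_by_time`.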
def slice_by_time(self, start_time, end_time, pad=0, pad_side='both', interpolate_edges=False):
"""Slice self to only include data in a specific time frame.
        .. note:: Self needs to contain a 'SessionTime' column. Slicing by time uses 'SessionTime' as its reference.
Args:
start_time (Timedelta): Start of the section
end_time (Timedelta): End of the section
pad (int): Number of samples used for padding the sliced data
            pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
interpolate_edges (bool): Add an interpolated sample at the beginning and end to exactly
match the provided time window.
Returns:
:class:`Telemetry`
"""
if interpolate_edges:
edges = Telemetry({'SessionTime': (start_time, end_time),
'Date': (start_time + self.session.t0_date, end_time + self.session.t0_date)},
session=self.session)
d = self.merge_channels(edges)
else:
d = self.copy() # TODO no copy?
sel = ((d['SessionTime'] <= end_time) & (d['SessionTime'] >= start_time))
if np.any(sel):
data_slice = d.slice_by_mask(sel, pad, pad_side)
if 'Time' in data_slice.columns:
# shift time to 0 so laps can overlap
data_slice.loc[:, 'Time'] = data_slice['SessionTime'] - start_time
return data_slice
return Telemetry()
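    # Illustrative usage sketch for `slice_by_time` (the Timedelta bounds are assumed to
    # be valid 'SessionTime' values for the session at hand):
    #
    #     window = tel.slice_by_time(pd.Timedelta(minutes=30), pd.Timedelta(minutes=31),
    #                                interpolate_edges=True)
    #
    # With `interpolate_edges=True`, two synthetic edge samples are merged in first so
    # that the slice starts and ends exactly on the requested bounds.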
def merge_channels(self, other, frequency=None):
"""Merge telemetry objects containing different telemetry channels.
The two objects don't need to have a common time base. The data will be merged, optionally resampled and
missing values will be interpolated.
:attr:`Telemetry.TELEMETRY_FREQUENCY` determines if and how the data is resampled. This can be overridden using
        the `frequency` keyword of this method.
Merging and resampling:
If the frequency is 'original', data will not be resampled. The two objects will be merged and all
timestamps of both objects are kept. Values will be interpolated so that all telemetry channels contain
valid data for all timestamps. This is the default and recommended option.
If the frequency is specified as an integer in Hz the data will be merged as before. After that, the merged
time base will be resampled from the first value on at the specified frequency. Afterwards, the data will
be interpolated to fit the new time base. This means that usually most if not all values of the data will
be interpolated values. This is detrimental for overall accuracy.
Interpolation:
Missing values after merging will be interpolated for all known telemetry channels using
:meth:`fill_missing`. Different interpolation methods are used depending on what kind of data the channel
            contains. For example, forward fill is used to interpolate 'nGear' while linear interpolation is used
for 'RPM' interpolation.
.. note :: Unknown telemetry channels will be merged but missing values will not be interpolated. This can
either be done manually or a custom telemetry channel can be added using :meth:`register_new_channel`.
.. note :: Do not resample data multiple times. Always resample based on the original data
to preserve accuracy
Args:
other (:class:`Telemetry` or :class:`pandas.DataFrame`): Object to be merged with self
frequency (str or int): Optional frequency to overwrite global preset. (Either string 'original' or integer
for a frequency in Hz)
Returns:
:class:`Telemetry`
"""
# merge the data and interpolate missing; 'Date' needs to be the index
data = self.set_index('Date')
other = other.set_index('Date')
# save dtypes before merging so they can be restored after merging
# necessary for example because merging produces NaN values which would cause an int column to become float
# but it can be converted back to int after interpolating missing values
dtype_map = dict()
for df in data, other:
for col in df.columns:
if col not in dtype_map.keys():
dtype_map[col] = df[col].dtype
# Exclude columns existing on both dataframes from one dataframe before merging (cannot merge with duplicates)
on_both_columns = set(other.columns).intersection(set(data.columns))
merged = other.merge(data[data.columns.difference(on_both_columns, sort=False)],
how='outer', left_index=True, right_index=True, sort=True)
# now use the previously excluded columns to update the missing values in the merged dataframe
for col in on_both_columns:
merged[col].update(data[col])
if 'Driver' in merged.columns and len(merged['Driver'].unique()) > 1:
raise ValueError("Cannot merge multiple drivers")
if not frequency:
frequency = data.TELEMETRY_FREQUENCY
i = data.get_first_non_zero_time_index()
if i is None:
raise ValueError("No valid 'Time' data. Cannot resample!")
ref_date = merged.index[i]
# data needs to be resampled/interpolated differently, depending on what kind of data it is
# how to handle which column is defined in self._CHANNELS
if frequency == 'original':
# no resampling but still interpolation due to merging
merged = merged.fill_missing()
merged = merged.reset_index().rename(columns={'index': 'Date'}) # make 'Date' a column again
else:
frq = f'{1 / frequency}S'
resampled_columns = dict()
for ch in self._CHANNELS.keys():
if ch not in merged.columns:
continue
sig_type = self._CHANNELS[ch]['type']
if sig_type == 'continuous':
missing = self._CHANNELS[ch]['missing']
res = merged.loc[:, ch] \
.resample(frq, origin=ref_date).mean().interpolate(method=missing, fill_value='extrapolate')
elif sig_type == 'discrete':
res = merged.loc[:, ch].resample(frq, origin=ref_date).ffill().ffill().bfill()
# first ffill is a method of the resampler object and will ONLY ffill values created during
# resampling but not already existing NaN values. NaN values already existed because of merging,
# therefore call ffill a second time as a method of the returned series to fill these too
# only use bfill after ffill to fix first row
else:
continue
resampled_columns[ch] = res
res_source = merged.loc[:, 'Source'].resample(frq, origin=ref_date).asfreq().fillna(value='interpolation')
resampled_columns['Source'] = res_source
# join resampled columns and make 'Date' a column again
merged = Telemetry(resampled_columns, session=self.session).reset_index().rename(columns={'index': 'Date'})
# recalculate the time columns
merged['SessionTime'] = merged['Date'] - self.session.t0_date
merged['Time'] = merged['SessionTime'] - merged['SessionTime'].iloc[0]
# restore data types from before merging
for col in dtype_map.keys():
try:
merged.loc[:, col] = merged.loc[:, col].astype(dtype_map[col])
except ValueError:
logging.warning(f"Failed to preserve data type for column '{col}' while merging telemetry.")
return merged
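    # Illustrative usage sketch for `merge_channels` (`car_data`/`pos_data` are assumed
    # Telemetry objects with different channels and time bases):
    #
    #     merged = car_data.merge_channels(pos_data)                     # keep all original timestamps
    #     merged_10hz = car_data.merge_channels(pos_data, frequency=10)  # resample onto a 10 Hz grid
    #
    # As the docstring above notes, resampling to a fixed frequency interpolates most
    # values, so 'original' is the safer choice when accuracy matters.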
def resample_channels(self, rule=None, new_date_ref=None, **kwargs):
"""Resample telemetry data.
Convenience method for frequency conversion and resampling. Up and down sampling of data is supported.
'Date' and 'SessionTime' need to exist in the data. 'Date' is used as the main time reference.
There are two ways to use this method:
- Usage like :meth:`pandas.DataFrame.resample`: In this case you need to specify the 'rule' for resampling
and any additional keywords will be passed on to :meth:`pandas.Series.resample` to create a new time
reference. See the pandas method to see which options are available.
- using the 'new_date_ref' keyword a :class:`pandas.Series` containing new values for date
(dtype :class:`pandas.Timestamp`) can be provided. The existing data will be resampled onto this new
time reference.
Args:
rule (optional, str): Resampling rule for :meth:`pandas.Series.resample`
new_date_ref (optional, pandas.Series): New custom Series of reference dates
**kwargs (optional, any): Only in combination with 'rule'; additional parameters for
:meth:`pandas.Series.resample`
"""
if rule is not None and new_date_ref is not None:
raise ValueError("You can only specify one of 'rule' or 'new_index'")
if rule is None and new_date_ref is None:
raise ValueError("You need to specify either 'rule' or 'new_index'")
if new_date_ref is None:
st = pd.Series(index=pd.DatetimeIndex(self['Date']), dtype=int).resample(rule, **kwargs).asfreq()
            new_date_ref = pd.Series(st.index)
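    # Illustrative usage sketch for `resample_channels` (only one of `rule` or
    # `new_date_ref` may be passed; the names below are assumptions):
    #
    #     tel_1s = tel.resample_channels(rule='1S')                      # pandas-style resampling rule
    #     tel_custom = tel.resample_channels(new_date_ref=ref_dates)     # explicit Series of Timestamps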
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 08:59:17 2019
@author: <NAME>
@contact: <EMAIL>
"""
import pandas as pd
def relative_strong_signal(data,threshold,val_threshold):
""" This function compute date based sectional
relative strong/weak indicator given dataframe with
structure = {"row":"dates","col":"product price"}
we select the dates when market overal drops/raise
but a very small portion(threshold) of it goes opposite
Output dataframe gives relative strong/weak indicator
with that date return and associated quantile
"""
answers = {} # Output={"Product":[Dates,return,threshold]..}
ans_df = pd.DataFrame() # Reordering answer dataframe [df1,df2]
# Provide data in pandas dataframe
df = data.pct_change().dropna().round(5)
total = df.shape[1]
for index, row in df.iterrows():
positive_pct = round(sum(row>0)/total,3)
negative_pct = round(sum(row<0)/total,3)
if positive_pct <= threshold:
temp = row.where(row>0).dropna().to_dict()
for key,val in temp.items():
val = val*100
if abs(val) > val_threshold:
if key not in answers:
answers[key] = [[index.date(),val,positive_pct]]
else:
answers[key].append([index.date(),val,positive_pct])
elif negative_pct <= threshold:
temp = row.where(row<0).dropna().to_dict()
for key,val in temp.items():
val = val*100
if abs(val) > val_threshold:
if key not in answers:
answers[key] = [[index.date(),val,negative_pct]]
else:
answers[key].append([index.date(),val,negative_pct])
for key,val in answers.items():
df = pd.DataFrame(val,columns = ["Dates","Return%","Quantile"])
df["Product"] = key
df = df[["Dates","Product","Return%","Quantile"]]
ans_df = ans_df.append(df)
ans_df = ans_df.set_index("Dates").sort_index()
return ans_df
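# Illustrative usage sketch for relative_strong_signal (the CSV path and the two
# thresholds are assumptions for the example):
#
#     prices = pd.read_csv("prices.csv", index_col=0, parse_dates=True)
#     signals = relative_strong_signal(prices, threshold=0.1, val_threshold=0.5)
#     # -> DataFrame indexed by date with columns ["Product", "Return%", "Quantile"]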
def relative_skew_signal( data, window,
Top_NUM, val_tao ):
""" This function compute date based sectional
relative skewness indicator given dataframe with
structure = {"row":"dates","col":"product price"}
we select the dates when market overal skewness is one side
but a very small portion(threshold) of it goes opposite
Output dataframe gives relative strong/weak indicator
with that date return and associated quantile
"""
answers = {} # Output={"Product":[Dates,return,threshold]..}
    ans_df = pd.DataFrame()
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert result[0] == expected_intervals
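        # The interval tuples above appear to encode (start, end, mean, std):
        # 'foo' covers 3/6 of the data, giving the interval [0, 0.5) with
        # mean 0.25 and std 0.5 / 6.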
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
    def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
    def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
        Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer.reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_fit_no_nans(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', 'c'])
assert ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_no_nans_numeric(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet.decoder, [1, 2, 3])
assert not ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_nans(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
            - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', np.nan])
assert ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_nans_numeric(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
            - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2])
np.testing.assert_array_equal(ohet.decoder, [1, 2, np.nan])
assert not ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.num_dummies = 3
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.indexer = [0, 1, 2]
ohet.num_dummies = 3
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.dummy_na = True
ohet.num_dummies = 2
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.indexer = [0, 1]
ohet.dummy_na = True
ohet.num_dummies = 2
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.dummies = ['a']
ohet.num_dummies = 1
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_categorical(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.dummies = ['a']
ohet.indexer = [0]
ohet.num_dummies = 1
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a column of zeros.
Input:
- Series with unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.num_dummies = 1
# Run
out = ohet._transform(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros_categorical(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a column of zeros.
Input:
- Series with categorical and unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.indexer = [0]
ohet.num_dummies = 1
ohet.dummy_encoded = True
# Run
out = ohet._transform(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_unknown_nan(self):
"""Test the ``_transform`` with unknown and nans.
This is an edge case for ``_transform`` where
unknowns should be zeros and nans should be
the last entry in the column.
Input:
- Series with unknown and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.dummy_na = True
ohet.num_dummies = 1
# Run
out = ohet._transform(pd.Series(['b', 'b', np.nan]))
# Assert
expected = np.array([
[0, 0],
[0, 0],
[0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_no_nans(self):
"""Test the ``transform`` without nans.
In this test ``transform`` should return an identity
matrix representing each item in the input.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_nans(self):
"""Test the ``transform`` with nans.
In this test ``transform`` should return an identity matrix
representing each item in the input as well as nans.
Input:
- Series with categorical values and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_single(self):
"""Test the ``transform`` on a single category.
In this test ``transform`` should return a column
filled with ones.
Input:
- Series with a single categorical value
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_unknown(self):
"""Test the ``transform`` with unknown data.
In this test ``transform`` should raise an error
due to the attempt of transforming data with previously
unseen categories.
Input:
- Series with unknown categorical values
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a'])
ohet.fit(data)
# Assert
with np.testing.assert_raises(ValueError):
ohet.transform(['b'])
def test_transform_numeric(self):
"""Test the ``transform`` on numeric input.
In this test ``transform`` should return a matrix
representing each item in the input as one-hot encodings.
Input:
- Series with numeric input
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([1, 2])
ohet.fit(data)
expected = np.array([
[1, 0],
[0, 1],
])
# Run
out = ohet.transform(data)
# Assert
assert not ohet.dummy_encoded
np.testing.assert_array_equal(out, expected)
def test_reverse_transform_no_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet.reverse_transform(transformed)
# Assert
        expected = pd.Series(['a', 'b', 'c'])
import os
import pathlib
import sys
import febrl_data_transform as transform
import pandas as pd
OUTPUT_DATA_DIR = pathlib.Path(__file__).parent / "holdout"
ORIGINALS_DATA_DIR = pathlib.Path(__file__).parent / "holdout" / "originals"
def main():
# Read in FEBRL data with dupes and separate into A/B/true links.
dataset_A = []
dataset_B = []
true_links = []
for filename in [
"febrl_holdout_dupes_light_mod.csv",
"febrl_holdout_dupes_medium_mod.csv",
"febrl_holdout_dupes_heavy_mod.csv",
]:
_df_A, _df_B, _df_true_links = transform.transform_febrl_dataset_with_dupes(
ORIGINALS_DATA_DIR / filename
)
dataset_A.append(_df_A)
dataset_B.append(_df_B)
true_links.append(_df_true_links)
df_A = pd.concat(dataset_A)
df_B = pd.concat(dataset_B)
    df_true_links = pd.concat(true_links)
import os
from os.path import join
import collections
import numpy as np
import pandas as pd
from itertools import chain
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import make_union, Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import ShuffleSplit, GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.metrics import (mean_absolute_error, mean_squared_error,
explained_variance_score, r2_score)
from ukbb_variables import (brain_dmri_fa, brain_dmri_icvf,
brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3,
brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus,
fluid_intelligence,
earlylife, lifestyle, mental_health,
education, primary_demographics)
path_to_csv = '/storage/store/work/kdadi/rs_study/experiments/UKBB/ukb9543.csv'
path_to_matrices = '/storage/store/derivatives/UKBB/rfMRI_tangent_matrix_dim100/'
path_to_merge_brain = '/storage/store/work/kdadi/rs_study/experiments/UKBB/para/roadmap/ukb_add1_merge_brain.csv'
X_iterate = zip([brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus, fluid_intelligence,
earlylife, lifestyle, mental_health, primary_demographics, education],
['fa', 'icvf', 'isovf', 'l1', 'l2', 'l3', 'md', 'mo', 'od',
'smri', 'Fluid \n intelligence', 'Earlylife', 'Lifestyle',
'MH', 'Age', 'Education'])
columns = []
for i in X_iterate:
columns.extend(i[0].keys())
columns.extend(['eid'])
ukbb = pd.read_csv(path_to_csv, usecols=['20016-2.0', 'eid'])
y = ukbb[['eid', '20016-2.0']].dropna()
new_ukbb = pd.DataFrame(ukbb, index=y.index)
new_ukbb = new_ukbb.drop(columns=['20016-2.0'], errors='ignore')
# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
new_ukbb, y, test_size=0.5, random_state=0)
merged_data = pd.read_csv(path_to_merge_brain, usecols=columns)
dmriDict = collections.OrderedDict(chain(brain_dmri_fa.items(),
brain_dmri_icvf.items(),
brain_dmri_isovf.items(),
brain_dmri_l1.items(),
brain_dmri_l2.items(),
brain_dmri_l3.items(),
brain_dmri_md.items(),
brain_dmri_mo.items(),
brain_dmri_od.items()))
dmriDict.update({'eid': 'eid'})
dmri = pd.DataFrame(merged_data, columns=dmriDict.keys())
dmri = dmri.dropna()
def load_combine_data(X_split, merged_data, dmri):
data_frame = []
connectomes = []
eids = []
for e_id in X_split.eid:
this_eid_data = merged_data[merged_data['eid'] == e_id]
this_path = os.path.join(
path_to_matrices, str(e_id) + '_20227_2_0.txt')
this_dmri_data = dmri[dmri['eid'] == e_id]
if not e_id == 3551399:
if os.path.exists(this_path) and not len(this_dmri_data) == 0:
eids.append(e_id)
data_frame.append(this_eid_data)
connectomes.append(np.loadtxt(this_path))
X_split = pd.concat(data_frame)
    y_split = pd.DataFrame(X_split, columns=['20016-2.0'])
'''Runs program'''
import pandas as pd
import matplotlib.pyplot as plt
import create_data as cd
def prompt():
'''Prompts the user what information they want to see.
:returns: page URL
'''
page_list = {
"Marine Fish": "https://www.liveaquaria.com/divers-den/category/3/marine-fish",
"Freshwater": "https://www.liveaquaria.com/divers-den/category/11/freshwater",
"Betta Fish": "https://www.liveaquaria.com/divers-den/category/12/betta-fish",
"Inverts": "https://www.liveaquaria.com/divers-den/category/4/inverts",
"Maricultured Corals":
"https://www.liveaquaria.com/divers-den/category/6/maricultured-corals",
"Aquacultured Corals":
"https://www.liveaquaria.com/divers-den/category/5/aquacultured-corals",
"Soft Corals":
"https://www.liveaquaria.com/divers-den/category/7/polyp-mushroom-and-soft-corals",
"LPS Corals": "https://www.liveaquaria.com/divers-den/category/8/lps-corals",
"SPS Corals": "https://www.liveaquaria.com/divers-den/category/2/sps-corals",
"NPS Corals": "https://www.liveaquaria.com/divers-den/category/10/non-photosynthetic-nps",
"Deals": "https://www.liveaquaria.com/divers-den/category/9/deals-steals"
}
print("""Welcome to the LiveAquaria Scraper!
What information would you like to know about?
Please pick one from the following
\t- Marine Fish
\t- Freshwater
\t- Betta Fish
\t- Inverts
\t- Maricultured Corals
\t- Aquacultured Corals
\t- Soft Corals
\t- LPS Corals
\t- SPS Corals
\t- NPS Corals
\t- Deals
""")
not_answered = True
while not_answered:
answer = input("> ")
if answer in page_list:
return page_list[answer]
def display_data(names, price):
'''Creates and displays data graph
:names: List of item name
:price: Price of item
'''
data = {
'Name': names,
'Price': price
}
    data_frame = pd.DataFrame(data, columns=['Name', 'Price'])
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
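        # For orientation (based on the fixtures above): `self.zarr` stores only the
        # non-fill entries [1, 2, 3, 4, 5, 6] as sp_values, while positions equal to
        # the fill_value 0 are left implicit in the sparse index.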
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
        assert arr.dtype == SparseDtype(np.bool)
import sys
import pandas as pd
import pandas_ta as ta
import investpy as iv
import numpy as np
from datetime import date, datetime
from calendar import monthrange
def find_days_in_month():
today = date.today()
first = today.replace(day=1)
last = today.replace(day=monthrange(today.year, today.month)[1])
return first, last
# def find_days_in_arange(*args):
def find_days_in_arange(day_range, interval='Daily'):
'''
    Currently only supports Days and Weeks
    -> TODO: handle the Months | Years TypeError issue
    * Daily: fetch data when a new day arrives
    * Weekly: fetch data when a new week arrives
    * Monthly: fetch data when a new month arrives
'''
interval_dict = {'Daily': 'D', 'Weekly': 'W'}
end = date.today()
start = np.datetime64(end) - \
np.timedelta64(day_range, interval_dict[interval])
start = np.datetime64(start).astype(datetime)
return start, end
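# Illustrative usage sketch for find_days_in_arange (values are assumptions):
#
#     start, end = find_days_in_arange(30, interval='Daily')    # roughly the last 30 days
#     start, end = find_days_in_arange(12, interval='Weekly')   # roughly the last 12 weeks
#
# Note that only 'Daily' and 'Weekly' are mapped in interval_dict, so other intervals
# currently raise a KeyError.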
def convert_date(date):
return date.strftime("%d/%m/%Y")
# Get data
def get_data(*inputs):
# start, end = find_days_in_month() # need changing this
quote, interval, day_range = inputs
start, end = find_days_in_arange(day_range, interval)
start, end = convert_date(start), convert_date(end) # can reuse
print(start, end)
    # known issue if the current month has not reached its end yet
df = iv.commodities.get_commodity_historical_data(
quote, start, end, interval=interval)
# print(df.tail(10))
# print(df)
df.to_csv(f"data\\{quote}_{interval}.csv")
return
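# Illustrative usage sketch for get_data (the commodity name 'Gold' is an assumption
# about what investpy accepts):
#
#     get_data('Gold', 'Daily', 30)    # writes data\Gold_Daily.csv for the last 30 days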
# Load data
def load_data(quote, interval):
df = pd.read_csv(f"data\\{quote}_{interval}.csv")
df.drop('Currency', axis=1, inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([ | pd.Timestamp('2016-01-01 00:00', tz=tz) | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 16:49:09 2021
@author: Administrator
"""
#this website is called macrotrends
#this script is designed to scrape its financial statements
#yahoo finance only contains the most recent 5 years
#macrotrends can trace back to 2005 if applicable
import re
import json
import pandas as pd
import requests
import os
os.chdir('k:/')
#simply scrape
def scrape(url,**kwargs):
session=requests.Session()
session.headers.update(
{'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'})
response=session.get(url,**kwargs)
return response
#create dataframe
def etl(response):
#regex to find the data
num=re.findall('(?<=div\>\"\,)[0-9\.\"\:\-\, ]*',response.text)
text=re.findall('(?<=s\: \')\S+(?=\'\, freq)',response.text)
#convert text to dict via json
dicts=[json.loads('{'+i+'}') for i in num]
#create dataframe
df= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 08:06:31 2021
@author: bcamc
"""
#%% Import Packages
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes
from matplotlib.lines import Line2D
import pandas as pd
import numpy as np
import scipy
from scipy.stats.stats import pearsonr, spearmanr
import cartopy
import cartopy.crs as ccrs
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xarray as xr
from sklearn.decomposition import IncrementalPCA
import os
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from datetime import datetime
from sklearn import linear_model
import datetime
import cartopy.io.shapereader as shpreader
from cartopy.feature import ShapelyFeature
import shapely.geometry as sgeom
from shapely.ops import unary_union
from shapely.prepared import prep
import joblib
from joblib import Parallel, delayed
from obspy.geodetics import kilometers2degrees, degrees2kilometers
import cmocean
import seaborn as sns
from tabulate import tabulate
# Progress bar package
from tqdm import tqdm
# Gibbs seawater properties packages
import gsw
# Import pre-built mapping functions
from SO_mapping_templates import haversine, South_1ax_map, South_1ax_flat_map
# Import function to calculate fluxes
from Fluxes import calculate_fluxes
# Import taylor diagram script
from taylorDiagram import TaylorDiagram
#%% Define directories
front_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/'
lana_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/dmsclimatology/'
jarnikova_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/Jarnikova_SO_files/'
#%% Set working directories
dir_ = 'C:\\Users\\bcamc\\OneDrive\\Desktop\\Python\\Projects\\sulfur\\southern_ocean\\Scripts'
if os.getcwd() != dir_:
os.chdir(dir_)
#%% Read in data (optional)
export_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/export_data/'
models_combined = pd.read_csv(export_dir+'models_combined.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
X_full_plus = pd.read_csv(export_dir+'X_full_plus.csv').set_index(['datetime','latbins','lonbins'])
# ANN_y_pred = pd.read_csv(export_dir+'ANN_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# RFR_y_pred = pd.read_csv(export_dir+'RFR_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# y = pd.read_csv(export_dir+'y.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# X = pd.read_csv(export_dir+'X.csv').set_index(['datetime','latbins','lonbins'])
# X_full = X_full_plus.drop(['dSSHA','currents','SRD'],axis=1)
#%% Post-processing
# ***** Load in models/data using "SO_DMS_build_models.py" *****
# for plotting
reordered_months = np.array([10.,11.,12.,1.,2.,3.,4.])
# Average predictions
RFR_y_pred_mean = np.sinh(RFR_y_pred).groupby(['latbins','lonbins']).mean()
ANN_y_pred_mean = np.sinh(ANN_y_pred).groupby(['latbins','lonbins']).mean()
# calculate Si*
Si_star = (X_full.loc[:,'Si']-X_full.loc[:,'SSN']).squeeze()
X_full_plus['Si_star'] = Si_star
#------------------------------------------------------------------------------
# Import ACC front locations
front_data = xr.open_dataset(front_dir+'Park_durand_fronts.nc')
fronts = dict()
to_bin = lambda x: np.round(x /grid) * grid
#------------------------------------------------------------------------------
# NB front
fronts['NB'] = pd.DataFrame(np.stack([front_data.LatNB.values,
front_data.LonNB.values,
np.ones(front_data.LonNB.values.shape)],axis=1),
columns=['latbins','lonbins','locs'])
fronts['NB'] = fronts['NB'].sort_values('lonbins')
fronts['NB']['latbins'] = fronts['NB']['latbins'].map(to_bin).round(3)
fronts['NB']['lonbins'] = fronts['NB']['lonbins'].map(to_bin).round(3)
fronts['NB'] = fronts['NB'].set_index(['latbins','lonbins']).squeeze()
fronts['NB'] = fronts['NB'][~fronts['NB'].index.duplicated(keep='first')]
# fronts['NB'] = fronts['NB'].reindex_like(models_combined.loc[1])
#------------------------------------------------------------------------------
# SAF front
fronts['SAF'] = pd.DataFrame(np.stack([front_data.LatSAF.values,
front_data.LonSAF.values,
np.ones(front_data.LonSAF.values.shape)],axis=1),
columns=['latbins','lonbins','locs'])
fronts['SAF'] = fronts['SAF'].sort_values('lonbins')
fronts['SAF']['latbins'] = fronts['SAF']['latbins'].map(to_bin).round(3)
fronts['SAF']['lonbins'] = fronts['SAF']['lonbins'].map(to_bin).round(3)
fronts['SAF'] = fronts['SAF'].set_index(['latbins','lonbins']).squeeze()
fronts['SAF'] = fronts['SAF'][~fronts['SAF'].index.duplicated(keep='first')]
# fronts['SAF'] = fronts['SAF'].reindex_like(models_combined.loc[1])
#------------------------------------------------------------------------------
# PF front
fronts['PF'] = pd.DataFrame(np.stack([front_data.LatPF.values,
front_data.LonPF.values,
np.ones(front_data.LonPF.values.shape)],axis=1),
columns=['latbins','lonbins','locs'])
fronts['PF'] = fronts['PF'].sort_values('lonbins')
fronts['PF']['latbins'] = fronts['PF']['latbins'].map(to_bin).round(3)
fronts['PF']['lonbins'] = fronts['PF']['lonbins'].map(to_bin).round(3)
fronts['PF'] = fronts['PF'].set_index(['latbins','lonbins']).squeeze()
fronts['PF'] = fronts['PF'][~fronts['PF'].index.duplicated(keep='first')]
# fronts['PF'] = fronts['PF'].reindex_like(models_combined.loc[1])
#------------------------------------------------------------------------------
# SACCF front
fronts['SACCF'] = pd.DataFrame(np.stack([front_data.LatSACCF.values,
front_data.LonSACCF.values,
np.ones(front_data.LonSACCF.values.shape)],axis=1),
columns=['latbins','lonbins','locs'])
fronts['SACCF'] = fronts['SACCF'].sort_values('lonbins')
fronts['SACCF']['latbins'] = fronts['SACCF']['latbins'].map(to_bin).round(3)
fronts['SACCF']['lonbins'] = fronts['SACCF']['lonbins'].map(to_bin).round(3)
fronts['SACCF'] = fronts['SACCF'].set_index(['latbins','lonbins']).squeeze()
fronts['SACCF'] = fronts['SACCF'][~fronts['SACCF'].index.duplicated(keep='first')]
# fronts['SACCF'] = fronts['SACCF'].reindex_like(models_combined.loc[1])
#------------------------------------------------------------------------------
# SB front
fronts['SB'] = pd.DataFrame(np.stack([front_data.LatSB.values,
front_data.LonSB.values,
np.ones(front_data.LonSB.values.shape)],axis=1),
columns=['latbins','lonbins','locs'])
fronts['SB'] = fronts['SB'].sort_values('lonbins')
fronts['SB']['latbins'] = fronts['SB']['latbins'].map(to_bin).round(3)
fronts['SB']['lonbins'] = fronts['SB']['lonbins'].map(to_bin).round(3)
fronts['SB'] = fronts['SB'].set_index(['latbins','lonbins']).squeeze()
fronts['SB'] = fronts['SB'][~fronts['SB'].index.duplicated(keep='first')]
# fronts['SB'] = fronts['SB'].reindex_like(models_combined.loc[1])
# front_data.close(); del front_data
#------------------------------------------------------------------------------
SA = gsw.SA_from_SP(SP=X_full.loc[:,'SAL'].values, p=1, lon=X_full.index.get_level_values('lonbins').values, lat=X_full.index.get_level_values('latbins').values)
CT = gsw.CT_from_t(SA=SA, t=X_full.loc[:,'SST'].values, p=1)
density = gsw.density.rho(SA=SA,CT=CT,p=1)
density = pd.Series(density, index=X_full.loc[:,'chl'].index)
#%% Model Sea-Air Fluxes
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# ===================
# RFR Model
# ===================
#-----------------------------------------------------------------------------
# Fluxes (umol m^-2 d^-1):
RFR_flux = dict()
k_dms, RFR_flux['GM12'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, RFR_flux['SD02'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
RFR_flux['GM12'] = pd.Series(RFR_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
RFR_flux['GM12'] = RFR_flux['GM12'][(RFR_flux['GM12'] >= 0) & (RFR_flux['GM12'].notna())].reindex_like(RFR_y_pred)
RFR_flux['SD02'] = pd.Series(RFR_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# ANN Model
# ===================
#-----------------------------------------------------------------------------
ANN_flux = dict()
_, ANN_flux['GM12'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, ANN_flux['SD02'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
ANN_flux['GM12'] = pd.Series(ANN_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
ANN_flux['GM12'] = ANN_flux['GM12'][(ANN_flux['GM12'] >= 0) & (ANN_flux['GM12'].notna())].reindex_like(ANN_y_pred)
ANN_flux['SD02'] = pd.Series(ANN_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Actual
# ===================
#-----------------------------------------------------------------------------
obs_flux = dict()
_, obs_flux['GM12'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='GM12')
_, obs_flux['SD02'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
obs_flux['GM12'] = pd.Series(obs_flux['GM12'], index=X.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
obs_flux['GM12'] = obs_flux['GM12'][(obs_flux['GM12'] >= 0) & (obs_flux['GM12'].notna())].reindex_like(y)
obs_flux['SD02'] = pd.Series(obs_flux['SD02'], index=X.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Regional Fluxes
# ===================
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Constants:
A = ((max_lat-min_lat)*111*1000)*((max_lon-min_lon)*111*1000) # total regional area
A_ocean = A*frac_ocean # fraction of total area covered by ocean
S_mol_mass = 32.06 # molar mass of sulfur
num_days = np.sum(np.array([31,30,31,31,28,31,30])) # number of total days in the dataset
#-----------------------------------------------------------------------------
# Regional modelled flux (convert to Tg over total days)
RFR_flux_reg = (RFR_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
ANN_flux_reg = (ANN_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
obs_flux_reg = (obs_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
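# Unit bookkeeping for the three conversions above (a sketch, assuming GM12 fluxes in umol S m^-2 d^-1
# as noted where they are computed): flux * S_mol_mass [g mol^-1] * A_ocean [m^2] * num_days [d] gives
# umol-scaled grams of S; dividing by 1e6 converts umol -> mol (leaving g S) and dividing by 1e12
# converts g -> Tg, so the *_flux_reg values are in Tg S over the season.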
fluxes_combined = pd.concat([RFR_flux['GM12'], ANN_flux['GM12']], axis=1).mean(axis=1)
#%% Lana Climatology Sea-air Fluxes
files = os.listdir(lana_dir)
# Set 1x1o coords
lana_coords = dict()
lana_coords['lat'] = pd.Series(np.arange(-89,91,1), name='latbins')
lana_coords['lon'] = pd.Series(np.arange(-179,181,1), name='lonbins')
time_match = {'OCT':10,'NOV':11,'DEC':12,'JAN':1,'FEB':2,'MAR':3,'APR':4}
# Retrive DMS climatology values, adding lats/lons to dataframes
lana_clim = []
for file in files:
frame = pd.DataFrame(np.flipud(pd.read_csv(lana_dir+file, header=None)),
index=lana_coords['lat'], columns=lana_coords['lon'])
frame = frame.stack(dropna=False)
frame = frame.reset_index()
frame['datetime'] = np.tile(float(time_match[file.split('.')[0][-3:]]), len(frame))
frame = frame.set_index(['datetime','latbins','lonbins']).squeeze()
frame.name = 'DMS'
lana_clim.append(frame)
lana_clim = pd.concat(lana_clim)
# Regrid variables to compute sea-air fluxes
lana = dict()
for var in ['wind','ice','SST']:
lana[var] = X_full.loc[:,var].copy()
lana[var] = lana[var].reset_index()
lana[var] = lana[var].rename(columns={'lonbins':'lon','latbins':'lat'})
# regrid to nearest degree (i.e. 1x1o grid)
lana[var]['latbins'] = lana[var].lat.round(0).astype('int32')
lana[var]['lonbins'] = lana[var].lon.round(0).astype('int32')
lana[var] = lana[var].set_index(['datetime','latbins','lonbins'])
lana[var] = lana[var].drop(columns=['lat','lon'])
lana[var] = lana[var].groupby(['datetime','latbins','lonbins']).mean().squeeze()
lana[var] = lana[var].sort_index().reindex_like(lana_clim)
print(var+' regrid complete')
# Compute sea-air flux
#-----------------------------------------------------------------------------
lana_flux = dict()
_, lana_flux['GM12'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='GM12')
_, lana_flux['SD02'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
lana_flux['GM12'] = pd.Series(lana_flux['GM12'], index=lana['SST'].index, name='DMS flux')
# filter out negative estimates
lana_flux['GM12'] = lana_flux['GM12'][(lana_flux['GM12'] >= 0) & (lana_flux['GM12'].notna())].reindex_like(lana_clim)
lana_flux['SD02'] = pd.Series(lana_flux['SD02'], index=lana['SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
del frame
#%% Jarnikova Climatology Sea-air Fluxes
# This climatology is from Dec to Feb (Jarnikova & Tortell, 2016)
mat = scipy.io.loadmat(jarnikova_dir+'nov26product.mat')
tj_dms = mat['structname'][0,1]['barnessmooth'][0,0]
tj_lats = mat['structname'][0,1]['latvec'][0,0][0,:]
tj_lons = mat['structname'][0,1]['lonvec'][0,0][0,:]
jarnikova_clim = pd.DataFrame(tj_dms, index=tj_lats, columns=tj_lons)
jarnikova_clim.index = jarnikova_clim.index.rename('latbins')
jarnikova_clim.columns = jarnikova_clim.columns.rename('lonbins')
jarnikova_clim = jarnikova_clim.stack()
# Reindex like lana et al. climatology
jarnikova_clim = jarnikova_clim.reindex_like(lana_clim.loc[[12,1,2]].groupby(['latbins','lonbins']).mean())
# Calculate the fluxes
#-----------------------------------------------------------------------------
jarnikova_flux = dict()
_, jarnikova_flux['GM12'] = calculate_fluxes(data=jarnikova_clim,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
parameterization='GM12')
_, jarnikova_flux['SD02'] = calculate_fluxes(data=jarnikova_clim.values,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
jarnikova_flux['GM12'] = pd.Series(jarnikova_flux['GM12'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
# filter out negative estimates
jarnikova_flux['GM12'] = jarnikova_flux['GM12'][(jarnikova_flux['GM12'] >= 0) & (jarnikova_flux['GM12'].notna())].reindex_like(jarnikova_clim)
jarnikova_flux['SD02'] = pd.Series(jarnikova_flux['SD02'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
#-----------------------------------------------------------------------------
del mat
#%% Compute KDEs for fluxes
def KDE(y):
"""
    A modified wrapper function pulled from the Pandas source code
    (https://github.com/pandas-dev/pandas/blob/0.21.x/pandas/plotting/_core.py#L1381-L1430)
    that returns the kernel density estimates of a Pandas Series/sliced DataFrame
    using scipy's gaussian_kde function. It is efficient like the pandas native
    plotting function (because it evaluates the fitted kernel on only 1000 points
    spanning the data range) but returns the actual values instead of an axes handle.
Parameters
----------
y : Series or sliced Dataframe
Input data.
Returns
-------
evals : Series or Dataframe
        col1: fitted indices (1000 samples between the data max/min bounds);
        col2: kernel density estimates evaluated at each of those points.
"""
from scipy.stats import gaussian_kde
y = y.dropna()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
kde = gaussian_kde(y.dropna())
vals = kde.evaluate(ind)
evals = pd.concat([pd.Series(ind, name='ind'), pd.Series(vals, name='kde')],axis=1)
return evals
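# Minimal usage sketch for KDE (assumes the GM12 flux series computed above; not executed here):
#   evals = KDE(RFR_flux['GM12'])
#   plt.plot(evals['ind'], evals['kde'])   # x = 1000-point sample grid, y = density estimate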
# The function speeds up the computation, but it's still faster to load the saved data
# than to rerun it:
if first_run is True:
# Calculate the KDEs
lana_kde = KDE(lana_flux['GM12'])
jarnikova_kde = KDE(jarnikova_flux['GM12'])
RFR_kde = KDE(RFR_flux['GM12'])
RFR_kde_3mon = KDE(RFR_flux['GM12'].loc[[12,1,2],:,:])
ANN_kde = KDE(ANN_flux['GM12'])
ANN_kde_3mon = KDE(ANN_flux['GM12'].loc[[12,1,2],:,:])
# Write each to a csv files
lana_kde.to_csv(write_dir[:-14]+'lana_kde.csv')
jarnikova_kde.to_csv(write_dir[:-14]+'jarnikova_kde.csv')
RFR_kde.to_csv(write_dir[:-14]+'RFR_kde.csv')
RFR_kde_3mon.to_csv(write_dir[:-14]+'RFR_kde_3mon.csv')
ANN_kde.to_csv(write_dir[:-14]+'ANN_kde.csv')
ANN_kde_3mon.to_csv(write_dir[:-14]+'ANN_kde_3mon.csv')
else:
# load up the csv files
lana_kde = pd.read_csv(write_dir[:-14]+'lana_kde.csv')
jarnikova_kde = pd.read_csv(write_dir[:-14]+'jarnikova_kde.csv')
RFR_kde = pd.read_csv(write_dir[:-14]+'RFR_kde.csv')
RFR_kde_3mon = | pd.read_csv(write_dir[:-14]+'RFR_kde_3mon.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid
from itertools import product
from explore.ContCat import ContCat
def data_iter():
np.random.seed(2342)
n = 30
# 2 classes
cont = np.random.normal(size=n)
cat = np.random.choice([0, 1], size=n).astype(str)
for cont, cat in format_iter(cont, cat):
yield cont, cat
# 3 classes
cont = np.random.normal(size=n)
cat = np.random.choice([0, 1, 3], size=n).astype(str)
for cont, cat in format_iter(cont, cat):
yield cont, cat
def format_iter(cont, cat):
"""
Iterates over various formats
"""
yield cont, cat
cont = | pd.Series(cont) | pandas.Series |
#Creates temperature mean from Tmin and Tmax average
import sys
import numpy as np
import pandas as pd
import rasterio
from osgeo import gdal
from affine import Affine
from pyproj import Transformer
#NAMING SETTINGS & OUTPUT FLAGS----------------------------------------------#
MASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'
CODE_MASTER_DIR = MASTER_DIR + r'air_temp/daily/code/'
RUN_MASTER_DIR = MASTER_DIR + r'air_temp/data_outputs/'
MAP_OUTPUT_DIR = RUN_MASTER_DIR + r'tiffs/daily/county/' #Set subdirectories based on varname and iCode
SE_OUTPUT_DIR = RUN_MASTER_DIR + r'tiffs/daily/county/'
CV_OUTPUT_DIR = RUN_MASTER_DIR + r'tables/loocv/daily/county/'
TIFF_SUFFIX = '.tif'
SE_SUFFIX = '_se.tif'
CV_SUFFIX = '_loocv.csv'
NO_DATA_VAL = -9999
#END SETTINGS----------------------------------------------------------------#
#FUNCTION DEFINITION---------------------------------------------------------#
def get_Coordinates(GeoTiff_name):
# Read raster
with rasterio.open(GeoTiff_name) as r:
T0 = r.transform # upper-left pixel corner affine transform
A = r.read() # pixel values
# All rows and columns
cols, rows = np.meshgrid(np.arange(A.shape[2]), np.arange(A.shape[1]))
# Get affine transform for pixel centres
T1 = T0 * Affine.translation(0.5, 0.5)
# Function to convert pixel row/column index (from 0) to easting/northing
# at centre
def rc2en(r, c): return T1 * (c, r)
# All eastings and northings (there is probably a faster way to do this)
eastings, northings = np.vectorize(
rc2en, otypes=[
float, float])(
rows, cols)
transformer = Transformer.from_proj(
'EPSG:4326',
'+proj=longlat +datum=WGS84 +no_defs +type=crs',
always_xy=True,
skip_equivalent=True)
LON, LAT = transformer.transform(eastings, northings)
return LON, LAT
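# Usage sketch for get_Coordinates (the file name is hypothetical; the real callers below pass
# the county Tmin/Tmax tiff paths built in generate_county_mean):
#   LON, LAT = get_Coordinates('Tmin_map_BI_20211001.tif')
#   # LON and LAT are 2-D arrays of pixel-centre longitudes/latitudes in WGS84 degrees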
def get_island_df(tiff_name,varname):
lon,lat = get_Coordinates(tiff_name)
lon = lon.reshape(-1)
lat = lat.reshape(-1)
df_dict = {'LON':lon,'LAT':lat}
raster_img = rasterio.open(tiff_name)
raster_data = raster_img.read(1)
raster_mask = raster_img.read_masks(1)
raster_mask[raster_mask > 0] = 1
masked_array = raster_data * raster_mask
masked_array[raster_mask == 0] = np.nan
masked_array = masked_array.reshape(-1)
df_dict[varname] = masked_array
island_df = pd.DataFrame.from_dict(df_dict)
shape = raster_data.shape
return island_df, shape
def output_tiff(data,base_tiff_name,out_tiff_name,tiff_shape):
cols,rows = tiff_shape
ds = gdal.Open(base_tiff_name)
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(out_tiff_name, rows, cols, 1, gdal.GDT_Float32)
# sets same geotransform as input
outdata.SetGeoTransform(ds.GetGeoTransform())
outdata.SetProjection(ds.GetProjection()) # sets same projection as input
outdata.GetRasterBand(1).WriteArray(data.reshape(tiff_shape))
# if you want these values (in the mask) transparent
outdata.GetRasterBand(1).SetNoDataValue(NO_DATA_VAL)
outdata.FlushCache() # saves to disk!!
outdata = None
band = None
ds = None
#END FUNCTIONS---------------------------------------------------------------#
#Open raster files for Tmin and Tmax
#Merge standard error as well
#"Main functions"
def generate_county_mean(iCode,date_str,datadir=None):
date_tail = ''.join(date_str.split('-'))
iCode = iCode.upper()
min_varname = 'Tmin'
max_varname = 'Tmax'
mean_varname = 'Tmean'
#[SET_DIR]
if datadir == None:
TMIN_TIFF_DIR = MAP_OUTPUT_DIR + min_varname + '/' + iCode + '/'
TMAX_TIFF_DIR = MAP_OUTPUT_DIR + max_varname + '/' + iCode + '/'
TMEAN_TIFF_DIR = MAP_OUTPUT_DIR + mean_varname + '/' + iCode + '/'
else:
TMIN_TIFF_DIR = datadir[0]
TMAX_TIFF_DIR = datadir[1]
TMEAN_TIFF_DIR = datadir[2]
#Set tiff filename
tmin_tiff_name = TMIN_TIFF_DIR + '_'.join((min_varname,'map',iCode,date_tail)) + TIFF_SUFFIX
tmax_tiff_name = TMAX_TIFF_DIR + '_'.join((max_varname,'map',iCode,date_tail)) + TIFF_SUFFIX
tmean_tiff_name = TMEAN_TIFF_DIR + '_'.join((mean_varname,'map',iCode,date_tail)) + TIFF_SUFFIX
#Open raster files and convert to dataframes
min_df, shape = get_island_df(tmin_tiff_name,min_varname)
max_df, shape = get_island_df(tmax_tiff_name,max_varname)
#Merge them so that we have Tmin and Tmax in same data frame only at matching lat-lons
#(Although lat-lons should not have any mismatch)
merged_df = min_df.merge(max_df,how='inner',on=['LON','LAT'])
    #Take the row-wise mean of the Tmin and Tmax columns as a flat array
tmean = merged_df[[min_varname,max_varname]].mean(axis=1).values
tmean = np.round(tmean,1)
tmean[np.isnan(tmean)] = NO_DATA_VAL
#Output to new tiff
output_tiff(tmean,tmin_tiff_name,tmean_tiff_name,shape)
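# Hedged usage sketch for generate_county_mean (county code and date are illustrative):
#   generate_county_mean('BI', '2021-10-01')
#   # reads the Tmin/Tmax county tiffs for that date and writes Tmean_map_BI_20211001.tif
#   # into the Tmean/BI tiff directory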
def generate_se_mean(iCode,date_str,datadir=None):
#Setting file names
date_tail = ''.join(date_str.split('-'))
iCode = iCode.upper()
min_varname = 'Tmin'
max_varname = 'Tmax'
mean_varname = 'Tmean'
#[SET_DIR]
if datadir == None:
TMIN_TIFF_DIR = MAP_OUTPUT_DIR + min_varname + '_se/' + iCode + '/'
TMAX_TIFF_DIR = MAP_OUTPUT_DIR + max_varname + '_se/' + iCode + '/'
TMEAN_TIFF_DIR = MAP_OUTPUT_DIR + mean_varname + '_se/' + iCode + '/'
else:
TMIN_TIFF_DIR = datadir[0]
TMAX_TIFF_DIR = datadir[1]
TMEAN_TIFF_DIR = datadir[2]
se_min_tiff_name = TMIN_TIFF_DIR + '_'.join((min_varname,'map',iCode,date_tail)) + SE_SUFFIX
se_max_tiff_name = TMAX_TIFF_DIR + '_'.join((max_varname,'map',iCode,date_tail)) + SE_SUFFIX
se_mean_tiff_name = TMEAN_TIFF_DIR + '_'.join((mean_varname,'map',iCode,date_tail)) + SE_SUFFIX
#Reading tiff data and getting statistic values
se_min_df, tiff_shape = get_island_df(se_min_tiff_name,min_varname)
se_max_df, tiff_shape = get_island_df(se_max_tiff_name,max_varname)
    #Propagate the uncertainty of the two input maps: for Tmean = (Tmin + Tmax)/2 the standard error
    #is SE_mean = sqrt(SE_min**2 + SE_max**2) / 2, assuming independent errors
se_min2 = se_min_df[min_varname]**2
se_max2 = se_max_df[max_varname]**2
combined = np.sqrt((se_min2) + (se_max2)) / 2.0
combined = np.round(combined,1)
combined[np.isnan(combined)] = NO_DATA_VAL
output_tiff(combined.values,se_min_tiff_name,se_mean_tiff_name,tiff_shape)
# This code has been automatically converted to comply with the pep8 convention
# This is the Linux command:
# $ autopep8 --in-place --aggressive <filename>.py
if __name__ == '__main__':
iCode = str(sys.argv[1]) # 'bi'
#main_dir = sys.argv[2] #Parent dir, assuming standard temp file tree
date_range = str(sys.argv[2]) #YYYYMMDD_start-YYYYMMDD_end
date_range = date_range.split('-')
st_date = | pd.to_datetime(date_range[0],format='%Y%m%d') | pandas.to_datetime |
__author__ = "<NAME>"
__version__ = ".2"
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
class MetricsFunctions:
def average_los_in_es_shelter(self, entries_df, cleaned=False):
"""
Used For:
:param entries_df:
:return:
"""
stays = entries_df[
(
entries_df["Entry Exit Provider Id"].str.contains("Hansen") |
entries_df["Entry Exit Provider Id"].str.contains("Columbia") |
entries_df["Entry Exit Provider Id"].str.contains("Willamette") |
entries_df["Entry Exit Provider Id"].str.contains("SOS") |
entries_df["Entry Exit Provider Id"].str.contains("5th")
)
]
stays["Entry Date"] = pd.to_datetime(stays["Entry Exit Entry Date"]).dt.date
stays["Exit Date"] = pd.to_datetime(stays["Entry Exit Exit Date"]).dt.date
stays["Exit Date"].fillna(value=datetime(year=2017, month=9, day=30, hour=23, minute=59, second=59))
stays["LOS"] = (stays["Exit Date"] - stays["Entry Date"]).dt.days
filtered_stays = stays[["Client Uid", "Entry Date", "Exit Date", "LOS"]]
total_days = filtered_stays["LOS"].sum()
total_stays = len(filtered_stays.index)
un_cleaned_mean = filtered_stays["LOS"].mean()
cleaned_los_data = filtered_stays[
np.abs(filtered_stays.LOS - filtered_stays.LOS.mean()) <= (3 * filtered_stays.LOS.std(ddof=0))
]
        cleaned_mean = cleaned_los_data["LOS"].mean()
if cleaned:
return tuple(["Length of stay per emergency shelter resident (days)", "", cleaned_mean])
else:
return tuple(["Length of stay per emergency shelter resident (days)", "", un_cleaned_mean])
def average_los_in_res_shelter(self, entries_df, cleaned=False):
"""
Used For: Agency
:param entries_df: data frame of all entries report
:param cleaned: if cleaned == True method will return the mean excluding outliers, which are defined as 3
standard deviations from the norm
:return:
"""
stays = entries_df[
(
entries_df["Entry Exit Provider Id"].str.contains("Doreen's") |
entries_df["Entry Exit Provider Id"].str.contains("Clark Center") |
entries_df["Entry Exit Provider Id"].str.contains("Jean's")
)
]
stays["Entry Date"] = pd.to_datetime(stays["Entry Exit Entry Date"]).dt.date
stays["Exit Date"] = pd.to_datetime(stays["Entry Exit Exit Date"]).dt.date
stays["Exit Date"].fillna(value=datetime(year=2017, month=9, day=30, hour=23, minute=59, second=59))
stays["LOS"] = (stays["Exit Date"] - stays["Entry Date"]).dt.days
filtered_stays = stays[["Client Uid", "Entry Date", "Exit Date", "LOS"]]
total_days = filtered_stays["LOS"].sum()
total_stays = len(filtered_stays.index)
un_cleaned_mean = filtered_stays["LOS"].mean()
cleaned_los_data = filtered_stays[
np.abs(filtered_stays.LOS - filtered_stays.LOS.mean()) <= (3 * filtered_stays.LOS.std())
]
        cleaned_mean = cleaned_los_data["LOS"].mean()
if cleaned:
return tuple(["Length of stay per residential shelter resident (days)", "", cleaned_mean])
else:
return tuple(["Length of stay per residential shelter resident (days)", "", un_cleaned_mean])
def calculate_average_wait_list_length(self, waitlist_df, waitlist="Men"):
"""
Used by: Agency
Used for: LOS on Men's Waitlist; LOS on Women's Waitlist
:param waitlist: This should be either 'Men' or 'Women'
:param waitlist_df:
:return:If there are no values that result from a removal of outlier items (3 standard deviations from the mean)
then a basic mean will be returned other wise the return value will exclude outliers.
"""
def check_list(waitlist, data_frame=waitlist_df):
if waitlist == "Men":
specific_wait_list = data_frame[data_frame["Waitlist Name"].str.contains(waitlist)]
return specific_wait_list
elif waitlist == "Women":
specific_wait_list = data_frame[
(
data_frame["Waitlist Name"].str.contains(waitlist) |
data_frame["Waitlist Name"].str.contains("Jean's")
)
]
return specific_wait_list
specified = check_list(waitlist, waitlist_df.dropna(axis=0, subset=["Waitlist Name"]))
specified["Event Date"] = pd.to_datetime(specified["Waitlist Event Date"])
filtered = specified[["ClientID", "Event Date", "Waitlist Event Code"]]
filtered["q_start"] = datetime(year=2017, month=10, day=1, hour=0, minute=0, second=0)
filtered["q_end"] = datetime(year=2017, month=12, day=31, hour=23, minute=59, second=59)
in_shelter = filtered[
(
filtered["Waitlist Event Code"].str.contains("IN") &
(filtered["Event Date"] >= filtered["q_start"]) &
(filtered["Event Date"] <= filtered["q_end"])
)
]
in_shelter["In Date"] = in_shelter["Event Date"]
in_shelter_clean = in_shelter[["ClientID", "In Date"]]
new_to_list = filtered[
(
filtered["Waitlist Event Code"].str.contains("NEW") &
filtered["ClientID"].isin(in_shelter["ClientID"])
)
]
new_to_list_sorted = new_to_list.sort_values(by="Event Date").drop_duplicates(subset="ClientID", keep="first")
new_to_list_clean = new_to_list_sorted[["ClientID", "Event Date"]]
merged = in_shelter_clean.merge(new_to_list_clean, on="ClientID", how="left")
merged["time_on_list"] = (merged["In Date"] - merged["Event Date"]).dt.days
all_values_mean = merged.time_on_list.mean()
clean_mean = merged[
np.abs(merged.time_on_list - merged.time_on_list.mean()) <= (3 * merged.time_on_list.std())
].time_on_list.mean()
if clean_mean:
return tuple([
"Length of {}'s shelter waitlist (in days)".format(waitlist),
"",
"{} Days".format(clean_mean)
])
else:
return tuple([
"Length of {}'s shelter waitlist (in days)".format(waitlist),
"",
"{} Days".format(all_values_mean)
])
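    # Hedged usage sketch for calculate_average_wait_list_length (waitlist_df is the raw waitlist export):
    #   metrics.calculate_average_wait_list_length(waitlist_df, waitlist="Men")
    #   # -> ("Length of Men's shelter waitlist (in days)", "", "<mean days> Days")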
def count_access_employment_services(self, services_df, staff_on_team, direct=False):
"""
prior to 7/1/2017 = ["<NAME>(7792)"]
from 7/1/2017 - 10/1/2017 = ["<NAME>(8044)", "<NAME>(7792)"]
post 10/1/2017 = ["<NAME>(7727)", "<NAME>(8044)", "<NAME>(7792)"]
        Counts from this report run per quarter, then merged and de-duplicated, do not match counts
        run across multiple quarters at once. This is strange and seems to indicate an error somewhere;
        add unit tests and investigate.
        Currently, due to staffing changes during the previous quarter, run this for each quarter with the
        direct=True parameter, assign each quarter's output to its own variable, merge those variables on
        the Client Uid column with an 'outer' merge, de-duplicate, and return the len of the resulting
        data frame's index (see the commented sketch after this method).
:param services_df:
:param staff_on_team:
:return:
"""
if direct == True and type(staff_on_team) == list:
return services_df[
services_df["Service User Creating"].isin(staff_on_team) &
services_df["Service Provide Provider"].str.contains("Support")
].drop_duplicates(subset="Client Uid")
elif direct == True and type(staff_on_team) == str:
return services_df[
services_df["Service User Creating"].str.contains(staff_on_team) &
services_df["Service Provide Provider"].str.contains("Support")
].drop_duplicates(subset="Client Uid")
elif type(staff_on_team) == list:
served = services_df[
services_df["Service User Creating"].isin(staff_on_team) &
services_df["Service Provide Provider"].str.contains("Support")
].drop_duplicates(subset="Client Uid")
return tuple(["600 participants will access employment services", 600, len(served.index)])
elif type(staff_on_team) == str:
served = services_df[
(services_df["Service User Creating"] == staff_on_team) &
services_df["Service Provide Provider"].str.contains("Support")
].drop_duplicates(subset="Client Uid")
return tuple(["600 participants will access employment services", 600, len(served.index)])
else:
return tuple(["Error", "Error", "Error"])
def count_employment_services(self, employment_df, start_date, end_date, metric):
"""
Currently this method uses some rather flawed data for reporting individuals served. Instead please used the
count_access_employment_services method to report on this metric.
The end date should be the last day of the quarter or reporting period.
:param employment_df: A data-frame made from the employment tracker access datasheet available on tprojects.info
on the support services page.
:param start_date: date string using the standard U.S. mm/dd/YYYY format
:param end_date: date string using the standard U.S. mm/dd/YYYY format
        :return: An integer indicating the number of unique individuals served by the employment program.
"""
start = datetime.strptime(start_date, "%m/%d/%Y")
end = datetime.strptime(end_date, "%m/%d/%Y")
employment_df["Start Date"] = start
employment_df["End Date"] = end
if metric.lower() == "served":
in_period_access = employment_df[
(
(employment_df["Start Date"] <= employment_df["Created"]) &
(employment_df["Created"] <= employment_df["End Date"])
) |
(
employment_df["Date Income Changed"].notna() &
(
(employment_df["Start Date"] <= employment_df["Date Income Changed"]) &
(employment_df["Date Income Changed"] <= employment_df["End Date"])
)
)
]
served = len(in_period_access.drop_duplicates(subset="PT ID").index)
return tuple(["600 participants will access employment services", 600, served])
elif metric.lower() == "employment":
in_period_gained = employment_df[
employment_df["Employment Gained"].notna() &
(
(employment_df["Start Date"] <= employment_df["Employment Gained"]) &
(employment_df["Employment Gained"] <= employment_df["End Date"])
)
]
gained = len(in_period_gained.drop_duplicates(subset="PT ID").index)
return tuple(["","", gained])
elif metric.lower() == "income":
in_period_gained = employment_df[
employment_df["Date Income Changed"].notna() &
(
(employment_df["Start Date"] <= employment_df["Employment Gained"]) &
(employment_df["Date Income Changed"] <= employment_df["End Date"])
)
]
gained = len(in_period_gained.drop_duplicates(subset="PT ID").index)
return tuple(["15% of participants will increase their incomes ****", "15%", gained])
else:
return tuple(["Error", "Error", "Error"])
def count_employment_services_by_provider(self, employment_df, entries_df, provider):
entries = entries_df[
entries_df["Entry Exit Provider Id"].str.contains(provider)
].drop_duplicates(subset="Client Uid")
employment = employment_df[["PT ID", "Employment Gained", "Date Income Changed", "Employment Lost"]]
merged = entries.merge(employment, left_on="Client Uid", right_on="PT ID", how="left")
cleaned = merged[
(
merged["Employment Gained"].notna() &
(merged["Entry Exit Entry Date"] <= merged["Employment Gained"]) &
(merged["Employment Gained"] <= merged["Entry Exit Exit Date"])
) |
(
merged["Date Income Changed"].notna() &
(merged["Entry Exit Entry Date"] <= merged["Date Income Changed"]) &
(merged["Date Income Changed"] <= merged["Entry Exit Exit Date"])
)
]
return tuple([
"",
"",
"{} / {} = {}%".format(
len(cleaned.index), len(entries.index), 100 * (len(cleaned.index) / len(entries.index))
)
])
def count_all_ep(self, placements_df):
"""
Used by: Agency
:param placements_df:
:return:
"""
ep_placements = placements_df[
placements_df["Intervention Type (TPI)(8745)"] == "Eviction Prevention"
]
de_duplicated_ep = len(ep_placements.drop_duplicates(subset="Client Uid").index)
return tuple(["124 participants will have their evictions prevented", 124, de_duplicated_ep])
def count_all_pp(self, placements_df):
"""
Used by: Agency
:param placements_df:
:return:
"""
pp_placements = placements_df[
placements_df["Intervention Type (TPI)(8745)"].notnull &
placements_df["Intervention Type (TPI)(8745)"].str.contains("Permanent")
]
de_duplicated_pp = len(pp_placements.drop_duplicates(subset="Client Uid").index)
return tuple(["1,065 participants will be permanently housed*", 1065, de_duplicated_pp])
def count_all_placed(self, placements_df):
"""
:param placements_df:
:return:
"""
return placements_df.drop_duplicates(subset="Client Uid")
def count_all_placed_by_provider(
self,
placements_df,
provider=["ACCESS", "SSVF - TPI", "Retention", "Residential CM"]
):
"""
Used By: ACCESS, SSVF, Retention, Residential CM
:param placements_df:
:param provider:
:return:
"""
de_duplicated = placements_df[
placements_df["Department Placed From(3076)"].isin(provider)
].drop_duplicates(subset="Client Uid")
return len(de_duplicated.index)
def count_entries_by_provider(self, entries_df, provider):
"""
Used By:
:param entries_df:
:param provider:
:return:
"""
if type(provider) == str:
count = len(entries_df[
entries_df["Entry Exit Provider Id"].str.contains(provider)
].drop_duplicates(subset="Client Uid", keep="first").index)
if provider == "Residential":
return tuple(["Engage 900 participants in case management", 900, count])
elif provider == "Retention":
pass
elif provider == "SSVF":
pass
elif provider == "ACCESS":
return tuple(["Engage 1200 participants in case management", 1200, count])
elif provider.lower() == "columbia":
return tuple(["700 unduplicated participants have a safe place to sleep", 700, count])
elif provider.lower() == "wil":
return tuple(["1000 unduplicated participants have a safe place to sleep", 1000, count])
elif provider.lower() == "5th":
return tuple(["700 unduplicated participants have a safe place to sleep", 700, count])
elif provider.lower() == "han":
return tuple(["1000 unduplicated participants have a safe place to sleep", 1000, count])
elif provider.lower() == "sos":
return tuple(["700 unduplicated participants have a safe place to sleep", 700, count])
elif (provider.lower() == "clark center") or (provider.lower() == "doreen's"):
return tuple([
"550 unduplicated participants have a safe place to sleep at {}".format(provider),
550,
count
])
elif provider.lower() == "jean's place":
return tuple([
"350 unduplicated participants have a safe place to sleep at Jean's Place",
350,
count
])
elif type(provider) == list:
full_provider_name = {
"cc": "Transition Projects (TPI) - Clark Center - SP(25)",
"col": "Transition Projects (TPI) - Columbia Shelter(5857)",
"dp": "Transition Projects (TPI) - Doreen's Place - SP(28)",
"h": "Transition Projects (TPI) - Hansen Emergency Shelter - SP(5588)",
"jp": "Transition Projects (TPI) - Jean's Place L1 - SP(29)",
"sos": "Transition Projects (TPI) - SOS Shelter(2712)",
"dpgpd": "Transition Projects (TPI) - VA Grant Per Diem (inc. Doreen's Place GPD) - SP(3189)",
"wc": "Transition Projects (TPI) - Willamette Center(5764)",
"access": "Transition Projects (TPI) - ACCESS - CM(5471)",
"res": "Transition Projects (TPI) - Residential - CM(5473)",
"ret": "Transition Projects (TPI) - Retention - CM(5472)",
"ca": "Transition Projects (TPI) Housing - Clark Annex PSH - SP(2858)",
"cagpd": "Transition Projects (TPI) Housing - Clark Annex GPD - SP(4259)"
}
provider_list = []
for dept in provider:
name = full_provider_name[dept.lower()]
provider_list.append(name)
count = len(entries_df[
entries_df["Entry Exit Provider Id"].isin(provider_list)
].drop_duplicates(subset="Client Uid", keep="first").index)
return tuple([
"1850 unduplicated participants in all emergency shelters will have a safe place to sleep",
1850,
count
])
def count_exit_destination_by_shelter_group(self, entries_df, shelter_group):
low_barrier = [
"Transition Projects (TPI) - SOS Shelter(2712)",
"Transition Projects (TPI) - Willamette Center(5764)",
"Transition Projects (TPI) - Columbia Shelter(5857)",
"Transition Projects (TPI) - 5th Avenue Shelter(6281)",
"Transition Projects (TPI) - Hansen Emergency Shelter - SP(5588)",
]
residential = [
"Transition Projects(TPI) - Clark Center - SP(25)",
"Transition Projects(TPI) - VA Grant Per Diem(inc.Doreen's Place GPD) - SP(3189)",
"Transition Projects(TPI) - Jean's Place L1 - SP(29)",
"Transition Projects(TPI) - Doreen's Place - SP(28)",
"Transition Projects(TPI) - Jean’s Place VA Grant Per Diem(GPD) - SP(3362)"
]
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
temp_destination = [
# "Emergency shelter, including hotel or motel paid for with emergency shelter voucher (HUD)",
"Hospital or other residential non-psychiatric medical facility (HUD)",
"Hotel or motel paid for without emergency shelter voucher (HUD)",
"Jail, prison or juvenile detention facility (HUD)",
"Staying or living with family, temporary tenure (e.g., room, apartment or house)(HUD)",
"Staying or living with friends, temporary tenure (e.g., room apartment or house)(HUD)",
"Transitional housing for homeless persons (including homeless youth) (HUD)",
"Moved from one HOPWA funded project to HOPWA TH (HUD)",
"Substance abuse treatment facility or detox center (HUD)",
"Psychiatric hospital or other psychiatric facility (HUD)"
]
if shelter_group.lower() == "res":
perm = entries_df[
entries_df["Entry Exit Provider Id"].isin(residential) &
entries_df["Entry Exit Destination"].notna() &
entries_df["Entry Exit Destination"].isin(perm_destination)
].drop_duplicates(subset="Client Uid")
temp = entries_df[
entries_df["Entry Exit Provider Id"].isin(residential) &
entries_df["Entry Exit Destination"].notna() &
entries_df["Entry Exit Destination"].isin(temp_destination)
].drop_duplicates(subset="Client Uid")
all = entries_df[
entries_df["Entry Exit Provider Id"].isin(residential) &
entries_df["Entry Exit Exit Date"].notna()
].drop_duplicates(subset="Client Uid")
return (len(perm.index), len(temp.index), len(all.index))
elif shelter_group.lower() == "low":
perm = entries_df[
entries_df["Entry Exit Provider Id"].isin(low_barrier) &
entries_df["Entry Exit Destination"].notna() &
entries_df["Entry Exit Destination"].isin(perm_destination)
].drop_duplicates(subset="Client Uid")
temp = entries_df[
entries_df["Entry Exit Provider Id"].isin(low_barrier) &
entries_df["Entry Exit Destination"].notna() &
entries_df["Entry Exit Destination"].isin(temp_destination)
].drop_duplicates(subset="Client Uid")
all = entries_df[
entries_df["Entry Exit Provider Id"].isin(low_barrier) &
entries_df["Entry Exit Exit Date"].notna()
].drop_duplicates(subset="Client Uid")
return (len(perm.index), len(temp.index), len(all.index))
else:
pass
def count_households_screened(self, entries_hh_df):
"""
Used By:
:param entries_hh_df:
:return:
"""
screened = len(entries_hh_df[
entries_hh_df["Entry Exit Provider Id"].str.contains("Screening")
].drop_duplicates(subset="Household Uid", keep="first").index)
return tuple(["Screen 784 veteran families for services", 784, screened])
def count_hygiene_services_by_provider(self, services_df, provider="Day Center"):
"""
Use for: Agency, Day Center
Question: participants will receive hygiene services
Warning: Do not update the services_1 list to include the newer service code description of
"Personal Goods/Services" as this will cause the module to return a count including non-hygiene services that
are sharing this same service code description
:return: a count of unique participants receiving any hygiene service
"""
services_1 = [
"Bathing Facilities",
"Personal/Grooming Supplies",
"Hairdressing/Nail Care"
]
services_2 = [
"Shower",
"Showers",
"Laundry Supplies",
"Clothing",
"Hairdressing/Nail Care",
"Personal Grooming Supplies"
]
if provider == "Day Center":
services = services_df[
((services_df["Service Code Description"].isin(services_1)) | (
services_df["Service Provider Specific Code"].isin(services_2))) &
services_df["Service Provide Provider"].str.contains(provider)
]
services_provided = services[
services["Service Provide Provider"].str.contains("Day Center")
]
return tuple(["40,000 hygiene services provided", 40000, len(services_provided.index)])
elif provider == "Agency":
services = services_df[
((services_df["Service Code Description"].isin(services_1)) | (
services_df["Service Provider Specific Code"].isin(services_2)))
]
de_duped = services.drop_duplicates(subset="Client Uid", inplace=False)
return tuple(["7,500 participants will receive hygiene services", 7500, len(de_duped.index)])
def count_id_assistance_by_provider(self, services_df, provider="Day Center"):
"""
Used by: Day Center
:param services_df:
:param provider:
:return:
"""
id_assistance = ["Birth Certificate", "Driver's License/State ID Card"]
served = services_df[
services_df["Service Provide Provider"].str.contains(provider) & services_df[
"Service Provider Specific Code"].isin(id_assistance)
].drop_duplicates(subset="Client Uid")
return tuple(["1500 individuals received assistance obtaining ID documents", 1500, len(served.index)])
def count_mailing_services_by_day_center(self, services_df):
"""
Used For: Day Center
:param services_df:
:return:
"""
mail_services = len(services_df[
services_df["Service Code Description"] == "Temporary Mailing Address"
].index)
return tuple(["43,000 mailing services provided", 43000, mail_services])
def count_ongoing_cm_services(self, services_df):
"""
Used For: Agency
Issues: Currently, regardless of the method name, this method counts unique individuals served with a CM service
ignoring department. This could cause false counts when the wrong service is selected by a non-cm provider and
does not truly show ongoing services as defined by the agency data dictionary.
:param services_df:
:return:
"""
cm_services = [
"Case Management - Office Visit",
"Case Management - Other",
"Case Management - Phone Meeting",
"Case Management - Home Visit",
"Case Management Meeting - Home Visit",
"Case Management Meeting - Office Visit",
"Case Management Meeting - Phone"
]
receiving = len(services_df[
services_df["Service Provider Specific Code"].isin(cm_services)
].drop_duplicates(subset="Client Uid", inplace=False).index)
return tuple(["2,100 participants served through case management", 2100, receiving])
def count_ongoing_cm_services_by_department(self, services_df, provider="SSVF"):
"""
Used For: Residential CM, Retention, SSVF
Question: Provide ongoing case management to X participants
:param services_df:
:param provider:
:return:
"""
cm_services = [
"Case Management - Office Visit",
"Case Management - Other",
"Case Management - Phone Meeting",
"Case Management - Home Visit",
"Case Management Meeting - Home Visit",
"Case Management Meeting - Office Visit",
"Case Management Meeting - Phone"
]
in_provider = services_df[
services_df["Service Provide Provider"].str.contains(provider)
]
receiving = in_provider[
in_provider["Service Provider Specific Code"].isin(cm_services)
]
ongoing = receiving.groupby(by="Client Uid").count()
output = len(ongoing[ongoing["Service Provider Specific Code"] >= 2].index)
if provider == "Residential":
return tuple(["Provide ongoing case management to 700 participants", 700, output])
elif provider == "Retention":
return tuple(["Provide ongoing case management to 800 participants", 800, output])
elif provider == "SSVF":
return tuple(["Provide ongoing case management to 450 participants", 450, output])
elif provider == "ACCESS":
return tuple(["Provide ongoing case management to 800 participants", 800, output])
else:
return tuple(["PROVIDERERROR", "PROVIDERERROR", "PROVIDERERROR"])
def count_perm_by_provider(self, placements_df, provider=["ACCESS", "SSVF - TPI", "Retention", "Residential CM"]):
"""
Used For: Outreach, SSVF, Retention CM, Residential CM
:param placements_df:
:param provider:
:return:
"""
placements = len(placements_df[
(placements_df["Department Placed From(3076)"].isin(provider)) & (
placements_df["Intervention Type (TPI)(8745)"] == "Permanent Placement")
].drop(
[
"Client First Name",
"Client Last Name",
"Department Placed From(3076)",
"Placement Case Manager(3075)",
"Placement Grant(8743)",
"Reporting Program (TPI)(8748)"
],
axis=1
).index)
if (len(provider) == 1) and (provider[0] == "ACCESS"):
return tuple(["415 participants move into permanent housing", 415, placements])
elif (len(provider) == 1) and (provider[0] == "SSVF - TPI"):
return tuple(["262 veteran families will move into permanent housing", 262, placements])
else:
return "Error: Empty Provider List"
def count_pts_with_barrier_mitigation_and_doc_prep(self, services_df, provider="CHAT"):
"""
Used by: Coordinated Access (CHAT)
:param services_df: Use the standard services spread sheet.
:param provider:
:return:
"""
services = [
"Housing Barrier Resolution",
"Birth Certificate",
"DD214",
"Driver's License/State ID Card",
"General Form Assistance",
"Notary Service"
]
served = len(services_df[
services_df["Service Provide Provider"].str.contains(provider) &
services_df["Service Provider Specific Code"].isin(services)
].drop_duplicates(subset="Client Uid", inplace=False).index)
return tuple([
"Provide barrier mitigation and document prep to 150 individuals",
150,
served
])
def count_ep_by_provider(self, placements_df, provider=["ACCESS", "SSVF - TPI", "Retention", "Residential CM"]):
"""
Used For: Outreach, SSVF, Retention CM, Residential CM
:param placements_df:
:param provider:
:return:
"""
placements = placements_df[
(placements_df["Department Placed From(3076)"].isin(provider)) & (
placements_df["Intervention Type (TPI)(8745)"] == "Eviction Prevention")
].drop(
[
"Client First Name",
"Client Last Name",
"Department Placed From(3076)",
"Placement Case Manager(3075)",
"Placement Grant(8743)",
"Reporting Program (TPI)(8748)"
],
axis=1
)
if (len(provider) == 1) and (provider[0] == "SSVF - TPI"):
return tuple(["56 veteran families will have evictions prevented", 56, len(placements.index)])
else:
return tuple(["", "", len(placements.index)])
def count_exclusions_by_provider(self, exclusions_df, provider):
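        """
        Counts infractions recorded by the given provider that resulted in an actual
        exclusion, i.e. rows with a banned code other than Warning, Safety Alert, or Other.
        :param exclusions_df:
        :param provider:
        :return:
        """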
exclusions = exclusions_df[
exclusions_df["Infraction Provider"].str.contains(provider) &
exclusions_df["Infraction Banned Code"].notna() &
(exclusions_df["Infraction Banned Code"] != "Warning") &
(exclusions_df["Infraction Banned Code"] != "Safety Alert") &
(exclusions_df["Infraction Banned Code"] != "Other")
]
return tuple([
"20% reduction in participant exclusions",
"20%",
"{} <--- Must be divided by last quarter's numbers -(last quarter/ this quarter)".format(
len(exclusions.index)
)
])
def count_latinos_served_by_provider(self, services_df, provider="Wellness Access"):
"""
Used For: Wellness Access
Use: Standard All Services Report
:param services_df:
:param provider:
:return:
"""
original = services_df
cleaned = original[
(
original["Service Provide Provider"].str.contains(provider) &
(original["Ethnicity (Hispanic/Latino)(896)"].str.contains("Hispanic/Latino"))
)
].drop_duplicates(subset="Client Uid")
return tuple(["80 Latino participants outreached to per year", 80, len(cleaned.index)])
def count_legal_barriers_mitigated(self, entries_df, services_df, provider):
"""
Used For: SSVF
Needs to be modified to look at the version of the all services report which includes needs outcomes.
:param cm_provider:
:param services_df:
:param entries_df:
:return: a count of the participants
"""
no_na = entries_df.dropna(axis=0, subset=["Entry Exit Provider Id"])
entries = no_na[no_na["Entry Exit Provider Id"].str.contains(provider)]
in_provider_list = entries["Client Uid"].tolist()
legal_services = len(services_df[
(
(services_df["Service Code Description"] == "Legal Services") &
(services_df["Client Uid"].isin(in_provider_list))
)
].drop_duplicates(subset="Client Uid").index)
return tuple(["50 veteran families will have legal barriers mitigated", 50, legal_services])
def count_poc_placed(self, placements_df, services_df):
"""
Used For: Agency
:param placements_df:
:return:
"""
poc_placements = len(placements_df[
placements_df["Client Uid"].isin(self.return_poc_list(services_df))
].drop_duplicates(subset="Client Uid").index)
return poc_placements
def count_poc_placed_by_provider(
self,
placements_df,
services_df,
provider=["ACCESS", "SSVF - TPI", "Retention", "Residential CM"]
):
"""
Used For:
:param placements_df:
:param services_df:
:param provider:
:return:
"""
poc_placements = placements_df[
(
(placements_df["Client Uid"].isin(self.return_poc_list(services_df))) &
(placements_df["Department Placed From(3076)"].isin(provider))
)
]
return len(poc_placements.drop_duplicates(subset="Client Uid").index)
def count_provider(self, entries_df, cm_provider, goal):
"""
Used For: Agency
:param entries_df:
:param cm_provider:
:param goal:
:return:
"""
provider_ee = len(entries_df[
entries_df["Entry Exit Provider Id"].str.contains(cm_provider)
].drop_duplicates(subset="Client Uid", keep="first").index)
return tuple(["{} participants served by {}".format(goal, cm_provider), goal, provider_ee])
def count_referrals_resulting_in_connections(self, services_df, needs_df, provider, referrals, metric):
"""
Used by: Wellness Access
Method of the method: First remove rows from the services data frame which were not created by the provider and
were not in the list of referral services.
Then, remove rows from the needs data frame where the Client Uid is not in the Client Uid column of the services
data frame and the need status is not fully met.
Merge left the needs data frame into the services data frame using Client Uid from both data frames as well as
Need Creation Date from the need data frame (right) and the service creation data from the services data frame
(left). This will be done using the pd.merge method and entering the columns as lists of strings in the
left_on and right_on params.
medical_referrals = ["Referral - Eye Care", "Referral - Dental Care", "Referral - Medical Care"]
mh_referrals = [
"Referral - A&D Support", "Referral - DV Support", "Referral - Mental Health Care", "Referral - MH Support"
]
:param provider:
:param services_df:
:param needs_df:
:param referrals: provide a list of strings or this will return an error
:param metric: enter one of the following strings - 'med count', 'mh sud count', 'percent med', 'percent mh sud'
:return:
"""
services = services_df[
services_df["Service Provide Provider"].str.contains(provider) &
services_df["Service Provider Specific Code"].isin(referrals)
]
needs = needs_df[
needs_df["Client Uid"].isin(services["Client Uid"].tolist()) &
(needs_df["Need Status"] == "Closed") &
(needs_df["Need Outcome"] == "Fully Met")
]
served = pd.merge(
services,
needs,
how="left",
left_on=["Client Uid", "Service Provide Start Date"],
right_on=["Client Uid", "Need Date Set"]
)
if metric == "med count":
return tuple([
"200 connections to medical care per year",
200,
len(served.index)
])
elif metric == "mh sud count":
return tuple([
"700 connections to mental health or SUD services per year",
700,
len(served.index)
])
elif metric == "percent med":
all = len(served.index)
clean = served.dropna(axis=0, how="any", subset=["Need Uid"])
success = len(clean.index)
return tuple([
"50% of referrals result in connection to medical care provider",
"50%",
"{}/{} = {}%".format(success, all, 100*(success/all))
])
elif metric == "percent mh sud":
all = len(served.index)
success = len(served.dropna(axis=0, how="any", subset=["Need Uid"]).index)
return tuple([
"50% of referrals result in connection to mental health and/or SUD services",
"50%",
"{}/{} = {}%".format(success, all, 100 * (success / all))
])
else:
return "Param Error: metric's value was not among the list of used values"
def count_rent_assist(self, services_df):
"""
Use by: Agency
Question: participants will receive rent assistance
:return: a count of unique participants receiving any rent assistance service
"""
rent_services = [
"Rent Payment Assistance",
"Rental Application Fee Payment Assistance",
"Rental Deposit Assistance"
]
rent_service_2 = [
"Application Fee",
"Arrears / Property Debt",
"Deposit",
"Rent Payment Assistance"
]
services = services_df[
(services_df["Service Code Description"].isin(rent_services)) | (
services_df["Service Provider Specific Code"].isin(rent_service_2))
]
unique = len(services.drop_duplicates(
subset="Client Uid",
keep="first",
inplace=False
).index)
return tuple(["900 participants will receive rent assistance", 900, unique])
def count_rent_well(self, services_df, type):
"""
Used by: Agency, RentWell
This method will identify the participants who either had at least a
single service by TPI RentWell or a graduation service during the
reporting period.
:param services_df:
:return:
"""
if type.lower() == "attendance":
attended = services_df[
services_df["Service Provider Specific Code"].notna() &
(
services_df["Service Provider Specific Code"].str.contains("RentWell - Attendence") |
services_df["Service Provider Specific Code"].str.contains("RentWell - Graduation")
)
].drop_duplicates(subset="Client Uid")
return tuple(["400 participants will enroll in Rent Well ", 400, len(attended.index)])
elif type.lower() == "graduation":
graduated = services_df[
services_df["Service Provider Specific Code"].notna() &
services_df["Service Provider Specific Code"].str.contains("RentWell - Graduation")
].drop_duplicates(subset="Client Uid")
return tuple(["240 Participants will graduate RentWell", 240, len(graduated.index)])
elif type.lower() == "services":
services = services_df[
services_df["Service Provider Specific Code"].notna() &
(
services_df["Service Provider Specific Code"].str.contains("RentWell - Attendence") |
services_df["Service Provider Specific Code"].str.contains("RentWell - Graduation")
)
]
return tuple(["", "", len(services.index)])
elif type.lower() == "all served":
attended = services_df[
services_df["Service Provider Specific Code"].notna() &
(
services_df["Service Provider Specific Code"].str.contains("RentWell - Attendence") |
services_df["Service Provider Specific Code"].str.contains("RentWell - Graduation")
)
].drop_duplicates(subset="Client Uid")
return attend[["Client Uid", "Service Provide Start Date"]]
elif type.lower() == "all graduates":
graduated = services_df[
services_df["Service Provider Specific Code"].notna() &
services_df["Service Provider Specific Code"].str.contains("RentWell - Graduation")
].drop_duplicates(subset="Client Uid")
return graduated[["Client Uid", "Service Provide Start Date"]]
else:
return tuple(["ERROR", "ERROR", "ERROR"])
def count_retention_by_length(self, retention_df, length, provider="agency"):
"""
Used by: Agency
For this to work you need to add a Months Post Subsidy column to the follow ups report. The column must be filled down with the following formula:
=IF(ISBLANK(B2),"",DATEDIF(B2,C2,"M"))
:param length: int: 1-12
:return:
"""
no_na = retention_df.dropna(axis=0, subset=["Months Post Subsidy"])
fu_of_duration = no_na[
(
(no_na["Months Post Subsidy"] >= length - 1) &
(no_na["Months Post Subsidy"] <= length + 1)
)
]
fu_positive = len(fu_of_duration[fu_of_duration["Is Client Still in Housing?(2519)"] == "Yes (HUD)"].index)
# print(len(retention_df.index), len(no_na.index), fu_positive, len(fu_of_duration.index))
if provider.lower() == "agency":
return tuple([
"80% participants retain their housing for 12 months post-subsidy*",
"80%",
"{}/{} = {}%".format(
fu_positive,
len(fu_of_duration.index),
100 * (fu_positive / len(fu_of_duration.index))
)
])
elif provider.lower() == "ssvf":
return tuple([
"90% of participants will remain in housing 12 months after entering housing",
"90%",
"{}/{}={}".format(fu_positive, len(fu_of_duration.index), 100*(fu_positive/len(fu_of_duration.index)))
])
else:
if len(fu_of_duration.index) > 0:
return tuple([
"80% participants retain their housing for 12 months post-subsidy*",
"80%",
"{}/{} = {}%".format(
fu_positive,
len(fu_of_duration.index),
100 * (fu_positive / len(fu_of_duration.index))
)
])
else:
return tuple([
"80% participants retain their housing for 12 months post-subsidy*",
"80%",
"{}/{} = {}%".format(
fu_positive,
len(fu_of_duration.index),
"N/A"
)
])
def count_served_by_provider(self, services_df, provider=""):
"""
Used For: Day Center
:param services_df:
:param provider:
:return:
"""
served = len(services_df[
services_df["Service Provide Provider"].str.contains(provider)
].drop_duplicates(subset="Client Uid").index)
if provider != "Day Center":
return served
else:
return tuple([
"7000 unduplicated participants received services through the day center*",
7000,
served
])
def count_services_by_provider(self, services_df, provider):
"""
USed For: Day Center
:param services_df:
:param provider:
:return:
"""
services = len(services_df[services_df["Service Provide Provider"].str.contains(provider)].index)
return tuple(["85000 total services in the {}".format(provider), 85000, services])
def count_shelter_stays(self, entries_df, agency=True):
"""
Used For: Agency, Residential Shelters
:param entries_df:
:return:
"""
if agency:
entries = entries_df[
(entries_df["Entry Exit Provider Id"].str.contains("Clark Center")) | (
entries_df["Entry Exit Provider Id"].str.contains("Jean's Place L1")) | (
entries_df["Entry Exit Provider Id"].str.contains("Doreen's")) | (
entries_df["Entry Exit Provider Id"].str.contains("SOS")) | (
entries_df["Entry Exit Provider Id"].str.contains("Hansen")) |(
entries_df["Entry Exit Provider Id"].str.contains("Peace 2")) | (
entries_df["Entry Exit Provider Id"].str.contains("Columbia")) | (
entries_df["Entry Exit Provider Id"].str.contains("Willamette")) | (
entries_df["Entry Exit Provider Id"].str.contains("Maher")) | (
entries_df["Entry Exit Provider Id"].str.contains("5th")) | (
entries_df["Entry Exit Provider Id"].str.contains("Clark Annex"))
]
de_duped = len(entries.drop_duplicates(subset="Client Uid", inplace=False).index)
return tuple(["2,850 participants will have a safe place to sleep at night*", 2850, de_duped])
else:
entries = entries_df[
(entries_df["Entry Exit Provider Id"].str.contains("Clark Center")) | (
entries_df["Entry Exit Provider Id"].str.contains("Doreen's")) | (
entries_df["Entry Exit Provider Id"].str.contains("Jean's Place L1"))
]
de_duped = len(entries.drop_duplicates(subset="Client Uid", inplace=False).index)
return tuple(["1,000 participants will have a safe place to sleep", 1000, de_duped])
def count_shelter_to_perm_w_group(self, entries_df, services_df, low_barrier=True):
"""
Used For: Strategic Initiative
Current Issue: need to check for zero in denominator of output and return 0 instead of attempting illegal
division
:param entries_df:
:param services_df:
:param low_barrier:
:return:
"""
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
if low_barrier:
attended = self.percent_low_barrier_in_groups(entries_df, services_df, True, False)
else:
attended = self.percent_low_barrier_in_groups(entries_df, services_df, False, False)
exited = entries_df[
entries_df["Client Uid"].isin(attended) &
entries_df["Entry Exit Exit Date"].notnull()
]
perm = exited[exited["Entry Exit Destination"].isin(perm_destination)]
if low_barrier and len(exited.index) > 0:
return tuple([
"10% increase in housing placements for participants who attend groups",
"10%",
"{}/{} = {}% for current quarter. Please subtract from number from previous quarter".format(
len(perm.index),
len(exited.index),
100*(len(perm.index)/len(exited.index))
)
])
elif len(exited.index) == 0:
return tuple([
"10% increase in housing placements for participants who attend groups",
"10%",
"Error: Denominator == 0"
])
else:
return tuple([
"10% increase in service-intensive shelter placements for participants who attend groups",
"10%",
"{}/{} = {}% for current quarter. Please subtract from number from previous quarter".format(
len(perm.index),
len(exited.index),
100 * (len(perm.index) / len(exited.index))
)
])
def count_transportation_passes_by_provider(self, services_df, provider="Day Center"):
"""
Used For: Day Center
:param services_df:
:param provider:
:return:
"""
served = services_df[
services_df["Service Provide Provider"].str.contains(provider) & (
services_df["Service Code Description"] == "Transportation Passes")
].drop_duplicates(subset="Client Uid")
return tuple(["1300 individuals received local transit passes", 1300, len(served.index)])
def days_from_id_to_placement(self, placements_df, entries_df, cm_provider, placement_provider):
"""
Used For:
:param entries_df:
:param placements_df:
:param cm_provider: just use the short name for the provider
:param placement_provider: Use one of the following options Retention: ACCESS, Residential CM, Clark Center,
SSVF - TPI, Doreen's Place, Hansen, Willamette Center
:return: the mean of the days from id to placement column
"""
clean_placements = placements_df.dropna(axis=0, how="any", subset=["Department Placed From(3076)"])
placement_df = clean_placements[clean_placements["Department Placed From(3076)"].isin(placement_provider)]
entry_dates = entries_df[entries_df["Entry Exit Provider Id"].str.contains(cm_provider)]
pd.to_datetime(placement_df["Placement Date(3072)"])
pd.to_datetime(entry_dates["Entry Exit Entry Date"])
entry_dates = entry_dates[["Client Uid", "Entry Exit Entry Date"]]
merged = pd.merge(placement_df, entry_dates, on="Client Uid", how="left")
merged["Days from ID to Placement"] = placement_df["Placement Date(3072)"] - merged["Entry Exit Entry Date"]
return tuple([
"Number of days from identification to placement <90*",
"<90",
merged["Days from ID to Placement"].mean().days
])
def exit_destination_by_shelter_type(self, entries_df, shelter_group):
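        """
        Counts exits from the low barrier shelters by destination type, returning the
        combined stable-or-permanent exit rate along with the permanent and temporary counts.
        :param entries_df:
        :param shelter_group: currently unused
        :return:
        """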
low_barrier_shelters = [
"Transition Projects (TPI) - Willamette Center(5764)",
"Transition Projects (TPI) - Hansen Emergency Shelter - SP(5588)",
"Transition Projects (TPI) - Columbia Shelter(5857)",
"Transition Projects (TPI) - SOS Shelter(2712)",
"Transition Projects (TPI) - 5th Avenue Shelter(6281)"
]
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
temp_destination = [
# "Emergency shelter, including hotel or motel paid for with emergency shelter voucher (HUD)",
"Hospital or other residential non-psychiatric medical facility (HUD)",
"Hotel or motel paid for without emergency shelter voucher (HUD)",
"Jail, prison or juvenile detention facility (HUD)",
"Staying or living with family, temporary tenure (e.g., room, apartment or house)(HUD)",
"Staying or living with friends, temporary tenure (e.g., room apartment or house)(HUD)",
"Transitional housing for homeless persons (including homeless youth) (HUD)",
"Moved from one HOPWA funded project to HOPWA TH (HUD)",
"Substance abuse treatment facility or detox center (HUD)",
"Psychiatric hospital or other psychiatric facility (HUD)"
]
entries = entries_df[entries_df["Entry Exit Provider Id"].isin(low_barrier_shelters)]
exits = entries[entries["Entry Exit Exit Date"].notna()]
positive = exits[
exits["Entry Exit Destination"].isin(temp_destination) | exits["Entry Exit Destination"].isin(
perm_destination)
].drop_duplicates(subset="Client Uid")
perm = exits[
exits["Entry Exit Destination"].isin(perm_destination)
].drop_duplicates(subset="Client Uid")
temp = exits[
exits["Entry Exit Destination"].isin(temp_destination)
].drop_duplicates(subset="Client Uid")
return tuple([
"% all low barrier shelter to stable or perm",
"15%",
"{}/{}={}%".format(
len(positive.index),
len(exits.drop_duplicates(subset="Client Uid").index),
100 * (len(positive.index) / len(exits.drop_duplicates(subset="Client Uid").index))
),
len(perm.index),
len(temp.index)
])
def exit_destination_by_provider(self, entries_df, provider, exit_type="perm temp"):
"""
Used For: Strategic Initiative, Low-Barrier Shelters
:param entries_df:
:param provider:
:param exit_type:
:return:
"""
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
temp_destination = [
# "Emergency shelter, including hotel or motel paid for with emergency shelter voucher (HUD)",
"Hospital or other residential non-psychiatric medical facility (HUD)",
"Hotel or motel paid for without emergency shelter voucher (HUD)",
"Jail, prison or juvenile detention facility (HUD)",
"Staying or living with family, temporary tenure (e.g., room, apartment or house)(HUD)",
"Staying or living with friends, temporary tenure (e.g., room apartment or house)(HUD)",
"Transitional housing for homeless persons (including homeless youth) (HUD)",
"Moved from one HOPWA funded project to HOPWA TH (HUD)",
"Substance abuse treatment facility or detox center (HUD)",
"Psychiatric hospital or other psychiatric facility (HUD)"
]
entries = entries_df[entries_df["Entry Exit Provider Id"].str.contains(provider)]
exits = entries.dropna(axis=0, subset=["Entry Exit Exit Date"])
perm = exits[exits["Entry Exit Destination"].isin(perm_destination)]
temp = exits[exits["Entry Exit Destination"].isin(temp_destination)]
if exit_type == "all":
return entries, perm, temp, exits
elif exit_type == "perm temp":
return perm, temp, exits
elif exit_type == "perm":
return perm, exits
elif exit_type == "temp":
return temp, exits
elif exit_type == "count perm":
return len(perm.index)
elif exit_type == "count temp":
return len(temp.index)
elif exit_type == "count exits":
return len(exits.index)
elif exit_type == "count entries":
return len(entries.index)
elif exit_type == "percent perm":
return "{}/{} = {}%".format(len(perm), len(exits), 100*(len(perm)/len(exits)))
elif exit_type == "percent temp":
return "{}/{} = {}%".format(len(temp), len(exits), 100*(len(temp)/len(exits)))
elif exit_type == "perm and temp percent":
entries = entries_df[entries_df["Entry Exit Provider Id"].str.contains(provider)]
exits = entries.dropna(axis=0, subset=["Entry Exit Exit Date"])
perm_or_temp = exits[
exits["Entry Exit Destination"].isin(perm_destination) |
exits["Entry Exit Destination"].isin(temp_destination)
].drop_duplicates(subset="Client Uid")
all = exits.drop_duplicates(subset="Client Uid")
return "{} / {} = {} %".format(len(perm_or_temp.index),len(all.index), 100*(len(perm_or_temp)/len(all)))
def percent_exits_from_low_barrier_to_service_intensive(self, entries_df, low_barrier_provider):
"""
Used For: Strategic Initiative
:param entries_df:
:param low_barrier_provider:
:return:
"""
count = 0
intensive_entries = entries_df[
entries_df["Entry Exit Provider Id"].str.contains("Clark Center") |
entries_df["Entry Exit Provider Id"].str.contains("Doreen's Place") |
entries_df["Entry Exit Provider Id"].str.contains("Jean's Place")
]
intensive_entries["Start"] = pd.to_datetime(intensive_entries["Entry Exit Entry Date"]).dt.date
intensive = intensive_entries[["Client Uid", "Start"]]
exiting_shelter = entries_df[
entries_df["Entry Exit Provider Id"].str.contains(low_barrier_provider) &
entries_df["Entry Exit Exit Date"].notnull()
]
exiting_shelter["Exit"] = pd.to_datetime(exiting_shelter["Entry Exit Exit Date"]).dt.date
for row in exiting_shelter.index:
client = exiting_shelter.loc[row, "Client Uid"]
exit = exiting_shelter.loc[row, "Exit"]
entry_data = intensive[intensive["Client Uid"] == client]
for e_row in entry_data.index:
if exit >= (entry_data.loc[e_row, "Start"] + relativedelta(days=-5)):
count += 1
else:
pass
exit_count = len(exiting_shelter.index)
to_intensive_percent = 100*(count/exit_count)
return tuple([
"16% of Hansen participants move to a services-intensive shelter",
"16%",
"{} / {} = {}%".format(count, exit_count, to_intensive_percent)
])
def percent_exits_caused_by_exclusion(self, entries_df_plus_reason, shelter_type):
"""
Used For: Service Intensive Shelters
:param entries_df_plus_reason:
:param shelter_type:
:return:
"""
if shelter_type.lower() == "res":
leavers = entries_df_plus_reason[
entries_df_plus_reason["Entry Exit Exit Date"].notnull() & (
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Doreen's") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Jean's") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Clark Center")
)
]
else:
leavers = entries_df_plus_reason[
entries_df_plus_reason["Entry Exit Exit Date"].notnull() & (
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Columbia") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Hansen") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("SoS") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("5th") |
entries_df_plus_reason["Entry Exit Provider Id"].str.contains("Willamette")
)
]
excluded = leavers[leavers["Entry Exit Reason Leaving"] == "Non-compliance with program"]
leaver_count = len(leavers.index)
excluded_count = len(excluded.index)
percent_excluded = 100 * (excluded_count / leaver_count)
return tuple([
"15% decrease in behavior based exclusions",
"<= -15%",
"{} / {} = {} <--- Don't forget to subtract this from the previous quarters numbers".format(excluded_count,
leaver_count,
percent_excluded)
])
def percent_iap_successful(self, entries_plus_df):
"""
Used For: Wellness Access
:param entries_plus_df:
:return:
"""
all_entries = entries_plus_df[
(
entries_plus_df["Entry Exit Provider Id"].str.contains("IAP") |
entries_plus_df["Entry Exit Provider Id"].str.contains("PIAP")
)
]
entry_count = len(all_entries.index)
successful = len(all_entries[
all_entries["Entry Exit Exit Date"].notnull() &
all_entries["Entry Exit Reason Leaving"].str.contains("Completed")
].index)
any_exit = len(all_entries[all_entries["Entry Exit Exit Date"].notnull()].index)
return tuple([
"60% of IAPs will be successfully completed by participants",
"60%",
"{}/{} = {}%".format(successful, any_exit, 100*(successful/any_exit))
])
def percent_low_barrier_in_groups(self, entries_df, services_df, low_barrier=True, direct=True):
"""
Used For: Strategic Initiative
Potential Issue: If a participant does not have an end date for their shelter stay this method will provide an
end date of the current day. This will lead to services outside of the reporting quarter being counted as
acceptable. This will likely need to be rectified prior to releasing related numbers.
:param entries_df:
:param services_df:
:param low_barrier:
:param direct:
:return:
"""
low_barrier_shelters = [
"Transition Projects (TPI) - Willamette Center(5764)",
"Transition Projects (TPI) - Hansen Emergency Shelter - SP(5588)",
"Transition Projects (TPI) - Columbia Shelter(5857)",
"Transition Projects (TPI) - SOS Shelter(2712)",
"Transition Projects (TPI) - 5th Avenue Shelter(6281)"
]
service_intensive = [
"Transition Projects (TPI) - Clark Center - SP(25)",
"Transition Projects (TPI) - Doreen's Place - SP(28)",
"Transition Projects (TPI) - Jean's Place L1 - SP(29)"
]
if low_barrier:
entries = entries_df[entries_df["Entry Exit Provider Id"].isin(low_barrier_shelters)]
else:
entries = entries_df[entries_df["Entry Exit Provider Id"].isin(service_intensive)]
conditions = [entries["Entry Exit Exit Date"].isnull(),entries["Entry Exit Exit Date"].notnull()]
choices = [datetime.now().date(), pd.to_datetime(entries["Entry Exit Exit Date"]).dt.date]
entries["End"] = np.select(conditions, choices, default=datetime.now().date())
entries["Start"] = pd.to_datetime(entries["Entry Exit Entry Date"]).dt.date
pt_list = entries["Client Uid"].tolist()
attendees = services_df[
services_df["Service Provider Specific Code"].str.contains("Group") &
services_df["Client Uid"].isin(pt_list)
]
attendees["Service Date"] = pd.to_datetime(attendees["Service Provide Start Date"]).dt.date
participants = {pt: 0 for pt in list(set(pt_list))}
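        # Tally, per participant, the group services that fall within one of their
        # shelter stays (entry date through exit date, or through today for open stays).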
for row in entries.index:
e_client = entries.loc[row, "Client Uid"]
e_entry = entries.loc[row, "Start"]
e_exit = entries.loc[row, "End"]
for s_row in attendees.index:
s_client = attendees.loc[s_row, "Client Uid"]
s_date = attendees.loc[s_row, "Service Date"]
if (s_client == e_client) and (e_entry <= s_date <= e_exit):
participants[e_client] += 1
else:
pass
all_pt = len(list(set(pt_list)))
final_data = pd.DataFrame.from_dict(participants, orient="index")
served = len(final_data[final_data[0] != 0].index)
if direct:
return tuple([
"35% of shelter residents attend on-site groups or activities",
"35%",
"{} / {} = {}%".format(served, all_pt, 100*(served/all_pt))
])
else:
return final_data[final_data[0] != 0].index.tolist()
def percent_low_barrier_to_perm(self, entries_df):
"""
Used For: Strategic Initiative
:param entries_df:
:return:
"""
h_perm, h_all = self.exit_destination_by_provider(entries_df, "Hansen", "perm")
w_perm, w_all = self.exit_destination_by_provider(entries_df, "Willamette", "perm")
s_perm, s_all = self.exit_destination_by_provider(entries_df, "SOS", "perm")
c_perm, c_all = self.exit_destination_by_provider(entries_df, "Columbia", "perm")
f_perm, f_all = self.exit_destination_by_provider(entries_df, "5th", "perm")
all_perm = len(h_perm.index) + len(w_perm.index) + len(s_perm.index) + len(c_perm.index) + len(f_perm.index)
all_exits = len(h_all.index) + len(w_all.index) + len(s_all.index) + len(c_all.index) + len(f_all.index)
return tuple([
"15% of participants exit to permanent housing",
"15%",
"{} / {} = {}%".format(all_perm, all_exits, 100*(all_perm/all_exits))
])
def percent_low_barrier_to_stable(self, entries_df):
"""
Used For: Strategic Initiative
:param entries_df:
:return:
"""
h_temp, h_all = self.exit_destination_by_provider(entries_df, "Hansen", "temp")
w_temp, w_all = self.exit_destination_by_provider(entries_df, "Will", "temp")
s_temp, s_all = self.exit_destination_by_provider(entries_df, "SOS", "temp")
c_temp, c_all = self.exit_destination_by_provider(entries_df, "Columbia", "temp")
f_temp, f_all = self.exit_destination_by_provider(entries_df, "5th", "temp")
all_temp = len(h_temp.index) + len(w_temp.index) + len(s_temp.index) + len(c_temp.index) + len(f_temp.index)
all_exits = len(h_all.index) + len(w_all.index) + len(s_all.index) + len(c_all.index) + len(f_all.index)
return tuple([
"15% of participants exit to stable housing",
"15%",
"{} / {} = {}%".format(all_temp, all_exits, 100*(all_temp/all_exits))
])
def percent_non_poc_exiting_to_perm_by_provider(
self,
entries_df,
services_df,
provider,
direct=True
):
"""
Used For:
:param entries_df:
:param services_df:
:param provider:
:param direct:
:return:
"""
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
exited = entries_df[
entries_df["Entry Exit Exit Date"].notna() &
entries_df["Entry Exit Provider Id"].isin(provider) &
~entries_df["Client Uid"].isin(self.return_poc_list(services_df))
]
perm = len(exited[exited["Entry Exit Destination"].isin(perm_destination)])
if direct:
return tuple([
"Participants housed are at least 40% people of color",
"40%",
"{}/{} = {}%".format(perm, len(exited), 100*(perm/len(exited)))
])
else:
return perm, len(exited.index)
def percent_poc_exiting_to_perm_by_provider(
self,
entries_df,
services_df,
providers=[
"Transition Projects (TPI) Housing - Clark Annex GPD - SP(4259)",
"Transition Projects (TPI) Housing - Clark Annex PSH - SP(2858)",
"Transition Projects (TPI) Housing - Barbara Maher Apartments PSH - SP(3018)"
],
direct=True
):
"""
Used For:
:param entries_df:
:param services_df:
:param providers:
:param direct:
:return:
"""
perm_destination = [
"Owned by client, no ongoing housing subsidy (HUD)",
"Owned by client, with ongoing housing subsidy (HUD)",
"Permanent housing for formerly homeless persons (HUD)",
"Rental by client, no ongoing housing subsidy (HUD)",
"Rental by client, with other ongoing housing subsidy (HUD)",
"Rental by client, with VASH subsidy (HUD)",
"Staying or living with family, permanent tenure (HUD)",
"Staying or living with friends, permanent tenure (HUD)",
"Foster care home or foster care group home (HUD)",
"Rental by client, with GPD TIP subsidy (HUD)",
"Permanent housing (other than RRH) for formerly homeless persons (HUD)",
"Moved from one HOPWA funded project to HOPWA PH (HUD)",
"Long-term care facility or nursing home (HUD)",
"Residential project or halfway house with no homeless criteria (HUD)"
]
exited = entries_df[
entries_df["Entry Exit Exit Date"].notna() &
entries_df["Entry Exit Provider Id"].isin(providers) &
entries_df["Client Uid"].isin(self.return_poc_list(services_df))
]
perm = len(exited[exited["Entry Exit Destination"].isin(perm_destination)])
if direct:
if len(exited) > 0:
return tuple([
"Participants housed are at least 40% people of color",
"40%",
"{}/{} = {}%".format(perm, len(exited), 100*(perm/len(exited)))
])
else:
return tuple([
"Participants housed are at least 40% people of color",
"40%",
"{}/{} = {}%".format(perm, len(exited), "N/A")
])
else:
return perm, len(exited.index)
def percent_of_pt_w_home_visits_by_provider(self, services_df, entries_df, provider):
"""
Used by: SSVF
:param services_df:
:param entries_df:
:param provider:
:return:
"""
hv_services = [
"Case Management Meeting - Home Visit"
]
hv_serviced = len(
services_df[
(services_df["Service Provider Specific Code"].isin(hv_services)) &
(services_df["Service Provide Provider"].str.contains(provider))
].drop_duplicates(subset="Client Uid").index
)
all_w_entry = len(entries_df[
entries_df["Entry Exit Provider Id"].str.contains(provider)
].drop_duplicates(subset="Client Uid").index)
output = "{} / {} = {}%".format(hv_serviced, all_w_entry, 100 * (hv_serviced / all_w_entry))
return tuple(["25% of participants will have quarterly home visits", "25%", output])
def percent_placed_are_poc(self, placements_df, services_df):
"""
Used For: Agency
:param placements_df:
:param services_df:
:return:
"""
poc = self.count_poc_placed(placements_df, services_df)
all = len(self.count_all_placed(placements_df).index)
return tuple([
"Participants housed are at least 41% participants of color",
"41%",
"{}/{} = {}%".format(poc, all, 100*(poc/all))
])
def percent_poc_placed_by_provider(self, placements_df, services_df, provider):
"""
Used For: SSVF
:param placements_df:
:param services_df:
:param provider:
:return:
"""
poc_placed = self.count_poc_placed_by_provider(placements_df, services_df, provider)
all_by_provider = self.count_all_placed_by_provider(placements_df, provider)
if provider == ["SSVF - TPI"]:
return tuple([
"Veterans housed are at least 25% people of color",
"25%",
"{}/{} = {}%".format(poc_placed, all_by_provider, 100 * (poc_placed / all_by_provider))
])
else:
return tuple([
"Participants housed are at least 41% people of color",
"41%",
"{}/{} = {}%".format(poc_placed, all_by_provider, 100 * (poc_placed / all_by_provider))
])
def percent_poc_placed_vs_percent_white_placed_by_shelter(self, entries_df, services_df, provider):
"""
Used For: Service Intensive Shelters
:param entries_df:
:param services_df:
:param provider:
:return:
"""
if provider == "res":
poc_perm, poc_all = self.percent_poc_exiting_to_perm_by_provider(
entries_df,
services_df,
providers=[
"Transition Projects (TPI) - Doreen's Place - SP(28)",
"Transition Projects (TPI) - Clark Center - SP(25)",
"Transition Projects (TPI) - Jean's Place L1 - SP(29)"
],
direct=False
)
perm, all = self.percent_non_poc_exiting_to_perm_by_provider(entries_df, services_df, [
"Transition Projects (TPI) - Doreen's Place - SP(28)",
"Transition Projects (TPI) - Clark Center - SP(25)",
"Transition Projects (TPI) - Jean's Place L1 - SP(29)"
], False)
            all_served = all
return tuple([
"Participants who are people of color that exit the program have housing outcomes greater than or equal to those of non-people of color",
">= 0%",
"({}/{})-({}/{})={}%".format(
poc_perm,
poc_all,
perm,
all_served,
100*((poc_perm / poc_all) - (perm / all_served))
)
])
else:
poc_perm, poc_all = self.percent_poc_exiting_to_perm_by_provider(
entries_df,
services_df,
providers=provider,
direct=False
)
perm, all = self.percent_non_poc_exiting_to_perm_by_provider(entries_df, services_df, provider, False)
return tuple([
"Participants who are people of color that exit the program have housing outcomes greater than or equal to those of non-people of color",
">= 0%",
"({}/{})-({}/{})={}%".format(
poc_perm,
poc_all,
perm,
all,
100*((poc_perm / poc_all) - (perm / all))
)
])
def percent_poc_w_small_s_support_services_by_provider(self, services_df, provider):
"""
Used by: Day Center
:param services_df:
:param provider:
:return:
"""
poc_list = self.return_poc_list(services_df)
services_1 = [
"Bathing Facilities",
"Personal/Grooming Supplies",
"Hairdressing/Nail Care"
]
services_2 = [
"Shower",
"Showers",
"Laundry Supplies",
"Clothing",
"Hairdressing/Nail Care",
"Personal Grooming Supplies"
]
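        # "Small s" supportive services are any Day Center services other than the
        # hygiene services listed above (note the negated filter below).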
services = services_df[
~(
(services_df["Service Code Description"].isin(services_1)) |
(services_df["Service Provider Specific Code"].isin(services_2))
) &
services_df["Service Provide Provider"].str.contains("Day") &
services_df["Client Uid"].isin(poc_list)
]
small_s_de_duped = services.drop_duplicates(subset="Client Uid", inplace=False)
served_by_day = services_df[
services_df["Service Provide Provider"].str.contains("Day") &
services_df["Client Uid"].isin(poc_list)
].drop_duplicates(subset="Client Uid")
poc_ss_served = len(small_s_de_duped.index)
all_served = len(served_by_day.index)
percent = 100*(poc_ss_served/all_served)
return tuple([
"50% of people of color served by the Day Center access a supportive service",
">= 50%",
"{}/{} = {}%".format(poc_ss_served, all_served, percent)
])
def percent_residents_oriented_in_ten_days(self, entries_df, services_df, providers):
"""
Used For: Service Intensive Shelters
Future Improvement Idea: Add a way to break out individual shelter data to make this metric more useful to
each of the shelter managers.
:param entries_df:
:param services_df:
:param providers:
:return:
"""
orientation_services = ["Shelter Orientation", "Orientation - Residential Program"]
entry_to_providers = entries_df[
entries_df["Entry Exit Provider Id"].isin(providers)
]
entry_to_providers["Entry Date"] = pd.to_datetime(entry_to_providers["Entry Exit Entry Date"]).dt.date
entry_date = entry_to_providers.filter(["Client Uid", "Entry Date"], axis=1)
        orientation_records = services_df[
            services_df["Service Provider Specific Code"].isin(orientation_services)
        ]
        orientation_records["Service Date"] = pd.to_datetime(orientation_records["Service Provide Start Date"]).dt.date
        orient_date = orientation_records.filter(["Client Uid", "Service Date"])
merged = entry_date.merge(orient_date, on="Client Uid", how="left")
conditions = [merged["Service Date"].notnull(), merged["Service Date"].isnull()]
choices = [((merged["Service Date"] - merged["Entry Date"]).dt.days), np.nan]
merged["Days to Orientation"] = np.select(conditions, choices, default=np.nan)
not_na = merged[merged["Days to Orientation"].notna()]
        oriented_in_ten_days = len(
not_na[
(not_na["Days to Orientation"].astype(float) <= 10)
].index
)
all_entries = len(merged.index)
return tuple([
"95% of participants will attend orientation within the first 10 days",
">= 95%",
"{} / {} = {}".format(oriented_in_seven_days, all_entries, 100*(oriented_in_seven_days/all_entries))
])
def percent_shelter_stays_less_than_seven_days(self, entries_df, providers):
"""
Use For: Service Intensive Shelters
:param entries_df:
:param providers:
:return:
"""
exits = entries_df[
entries_df["Entry Exit Exit Date"].notnull() &
entries_df["Entry Exit Provider Id"].isin(providers)
]
exits["Entry Date"] = pd.to_datetime(exits["Entry Exit Entry Date"]).dt.date
exits["Exit Date"] = | pd.to_datetime(exits["Entry Exit Exit Date"]) | pandas.to_datetime |
from builtins import print
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from scipy.io import arff
import scipy.io as sio
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
import os
import operator
import h5py
import glob
import scipy
from collections import Counter
def readucr(filename):
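    # UCR-format file: one series per row, the first column is the class label and the
    # remaining columns are the time series values (whitespace separated).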
data = np.loadtxt(filename)
    # Shuffle the rows in place (np.random.shuffle shuffles along the first axis by default)
np.random.shuffle(data)
Y = data[:, 0]
X = data[:, 1:]
return X, Y
def readmts(filename):
data = arff.loadarff(filename + '_TRAIN.arff')
df = | pd.DataFrame(data[0]) | pandas.DataFrame |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause-Clear
# Copyright (c) 2019, The Numerical Algorithms Group, Ltd. All rights reserved.
"""Shared routines for different Metric Sets
"""
from warnings import warn
import numpy
import pandas
from ..trace import Trace
from ..traceset import TraceSet
from .._plotsettings import pypop_mpl_params, figparams
__all__ = ["Metric", "MetricSet"]
class Metric:
"""Individual performance metrics to be used within a metricset. Defines metric name,
properties and method of calculation.
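
    Example (key and display name are illustrative)::

        metric = Metric("parallel_eff", level=0, displayname="Parallel Efficiency")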
"""
def __init__(
self,
key,
level,
displayname=None,
desc=None,
is_inefficiency=False,
freq_corr=False,
):
"""
Parameters
----------
key: str
Key by which to identify metric.
level: int
Level at which to display metric in the stack.
displayname: str or None
Display name to use for metric in table etc. Defaults to key.
desc: str or None
Detailed description of the metric.
is_inefficiency: bool
Tag metric as an inefficiency (rather than efficiency) for correct display
and shading. Default False.
freq_corr: bool
Correct performance metrics based on average clock frequency (use to
correct for node dynamic clocking issues). Default False.
"""
self.key = key
self.level = level
self.description = str(desc) if desc else ""
self.is_inefficiency = is_inefficiency
if displayname:
self.displayname = r"↪ " * bool(self.level) + displayname
else:
self.displayname = r"↪ " * bool(self.level) + self.key
class MetricSet:
"""Calculate and plot POP MPI metrics
Statistics data is expected to have been produced with `collect_statistics()`
Attributes
----------
metric_data
metric_definition
"""
_programming_model = None
_default_metric_key = "Number of Processes"
_default_group_key = None
_default_scaling_key = "Total Threads"
_key_descriptions = {
"Number of Processes": "",
"Threads per Process": "",
"Total Threads": "",
"Hybrid Layout": "",
"Tag": "",
}
def __init__(self, stats_data, ref_key=None, sort_keys=True):
"""
Parameters
----------
stats_data: TraceSet instance, dict, iterable or instance of Trace
Statistics as collected with `collect_statistics()`. Dictionary keys will be
used as the dataframe index. If a list, a dict will be constructed by
enumeration.
ref_key: str or None
Key of stats_dict that should be used as the reference for calculation of
scaling values. By default the trace with smallest number of processes and
smallest number of threads per process will be used.
sort_keys: bool
If true (default), lexically sort the keys in the returned DataFrame.
"""
self._stats_dict = MetricSet._dictify_stats(stats_data)
self._metric_data = None
self._sort_keys = sort_keys
self._ref_key = (
self._choose_ref_key(self._stats_dict) if ref_key is None else ref_key
)
def _calculate_metrics(self):
raise NotImplementedError
def _repr_html_(self):
return self.metric_data._repr_html_()
@staticmethod
def _choose_ref_key(stats_dict):
""" Take the stats dict and choose an appropriate reference trace.
As a default choice choose the smallest number of total threads, breaking ties
with smallest number of threads per process
"""
return min(
stats_dict.items(),
key=lambda x: "{:05}_{:05}_{}".format(
sum(x[1].metadata.threads_per_process),
max(x[1].metadata.threads_per_process),
x[1].metadata.tag,
),
)[0]
@property
def metric_data(self):
"""pandas.DataFrame: Calculated metric data.
"""
if self._metric_data is None:
self._calculate_metrics(ref_key=self._ref_key)
return self._metric_data
@staticmethod
def _dictify_stats(stats_data):
if isinstance(stats_data, TraceSet):
return {k: v for k, v in enumerate(stats_data.traces)}
else:
if isinstance(stats_data, Trace):
return {0: stats_data}
if not isinstance(stats_data, dict):
stats_data = {k: v for k, v in enumerate(stats_data)}
for df in stats_data.values():
if not isinstance(df, Trace):
raise ValueError("stats_dict must be an iterable of pypop.trace.Trace")
return stats_data
@property
def metrics(self):
"""List of :py:class:`pypop.metrics.Metric`: List of metrics that will be
calculated.
"""
return self._metric_list
def _create_subdataframe(self, metadata, idxkey):
if len(set(metadata.threads_per_process)) != 1:
warn(
"The supplied trace has a varying number of threads per process. "
"The PyPOP metrics were designed assuming a homogenous number of "
"threads per process -- analysis results may be inaccurate."
)
layout_keys = {
"Number of Processes": pandas.Series(
data=[metadata.num_processes], index=[idxkey]
),
"Threads per Process": pandas.Series(
data=[metadata.threads_per_process[0]], index=[idxkey]
),
"Total Threads": pandas.Series(
data=[sum(metadata.threads_per_process)], index=[idxkey]
),
"Hybrid Layout": pandas.Series(
data=[
"{}x{}".format(
metadata.num_processes, metadata.threads_per_process[0]
)
],
index=[idxkey],
),
"Tag": | pandas.Series(data=[metadata.tag], index=[idxkey]) | pandas.Series |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCaterogyEncoder(unittest.TestCase):
def test_inverse_transform_1(self):
"""
Test no preprocessing
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
original = inverse_transform(train)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_2(self):
"""
Test multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]})
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]})
y = | pd.DataFrame(data=[0, 1, 0, 0], columns=['y']) | pandas.DataFrame |
import xml.etree.ElementTree as ET
from pathlib import Path
import cv2
import pandas as pd
from tqdm import tqdm
from manga_ocr_dev.env import MANGA109_ROOT
def get_books():
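    # One row per Manga109 book, with paths to its XML annotation file and image directory.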
root = MANGA109_ROOT / 'Manga109s_released_2021_02_28'
books = (root / 'books.txt').read_text().splitlines()
books = pd.DataFrame({
'book': books,
'annotations': [str(root / 'annotations' / f'{book}.xml') for book in books],
'images': [str(root / 'images' / book) for book in books],
})
return books
def export_frames():
books = get_books()
data = []
for book in tqdm(books.itertuples(), total=len(books)):
tree = ET.parse(book.annotations)
root = tree.getroot()
for page in root.findall('./pages/page'):
for frame in page.findall('./frame'):
row = {}
row['book'] = book.book
row['page_index'] = int(page.attrib['index'])
row['page_path'] = str(Path(book.images) / f'{row["page_index"]:03d}.jpg')
row['page_width'] = int(page.attrib['width'])
row['page_height'] = int(page.attrib['height'])
row['id'] = frame.attrib['id']
row['xmin'] = int(frame.attrib['xmin'])
row['ymin'] = int(frame.attrib['ymin'])
row['xmax'] = int(frame.attrib['xmax'])
row['ymax'] = int(frame.attrib['ymax'])
data.append(row)
data = pd.DataFrame(data)
data.page_path = data.page_path.apply(lambda x: '/'.join(Path(x).parts[-4:]))
data.to_csv(MANGA109_ROOT / 'frames.csv', index=False)
def export_crops():
crops_root = MANGA109_ROOT / 'crops'
crops_root.mkdir(parents=True, exist_ok=True)
margin = 10
books = get_books()
data = []
for book in tqdm(books.itertuples(), total=len(books)):
tree = ET.parse(book.annotations)
root = tree.getroot()
for page in root.findall('./pages/page'):
for text in page.findall('./text'):
row = {}
row['book'] = book.book
row['page_index'] = int(page.attrib['index'])
row['page_path'] = str(Path(book.images) / f'{row["page_index"]:03d}.jpg')
row['page_width'] = int(page.attrib['width'])
row['page_height'] = int(page.attrib['height'])
row['id'] = text.attrib['id']
row['text'] = text.text
row['xmin'] = int(text.attrib['xmin'])
row['ymin'] = int(text.attrib['ymin'])
row['xmax'] = int(text.attrib['xmax'])
row['ymax'] = int(text.attrib['ymax'])
data.append(row)
data = | pd.DataFrame(data) | pandas.DataFrame |
import subprocess
import os
import pandas as pd
import glob
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def run_command(cmd):
try:
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print(e.output)
exit()
return cmdOutput
from contextlib import contextmanager
import os
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def read_report(report, data_list):
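    # Pull the numeric value that follows ": " from report lines such as
    # "NaN found: N", "INF found: N", "underflow (subnormal): N", "Kernels: N"
    # and "Inst. count: N", appending each count to data_list.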
assert(len(report) >= 10)
for line in report:
#print(line)
if("NaN found" in line or "INF found" in line):
line_set = line.split(" ")
elif("underflow (subnormal)" in line):
line_set = line.split(" ")
# elif("Total Division by 0" in line):
# line_set = line.split(" ")
elif("Kernels" in line):
line_set = line.split(" ")
elif("Inst. count" in line):
line_set = line.split(" ")
else:
line_set = []
#print(line_set)
if(len(line_set)==2):
num = line_set[1].split("\n")[0]
data_list.append(num)
#print(report)
print(len(data_list))
assert(len(data_list) == 9)
return data_list
def to_time_s(t):
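    # Convert a shell `time` style string such as "1m23.456s" into seconds.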
time_list = t.split('m')
return float(time_list[0])*60+float(time_list[1].split('s')[0])
def time_info(infos):
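    # Find the "real" line in the captured `time` output (followed by "user" and "sys")
    # and return the wall-clock duration in seconds.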
count = 0
for line in infos:
count = count +1
if("real" in line):
assert("user" in infos[count])
assert("sys" in infos[count+1])
t = line.split(" ")[1]
t_s = to_time_s(t)
return t_s
df = | pd.DataFrame(columns = ["program","fp64_NAN", "fp64_INF", "fp64_SUB","fp32_NAN", "fp32_INF", "fp32_SUB","kernel","FP instructions","check_time","ori_time","slowdown"]) | pandas.DataFrame |
# Copyright (C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the
# License for the specific language governing permissions and limitations under
# the License.
import operator
import os
import string
import warnings
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis import literal as L
import ibis.expr.types as ir
from ibis.expr.window import rows_with_max_lookback
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('snowflake')
pytestmark = pytest.mark.snowflake
@pytest.fixture
def guid(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.fixture
def guid2(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.mark.parametrize(
('left_func', 'right_func'),
[
param(
lambda t: t.double_col.cast('int8'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int8',
),
param(
lambda t: t.double_col.cast('int16'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int16',
),
param(
lambda t: t.string_col.cast('double'), # https://docs.snowflake.com/en/sql-reference/data-types-numeric.html#double-double-precision-real
lambda at: sa.cast(
at.c.string_col, sa.DECIMAL
),
id='string_to_double',
),
param(
lambda t: t.string_col.cast('float'),
lambda at: sa.cast(at.c.string_col, sa.FLOAT),
id='string_to_float',
),
param(
lambda t: t.string_col.cast('decimal'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 0)),
id='string_to_decimal_no_params',
),
param(
lambda t: t.string_col.cast('decimal(9, 3)'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 3)),
id='string_to_decimal_params',
),
],
)
def test_cast(alltypes, at, translate, left_func, right_func):
left = left_func(alltypes)
right = right_func(at)
assert str(translate(left).compile()) == str(right.compile())
def test_date_cast(alltypes, at, translate):
result = alltypes.date_string_col.cast('date')
expected = sa.cast(at.c.date_string_col, sa.DATE)
assert str(translate(result)) == str(expected)
@pytest.mark.parametrize(
'column',
[
'`INDEX`',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, at, translate, column):
col = alltypes[column]
result = col.cast(col.type())
expected = at.c[column]
assert result.equals(col)
assert str(translate(result)) == str(expected)
def test_timestamp_cast_noop(alltypes, at, translate):
result1 = alltypes.timestamp_col.cast('timestamp')
result2 = alltypes.int_col.cast('timestamp')
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
expected1 = at.c.timestamp_col
assert str(translate(result1)) == "CAST({} AS TIMESTAMP)".format(str(expected1))
@pytest.mark.parametrize(
('func', 'expected'),
[
param(operator.methodcaller('year'), 2015, id='year'),
param(operator.methodcaller('month'), 9, id='month'),
param(operator.methodcaller('day'), 1, id='day'),
param(operator.methodcaller('hour'), 14, id='hour'),
param(operator.methodcaller('minute'), 48, id='minute'),
param(operator.methodcaller('second'), 5, id='second'),
param(lambda x: x.day_of_week.index(), 1, id='day_of_week_index'),
param(
lambda x: x.day_of_week.full_name(),
'Tue',
id='day_of_week_full_name',
),
],
)
def test_simple_datetime_operations(con, func, expected, translate):
    value = L('2015-09-01 14:48:05.359').cast(dt.string).cast(dt.timestamp)
assert con.execute(func(value)) == expected
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(operator.add, L(3), L(4), 7, id='add'),
param(operator.sub, L(3), L(4), -1, id='sub'),
param(operator.mul, L(3), L(4), 12, id='mul'),
param(operator.truediv, L(12), L(4), 3, id='truediv_no_remainder'),
param(operator.pow, L(12), L(2), 144, id='pow'),
param(operator.mod, L(12), L(5), 2, id='mod'),
param(operator.truediv, L(7), L(2), 3.5, id='truediv_remainder'),
param(operator.floordiv, L(7), L(2), 3, id='floordiv'),
param(
lambda x, y: x.floordiv(y), L(7), 2, 3, id='floordiv_no_literal'
),
param(
lambda x, y: x.rfloordiv(y), L(2), 7, 3, id='rfloordiv_no_literal'
),
],
)
def test_binary_arithmetic(con, func, left, right, expected):
expr = func(left, right)
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
param(L('foo_bar'), 'VARCHAR', id='text'),
param(L(5), 'INTEGER', id='integer'),
param(ibis.NA, None, id='null'),
# TODO(phillipc): should this really be double?
param(L(1.2345), 'DECIMAL', id='numeric'),
param(
L('2015-09-01 14:48:05.359').cast(dt.string).cast(dt.timestamp),
'TIMESTAMP_NTZ',
id='timestamp_without_time_zone',
)
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
assert con.execute(L(value).nullifzero()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
param(operator.methodcaller('left', 3), 'foo', id='left'),
param(operator.methodcaller('right', 3), 'bar', id='right'),
param(operator.methodcaller('substr', 0, 3), 'foo', id='substr_0_3'),
param(operator.methodcaller('substr', 4, 3), 'bar', id='substr_4, 3'),
param(operator.methodcaller('substr', 1), 'oo_bar', id='substr_1'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'expected'),
[('lstrip', 'foo '), ('rstrip', ' foo'), ('strip', 'foo')],
)
def test_string_strip(con, opname, expected):
op = operator.methodcaller(opname)
value = L(' foo ')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'count', 'char', 'expected'),
[('lpad', 6, ' ', ' foo'), ('rpad', 6, ' ', 'foo ')],
)
def test_string_pad(con, opname, count, char, expected):
op = operator.methodcaller(opname, count, char)
value = L('foo')
assert con.execute(op(value)) == expected
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
@pytest.mark.parametrize(
('haystack', 'needle', 'expected'),
[
('foobar', 'bar', True),
('foobar', 'foo', True),
('foobar', 'baz', False),
('100%', '%', True),
('a_b_c', '_', True),
],
)
def test_string_contains(con, haystack, needle, expected):
value = L(haystack)
expr = value.contains(needle)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('value', 'expected'),
[('foo bar foo', 'Foo Bar Foo'), ('foobar Foo', 'Foobar Foo')],
)
def test_capitalize(con, value, expected):
assert con.execute(L(value).capitalize()) == expected
def test_repeat(con):
expr = L('bar ').repeat(3)
assert con.execute(expr) == 'bar bar bar '
def test_re_replace(con):
expr = L('fudge|||chocolate||candy').re_replace('\\|{2,3}', ', ')
assert con.execute(expr) == 'fudge, chocolate, candy'
def test_translate(con):
expr = L('faab').translate('a', 'b')
assert con.execute(expr) == 'fbbb'
@pytest.mark.parametrize(
('raw_value', 'expected'), [('a', 0), ('b', 1), ('d', -1), (None, 3)]
)
def test_find_in_set(demonstration, con, raw_value, expected):
value = L('a', dt.string)
haystack = demonstration.array1
expr = value.find_in_set(haystack)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('raw_value', 'opname', 'expected'),
[
(None, 'isnull', True),
(1, 'isnull', False),
(None, 'notnull', False),
(1, 'notnull', True),
],
)
def test_isnull_notnull(con, raw_value, opname, expected):
lit = L(raw_value)
op = operator.methodcaller(opname)
expr = op(lit)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('foobar').find('bar'), 3, id='find_pos'),
param(L('foobar').find('baz'), -1, id='find_neg'),
param(L('foobar').like('%bar'), True, id='like_left_pattern'),
param(L('foobar').like('foo%'), True, id='like_right_pattern'),
param(L('foobar').like('%baz%'), False, id='like_both_sides_pattern'),
param(L('foobar').like(['%bar']), True, id='like_list_left_side'),
param(L('foobar').like(['foo%']), True, id='like_list_right_side'),
param(L('foobar').like(['%baz%']), False, id='like_list_both_sides'),
param(
L('foobar').like(['%bar', 'foo%']), True, id='like_list_multiple'
),
param(L('foobarfoo').replace('foo', 'H'), 'HbarH', id='replace'),
param(L('a').ascii_str(), ord('a'), id='ascii_str'),
],
)
def test_string_functions(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('San Francisco').re_search('San* [fF].*'), True, id='re_search_match'),
param(L('abcd').re_search(r'[\d]+'), False, id='re_search_no_match'),
param(
L('1222').re_search(r'[\d]+'), True, id='re_search_match_number'
),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(
L('abcd').re_extract('([a-z]+)', 1), 'abcd', id='re_extract_whole'
),
param(
L('How are you doing today?').re_extract('\\b\\S*o\\S*\\b', 3), 'you', id='re_extract_first'
),
# valid group number but no match => NULL for snowflake
param(L('abcd').re_extract(r'(\d)', 1), None, id='re_extract_no_match'),
# match but not a valid group number => NULL
param(L('abcd').re_extract('abcd', 3), None, id='re_extract_match'),
],
)
def test_regexp_extract(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.NA.fillna(5), 5, id='filled'),
param(L(5).fillna(10), 5, id='not_filled'),
param(L(5).nullif(5), None, id='nullif_null'),
param(L(10).nullif(5), 10, id='nullif_not_null'),
],
)
def test_fillna_nullif(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(5, None, 4), 5, id='first'),
param(ibis.coalesce(ibis.NA, 4, ibis.NA), 4, id='second'),
param(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14, id='third'),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(ibis.NA, ibis.NA), None, id='all_null'),
param(
ibis.coalesce(ibis.NA, ibis.NA, ibis.NA.cast('double')),
None,
id='all_nulls_with_one_cast',
),
param(
ibis.coalesce(
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
),
None,
id='all_nulls_with_all_cast',
),
],
)
def test_coalesce_all_na(con, expr, expected):
assert con.execute(expr) == expected
def test_numeric_builtins_work(alltypes, df):
expr = alltypes.double_col.fillna(0)
result = expr.execute()
expected = df.double_col.fillna(0)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'pandas_op'),
[
param(
lambda t: (t.double_col > 20).ifelse(10, -20),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
),
id='simple',
),
param(
lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
).abs(),
id='abs',
),
],
)
def test_ifelse(alltypes, df, op, pandas_op):
expr = op(alltypes)
result = expr.execute()
result.name = None
expected = pandas_op(df)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION', 'UNION'),
(True, False, 'UNION', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(alltypes, distinct1, distinct2, expected1, expected2):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = '\n'.join(
map(
lambda line: line.rstrip(), # strip trailing whitespace
str(
expr.compile().compile(compile_kwargs=dict(literal_binds=True))
).splitlines(),
)
)
expected = """\
WITH anon_1 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_2 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_3 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col)
(SELECT anon_1.string_col, anon_1.metric
FROM anon_1 {} SELECT anon_2.string_col, anon_2.metric
FROM anon_2) {} SELECT anon_3.string_col, anon_3.metric
FROM anon_3""".format(
expected1, expected2
)
assert str(result) == expected
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
param(
lambda t, cond: t.bool_col.count(),
lambda df, cond: df.bool_col.count(),
id='count',
),
param(
lambda t, cond: t.double_col.sum(),
lambda df, cond: df.double_col.sum(),
id='sum',
),
param(
lambda t, cond: t.double_col.mean(),
lambda df, cond: df.double_col.mean(),
id='mean',
),
param(
lambda t, cond: t.double_col.min(),
lambda df, cond: df.double_col.min(),
id='min',
),
param(
lambda t, cond: t.double_col.max(),
lambda df, cond: df.double_col.max(),
id='max',
),
param(
lambda t, cond: t.double_col.var(),
lambda df, cond: df.double_col.var(),
id='var',
),
param(
lambda t, cond: t.double_col.std(),
lambda df, cond: df.double_col.std(),
id='std',
),
param(
lambda t, cond: t.double_col.var(how='sample'),
lambda df, cond: df.double_col.var(ddof=1),
id='samp_var',
),
param(
lambda t, cond: t.double_col.std(how='pop'),
lambda df, cond: df.double_col.std(ddof=0),
id='pop_std',
),
param(
lambda t, cond: t.bool_col.count(where=cond),
lambda df, cond: df.bool_col[cond].count(),
id='count_where',
),
param(
lambda t, cond: t.double_col.mean(where=cond),
lambda df, cond: df.double_col[cond].mean(),
id='mean_where',
),
param(
lambda t, cond: t.double_col.min(where=cond),
lambda df, cond: df.double_col[cond].min(),
id='min_where',
),
param(
lambda t, cond: t.double_col.max(where=cond),
lambda df, cond: df.double_col[cond].max(),
id='max_where',
),
param(
lambda t, cond: t.double_col.var(where=cond),
lambda df, cond: df.double_col[cond].var(),
id='var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond),
lambda df, cond: df.double_col[cond].std(),
id='std_where',
),
param(
lambda t, cond: t.double_col.var(where=cond, how='sample'),
lambda df, cond: df.double_col[cond].var(),
id='samp_var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond, how='pop'),
lambda df, cond: df.double_col[cond].std(ddof=0),
id='pop_std_where',
),
],
)
def test_aggregations(alltypes, df, func, pandas_func):
table = alltypes.limit(100)
df = df.head(table.count().execute())
cond = table.string_col.isin(['1', '7'])
expr = func(table, cond)
result = expr.execute()
expected = pandas_func(df, cond.execute())
np.testing.assert_allclose(result, expected)
def test_not_contains(alltypes, df):
n = 100
table = alltypes.limit(n)
expr = table.string_col.notin(['1', '7'])
result = expr.execute()
expected = ~df.head(n).string_col.isin(['1', '7'])
tm.assert_series_equal(result, expected, check_names=False)
def test_group_concat(alltypes, df):
expr = alltypes.string_col.group_concat()
result = expr.execute()
expected = ','.join(df.string_col.dropna())
assert result == expected
def test_distinct_aggregates(alltypes, df):
expr = alltypes.limit(100).double_col.nunique()
result = expr.execute()
assert result == df.head(100).double_col.nunique()
def test_not_exists(alltypes, df):
t = alltypes
t2 = t.view()
expr = t[~((t.string_col == t2.string_col).any())]
result = expr.execute()
left, right = df, t2.execute()
expected = left[left.string_col != right.string_col]
tm.assert_frame_equal(
result, expected, check_index_type=False, check_dtype=False
)
def test_subquery(alltypes, df):
t = alltypes
expr = (
t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size()
)
result = expr.execute().sort_values('string_col').reset_index(drop=True)
expected = (
df.assign(d=df.double_col.fillna(0))
.head(1000)
.groupby('string_col')
.string_col.count()
.reset_index(name='count')
.sort_values('string_col')
.reset_index(drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum'])
def test_rolling_window(alltypes, func, df):
t = alltypes
df = (
df[['double_col', 'timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
window = ibis.window(order_by=t.timestamp_col, preceding=6, following=0)
f = getattr(t.double_col, func)
df_f = getattr(df.double_col.rolling(7, min_periods=0), func)
result = (
t.projection([f().over(window).name('double_col')])
.execute()
.double_col
)
expected = df_f()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['min', 'max'])
def test_cumulative_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values('timestamp_col').reset_index(drop=True)
window = ibis.cumulative_window(order_by=t.timestamp_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
    tm.assert_series_equal(result, expected)
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import numpy as np
import pandas as pd
from absl import app, flags, logging
from yacos.essential import IO
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
import matplotlib.pyplot as plt
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import stellargraph as sg
from stellargraph.mapper import PaddedGraphGenerator
from stellargraph.layer import DeepGraphCNN
from stellargraph import StellarDiGraph
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten
from tensorflow.keras.losses import categorical_crossentropy
def get_num_labels(indexes,
labels):
"""Get the number of labels"""
num_labels = {}
for index in indexes:
if labels[index] not in num_labels:
num_labels[labels[index]] = 0
num_labels[labels[index]] += 1
return num_labels
def graph2stellar(graph):
"""Convert the graph to StellarGraph representation."""
nodes_features = graph.get_nodes_inst2vec_embeddings()
n_index = [index for index, _, _ in nodes_features]
n_features = [features for _, _, features in nodes_features]
nodes = pd.DataFrame(n_features, index=n_index)
edges = graph.get_edges_dataFrame()
s_graph = StellarDiGraph(nodes,
edges=edges,
edge_type_column="type")
# print(s_graph.info(show_attributes=True, truncate=None))
return s_graph
def prepare_data(data_directory,
graph_type,
ratio):
"""Extract the representation from the source code."""
# Instantiate the LLVM driver.
driver = LLVMDriver()
# Define the builder
builder = R.LLVMGraphBuilder(driver)
# Define the visitor
visitors = {'programl': R.LLVMProGraMLVisitor,
'programlnoroot': R.LLVMProGraMLNoRootVisitor,
'cfg': R.LLVMCFGVisitor,
'cfgcompact': R.LLVMCFGCompactVisitor,
'cfgcall': R.LLVMCFGCallVisitor,
'cfgcallnoroot': R.LLVMCFGCallNoRootVisitor,
'cfgcallcompact': R.LLVMCFGCallCompactVisitor,
'cfgcallcompactnoroot': R.LLVMCFGCallCompactNoRootVisitor,
'cdfg': R.LLVMCDFGVisitor,
'cdfgcompact': R.LLVMCDFGCompactVisitor,
'cdfgcall': R.LLVMCDFGCallVisitor,
'cdfgcallnoroot': R.LLVMCDFGCallNoRootVisitor,
'cdfgcallcompact': R.LLVMCDFGCallCompactVisitor,
'cdfgcallcompactnoroot': R.LLVMCDFGCallCompactNoRootVisitor,
'cdfgplus': R.LLVMCDFGPlusVisitor,
'cdfgplusnoroot': R.LLVMCDFGPlusNoRootVisitor}
index = 0
graphs = []
labels = []
indexes = {'ir_train': [], 'ir_val': [], 'ir_test': []}
# Load data from all folders
for dir in ['ir_train', 'ir_val', 'ir_test']:
top_dir = os.path.join(data_directory, dir)
folders = [
os.path.join(top_dir, subdir)
for subdir in os.listdir(top_dir)
if os.path.isdir(os.path.join(top_dir, subdir))
]
for folder in folders:
label = folder.replace('{}/'.format(top_dir), '')
sources = glob.glob('{}/*.ll'.format(folder))
nof_dataset_itens = int(len(sources) * ratio)
for item, source in enumerate(sources):
# Extract "information" from the file
# (data to construct the graph).
extractionInfo = builder.ir_to_info(source)
# Build the graph.
graph = builder.info_to_representation(extractionInfo,
visitors[graph_type])
indexes[dir].append(index)
graphs.append(graph2stellar(graph))
labels.append(label)
index += 1
if item == nof_dataset_itens - 1:
break
labels = pd.Series(labels, name='label', dtype="category")
return graphs, labels, indexes
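# Rough sketch (assumptions flagged inline) of how the StellarGraph objects returned
# by prepare_data() are typically fed to a graph classifier such as DeepGraphCNN:
# pad-batch them with a PaddedGraphGenerator and flow the training indices with
# one-hot targets. The batch size below is an illustrative placeholder, not a value
# taken from this script.
def _example_training_flow(graphs, graph_labels, train_indexes, batch_size=32):
    generator = PaddedGraphGenerator(graphs=graphs)
    targets = pd.get_dummies(graph_labels).iloc[train_indexes].values
    return generator.flow(train_indexes, targets=targets, batch_size=batch_size)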
def execute(argv):
"""Extract a graph representation."""
del argv
# Print summary
print('='*80, flush=True)
print('Classify applications into 104 classes given their raw code.')
print('='*80, flush=True)
FLAGS = flags.FLAGS
print('Deep Graph Convolutional Neural Network')
print('='*80, flush=True)
# Verify datset directory.
if not os.path.isdir(FLAGS.dataset_directory):
logging.error('Dataset directory {} does not exist.'.format(
FLAGS.dataset_directory)
)
sys.exit(1)
#
# IMPORT THE DATA
#
# Prepare the datasets
graphs, graph_labels, graph_indexes = prepare_data(FLAGS.dataset_directory,
FLAGS.graph,
FLAGS.ratio)
# Summary statistics of the sizes of the graphs
print('Dataset', flush=True)
summary = pd.DataFrame(
[(g.number_of_nodes(), g.number_of_edges()) for g in graphs],
columns=['nodes', 'edges'],
)
print('\n', summary.describe().round(1), flush=True)
print('\n', graph_labels.value_counts().to_frame(), flush=True)
print('='*80, flush=True)
# Dataset statistics
dataset = {'training': {}, 'validation': {}, 'test': {}}
training = len(graph_indexes['ir_train'])
dataset['training']['counter'] = training
validation = len(graph_indexes['ir_val'])
dataset['validation']['counter'] = validation
test = len(graph_indexes['ir_test'])
dataset['test']['counter'] = test
print('Training:', training, flush=True)
dataset['training']['labels'] = {}
num_labels = get_num_labels(graph_indexes['ir_train'], graph_labels)
for label, counter in num_labels.items():
print('\t', label, '\t', counter)
dataset['training']['labels'][label] = counter
print('Validation:', validation, flush=True)
dataset['validation']['labels'] = {}
num_labels = get_num_labels(graph_indexes['ir_val'], graph_labels)
for label, counter in num_labels.items():
print('\t', label, '\t', counter)
dataset['validation']['labels'][label] = counter
print('Test:', test, flush=True)
dataset['test']['labels'] = {}
num_labels = get_num_labels(graph_indexes['ir_test'], graph_labels)
for label, counter in num_labels.items():
print('\t', label, '\t', counter)
dataset['test']['labels'][label] = counter
print('='*80, flush=True)
test_labels_original = graph_labels[training+validation:]
# Encode class values
    graph_labels = pd.get_dummies(graph_labels)
"""Read data files in different formats"""
import json as jsonlib
import pandas as pd
from eln.decorators.register_reader import register_reader, READERS as _READERS
from eln.helpers.logger import log_error
class UnsupportedFileFormatError(TypeError):
"""Unsupported file format"""
def read(plugin, *args, **kwargs):
    """Dispatch to the reader registered for `plugin` (e.g. 'csv')."""
    if plugin in _READERS:
        return _READERS[plugin](*args, **kwargs)
    raise UnsupportedFileFormatError(plugin)
@register_reader
def csv(file_path):
"""Read CSV file, return DataFrame"""
    return pd.read_csv(file_path)
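# Illustrative sketch only -- not part of the original module. It shows how another
# format could be plugged in; to become reachable through read() it would need the
# @register_reader decorator, which (judging from the csv reader above) appears to
# key readers by function name. `jsonlib` is the stdlib json module imported above.
def _json_reader_sketch(file_path):
    """Read a JSON file of records, return DataFrame (hypothetical helper)."""
    with open(file_path) as fh:
        return pd.DataFrame(jsonlib.load(fh))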
# TODO move away from this test generator style since we need to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
## ##
# #
# THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py #
# #
##
# TODO refactor this into table-driven tests using pytest.mark.parametrize since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible.
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
class autogenerated_gb_tests(unittest.TestCase):
    def safe_assert(self, ary1, ary2):
        # `x == x` is False only for NaN, so pairs where either value is NaN are skipped.
        for a, b in zip(ary1, ary2):
            if a == a and b == b:
                self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
| pd.DataFrame(test_class.data) | pandas.DataFrame |
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.types.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning
class TestIX(tm.TestCase):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
| tm.assert_frame_equal(df2, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# In[2]:
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[34]:
get_ipython().system(u'git status')
# In[35]:
get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"')
# In[7]:
import sys
sys.path.insert(0, './code')
# ---
# # Collaborative Filtering Comparison
#
# In this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix Prize competition, SVD++.
#
# Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has their own individual notebooks where we go more indepth, especially LightGCN and NGCF, where we implemented them from scratch in Tensorflow.
#
# The last cell compares the performance of the different models using ranking metrics:
#
#
# * Precision@k
# * Recall@k
# * Mean Average Precision (MAP)
# * Normalized Discounted Cumulative Gain (NDCG)
#
# where $k=10$
#
#
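# To make the comparison concrete, the helper below sketches how precision@k and
# recall@k are computed. It is illustrative only (the notebook itself relies on the
# project's `metrics` module) and assumes a dict of top-k recommended item ids per
# user plus a dict of relevant test item ids per user.
# In[ ]:
def precision_recall_at_k(top_k_recs, relevant_items, k=10):
    """Average precision@k and recall@k over users (illustrative sketch)."""
    precisions, recalls = [], []
    for user, recs in top_k_recs.items():
        relevant = relevant_items.get(user, set())
        hits = len(set(recs[:k]) & relevant)
        precisions.append(hits / k)
        recalls.append(hits / len(relevant) if relevant else 0.0)
    return sum(precisions) / len(precisions), sum(recalls) / len(recalls)
# Example: precision_recall_at_k({1: [10, 20, 30]}, {1: {10, 99}}, k=3) -> (0.33..., 0.5)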
# # Imports
# In[4]:
get_ipython().system(u'pip install -q surprise')
# In[8]:
import math
import numpy as np
import os
import pandas as pd
import random
import requests
import scipy.sparse as sp
import surprise
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.python.framework.ops import disable_eager_execution
from tqdm import tqdm
from utils import stratified_split, numpy_stratified_split
import build_features
import metrics
from models import SVAE
from models.GCN import LightGCN, NGCF
# # Prepare data
# In[9]:
fp = os.path.join('./data/bronze', 'u.data')
raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp'])
print(f'Shape: {raw_data.shape}')
raw_data.sample(10, random_state=123)
# In[10]:
# Load movie titles.
fp = os.path.join('./data/bronze', 'u.item')
movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1')
print(f'Shape: {movie_titles.shape}')
movie_titles.sample(10, random_state=123)
# In[15]:
train_size = 0.75
train, test = stratified_split(raw_data, 'userId', train_size)
print(f'Train Shape: {train.shape}')
print(f'Test Shape: {test.shape}')
print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}')
# In[16]:
combined = train.append(test)
n_users = combined['userId'].nunique()
print('Number of users:', n_users)
n_movies = combined['movieId'].nunique()
print('Number of movies:', n_movies)
# In[17]:
# Create DataFrame with reset index of 0-n_movies.
movie_new = combined[['movieId']].drop_duplicates()
movie_new['movieId_new'] = np.arange(len(movie_new))
train_reindex = pd.merge(train, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
train_reindex['userId_new'] = train_reindex['userId'] - 1
train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']]
test_reindex = pd.merge(test, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
test_reindex['userId_new'] = test_reindex['userId'] - 1
test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']]
# Create dictionaries so we can convert to and from indexes
item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new']))
id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId']))
user2id = dict(zip(train['userId'], train_reindex['userId_new']))
id2user = dict(zip(train_reindex['userId_new'], train['userId']))
# In[18]:
# Create user-item graph (sparse matix where users are rows and movies are columns.
# 1 if a user reviewed that movie, 0 if they didn't).
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32)
R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1
# Create the adjaceny matrix with the user-item graph.
adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32)
# Convert to list-of-lists format so the block assignments below are efficient.
adj_mat = adj_mat.tolil()
R = R.tolil()
# Put together adjacency matrix. Movies and users are nodes/vertices.
# 1 if the movie and user are connected.
adj_mat[:n_users, n_users:] = R
adj_mat[n_users:, :n_users] = R.T
adj_mat
# In[19]:
# Calculate degree matrix D (for every row count the number of nonzero entries)
D_values = np.array(adj_mat.sum(1))
# Square root and inverse.
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten()
D_inv_values[np.isinf(D_inv_values)] = 0.0
# Create sparse matrix with the values of D^(-0.5) are the diagonals.
D_inv_sq_root = sp.diags(D_inv_values)
# Eval (D^-0.5 * A * D^-0.5).
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root)
# In[20]:
# to COOrdinate format first ((row, column), data)
coo = norm_adj_mat.tocoo().astype(np.float32)
# create an index that will tell SparseTensor where the non-zero points are
indices = np.mat([coo.row, coo.col]).transpose()
# covert to sparse tensor
A_tilde = tf.SparseTensor(indices, coo.data, coo.shape)
A_tilde
# # Train models
# ## Graph Convoultional Networks (GCNs)
# ### Light Graph Convolution Network (LightGCN)
# In[21]:
light_model = LightGCN(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3)
# In[22]:
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Neural Graph Collaborative Filtering (NGCF)
# In[23]:
ngcf_model = NGCF(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3
)
ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Recommend with LightGCN and NGCF
# In[24]:
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
recs = []
for model in [light_model, ngcf_model]:
recommendations = model.recommend(users, k=10)
recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item})
recommendations = recommendations.merge(movie_titles,
how='left',
on='movieId'
)[['userId', 'movieId', 'title', 'prediction']]
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
    top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1  # rank of each recommended movie per user
recs.append(top_k)
# ## Standard Variational Autoencoder (SVAE)
# In[26]:
# Binarize the data (only keep ratings >= 4)
df_preferred = raw_data[raw_data['rating'] > 3.5]
df_low_rating = raw_data[raw_data['rating'] <= 3.5]
df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5)
df = df.groupby('movieId').filter(lambda x: len(x) >= 1)
# Obtain both usercount and itemcount after filtering
usercount = df[['userId']].groupby('userId', as_index = False).size()
itemcount = df[['movieId']].groupby('movieId', as_index = False).size()
unique_users =sorted(df.userId.unique())
np.random.seed(123)
unique_users = np.random.permutation(unique_users)
HELDOUT_USERS = 200
# Create train/validation/test users
n_users = len(unique_users)
train_users = unique_users[:(n_users - HELDOUT_USERS * 2)]
val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)]
test_users = unique_users[(n_users - HELDOUT_USERS):]
train_set = df.loc[df['userId'].isin(train_users)]
val_set = df.loc[df['userId'].isin(val_users)]
test_set = df.loc[df['userId'].isin(test_users)]
unique_train_items = pd.unique(train_set['movieId'])
val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)]
test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)]
# Instantiate the sparse matrix generation for train, validation and test sets
# use list of unique items from training set for all sets
am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items)
am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items)
am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items)
# Obtain the sparse matrix for train, validation and test sets
train_data, _, _ = am_train.gen_affinity_matrix()
val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix()
test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix()
# Split validation and test data into training and testing parts
val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123)
test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123)
# Binarize train, validation and test data
train_data = np.where(train_data > 3.5, 1.0, 0.0)
val_data = np.where(val_data > 3.5, 1.0, 0.0)
test_data = np.where(test_data > 3.5, 1.0, 0.0)
# Binarize validation data
val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0)
val_data_te_ratings = val_data_te.copy()
val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0)
# Binarize test data: training part
test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0)
# Binarize test data: testing part (save non-binary version in the separate object, will be used for calculating NDCG)
test_data_te_ratings = test_data_te.copy()
test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0)
# retrieve real ratings from initial dataset
test_data_te_ratings=pd.DataFrame(test_data_te_ratings)
val_data_te_ratings= | pd.DataFrame(val_data_te_ratings) | pandas.DataFrame |
# importing modules
import numpy as np
import pandas as pd
###ETL reddit data
#----------------------------------------------------------------------------------------------------------------------------------------
def pq (names, subredits='allstocks', sort='relevance', date='all', comments=False):
#importing reddit api
from reddit_api import get_reddit
reddit = get_reddit()
#preparing the inputs to be search
if isinstance(names, str):
if names.isupper()==False:
if names[0].isupper()==False:
name1 = names
name2 = names.capitalize()
name3 = names.upper()
else:
name1 = names.lower()
name2 = names
name3 = names.upper()
else:
name1 = names.lower()
name2 = names.lower().capitalize()
name3 = names
pnames = [[name1,name2,name3]]
elif isinstance(names, list):
pnames =[]
for i, n in enumerate(names):
if isinstance(n, str):
n = str(n)
if n.isupper()==False:
if n[0].isupper()==False:
name1 = n
name2 = n.capitalize()
name3 = n.upper()
else:
name1 = n.lower()
name2 = n
name3 = n.upper()
else:
name1 = n.lower()
name2 = n.lower().capitalize()
name3 = n
pnames.append([name1,name2,name3])
else: pnames = []
elif (isinstance(names, str)==False) or (isinstance(names, list)==False): pnames = []
#scraping posts
posts = []
for n in pnames:
        if subredits=='allstocks':
            # Same search across all stock-related subreddits.
            # Note: `or` short-circuits, so this effectively searches the first
            # non-empty variant of the name (lower / capitalized / upper).
            for sub in ['stocks', 'StocksAndTrading', 'stockspiking', 'Stocks_Picks',
                        'wallstreetbets', 'Wallstreetbetsnew', 'WallStreetbetsELITE']:
                stocks = reddit.subreddit(sub)
                for post in stocks.search(n[0] or n[1] or n[2], sort, 'lucene', date):
                    posts.append([post.title, post.score, post.id, post.subreddit, post.url, post.num_comments, post.selftext, post.created])
        else:
            hot_posts = reddit.subreddit(subredits)
            for post in hot_posts.search(n[0] or n[1] or n[2], sort, 'lucene', date):
                posts.append([post.title, post.score, post.id, post.subreddit, post.url, post.num_comments, post.selftext, post.created])
posts = pd.DataFrame(posts,columns=['title', 'score', 'post_id', 'subreddit', 'url', 'num_comments', 'body', 'created'])
posts = posts.infer_objects()
posts.drop_duplicates(subset ="post_id", keep = "first", inplace = True)
posts.reset_index(drop=True, inplace=True)
#scraping comments
if comments==True:
comments = []
for index, row in posts.iterrows():
submission = reddit.submission(id=row['post_id'])
submission.comments.replace_more(limit=0)
for comment in submission.comments.list():
comments.append([row['post_id'], row['title'], comment.score, comment.id, comment.body, comment.created])
comments = pd.DataFrame(comments,columns=['post_id', 'post', 'score', 'comment_id', 'body', 'created'])
comments = comments.infer_objects()
        #formatting posts and optional comments dataframes
posts['created'] = pd.to_datetime(posts['created'], unit='s')
posts.set_index('created', inplace=True)
comments['created'] = pd.to_datetime(comments['created'], unit='s')
comments.set_index('created', inplace=True)
return posts, comments
    #formatting posts dataframe
posts['created'] = pd.to_datetime(posts['created'], unit='s')
posts.set_index('created', inplace=True)
return posts
###Sentiment analysis
#----------------------------------------------------------------------------------------------------------------------------------------
def sentiment (posts, comments=None):
#importing sentiment model flair
import flair
sentiment_model = flair.models.TextClassifier.load('en-sentiment')
#changing index for processing
posts.reset_index (inplace=True)
posts.set_index('post_id', inplace=True)
#calculating sentiment on body
sentiment = []
score = []
for sentence in posts['body']:
if sentence.strip()=='':
sentiment.append(np.nan)
score.append(np.nan)
else:
sample = flair.data.Sentence(sentence)
sentiment_model.predict(sample)
sentiment.append(sample.labels[0].value)
score.append(sample.labels[0].score)
posts['sentiment'] = sentiment
posts['sentiment_score'] = score
#calculating sentiment on tittle if body is nan
for index in posts[posts["sentiment"].isna()].index:
if posts.loc[index,"title"].strip()!='':
sample = flair.data.Sentence(posts.loc[index,"title"])
sentiment_model.predict(sample)
posts.at[index,"sentiment"] = sample.labels[0].value
posts.at[index,"sentiment_score"] = sample.labels[0].score
#calculating sentiment on comments
if isinstance(comments, pd.DataFrame):
sentiment = []
score = []
for sentence in comments['body']:
if sentence.strip()=='':
sentiment.append(np.nan)
score.append(np.nan)
else:
sample = flair.data.Sentence(sentence)
sentiment_model.predict(sample)
sentiment.append(sample.labels[0].value)
score.append(sample.labels[0].score)
comments['sentiment'] = sentiment
comments['sentiment_score'] = score
#mean sentiment of posts by comments
posts["comments_sentiment"] = np.nan
posts["comments_score"] = np.nan
for post_id in comments["post_id"].unique():
            posts.at[post_id, "comments_sentiment"] = comments["sentiment"].loc[comments["post_id"] == post_id].mode()[0]
            posts.at[post_id, "comments_score"] = comments["sentiment_score"].loc[comments["post_id"] == post_id].mean()
#combined sentiment score
posts["combined_sentiment"] = np.where (posts['comments_sentiment'].isna(), posts['sentiment'],np.where (posts['sentiment'] == posts['comments_sentiment'], 'POSITIVE', 'NEGATIVE'))
posts["combined_score"] = (posts["sentiment_score"]+posts["comments_score"])/2
posts["combined_score"] = np.where(posts["combined_score"].notna()==True, posts["combined_score"], posts["sentiment_score"])
else:
posts["comments_sentiment"] = np.nan
posts["comments_score"] = np.nan
posts["combined_sentiment"] = np.nan
posts["combined_score"] = np.nan
#returning to original index
posts.reset_index(inplace=True)
posts.set_index('created', inplace=True)
#formating new columns
posts['sentiment'] = posts['sentiment'].astype('category')
posts['sentiment_score'] = pd.to_numeric(posts['sentiment_score'])
comments['sentiment'] = comments['sentiment'].astype('category')
comments['sentiment_score'] = | pd.to_numeric(comments['sentiment_score']) | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.cluster import MiniBatchKMeans
def process_am(x):
aa = ''
if type(x) == pd.core.series.Series:
x = x.values
aa = [aa + x[i] for i in range(len(x))]
aa = aa[0]
aa = re.sub('"'," ", aa)
elif type(x) == str:
aa = x
aa = re.sub('"'," ", aa)
aal = []
_aal = aa.split(',')
for aa in _aal:
aa = re.sub("{"," ", aa)
aa = re.sub("}"," ", aa)
aa = re.sub(","," ", aa)
aa = re.sub(":"," ", aa)
aa = re.sub('’n',"", aa)
aa = aa.strip()
aa = re.sub('\s+',"_", aa)
aa = aa.lower()
if len(aa)>0:
aal.append(aa)
return dict.fromkeys(set(aal), 1)
def perc2float(x):
return float(x.strip('%'))/100
########################
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
print("train:",train.shape)
print("test:",test.shape)
# 1. log_price
print("1. log_price")
y_train = train['log_price']
train = train.drop(['log_price'],axis=1)
assert train.shape[1] == test.shape[1]
for i in range(train.shape[1]):
assert train.columns[i] == test.columns[i]
train_obs = len(train)
all_data = pd.concat([train,test],axis=0)
# 2. property_type, room_type, bed_type
print('--------------> Feature Engineering ... ')
print("2. property_type, room_type, bed_type")
encoder = LabelEncoder()
encoder.fit(all_data['property_type'])
all_data['property_type'] = encoder.transform(all_data['property_type'])
all_data['room_type'] = all_data['room_type'].map( {'Entire home/apt':5, 'Private room':3, 'Shared room':1})
all_data.bed_type = all_data.bed_type.fillna('missing')
encoder = LabelEncoder()
encoder.fit(all_data['bed_type'])
all_data['bed_type'] = encoder.transform(all_data['bed_type'])
# 3. amenities
print("3. amenities")
am_list = [process_am( all_data.iloc[i]['amenities']) for i in range(len(all_data))]
assert len(am_list) == len(all_data)
v = DictVectorizer(sparse=False)
X = v.fit_transform(am_list)
amenities_df = pd.DataFrame(data=X,columns=v.feature_names_)
amenities_df.index = all_data.index
all_data = pd.concat([all_data,amenities_df],axis=1)
all_data = all_data.drop(['amenities'],axis=1)
del amenities_df
#4. accommodates , bathrooms
#5. cancellation_policy, cleaning_fee
print("5. cancellation_policy, cleaning_fee")
all_data['cancellation_policy'] = all_data['cancellation_policy'].map( {
'super_strict_60':20,
'super_strict_30':30,
'strict':50,
'moderate':10,
'flexible':5,
'long_term':1,
})
all_data['cleaning_fee'] = all_data['cleaning_fee'].map( {
True:1,
False:0
})
# 6. city
print("6. city")
encoder = LabelEncoder()
encoder.fit(all_data['city'])
all_data['city'] = encoder.transform(all_data['city'])
# 7. description TODO
print("7. description ... TODO")
all_data['description'] = all_data['description'].fillna('')
all_data = all_data.drop(['description'],axis=1)
# 8. first_review , last_review , number_of_reviews , review_scores_rating
print("7. 8. first_review , last_review , number_of_reviews , review_scores_rating ... TODO better")
most_recent_review = pd.to_datetime(all_data.last_review).max()
delta_last_review = most_recent_review - pd.to_datetime(all_data.last_review)
delta_last_review = delta_last_review.dt.total_seconds() / (60 * 60 * 24)
delta_last_review = delta_last_review.fillna(-1)  # flag listings with no reviews
all_data['delta_most_recent_review'] = delta_last_review
delta_rev = pd.to_datetime(all_data.last_review) - | pd.to_datetime(all_data.first_review) | pandas.to_datetime |
import pandas as pd
STRING_COLS = ["slug", "token"]
INT_COLS = ["tok_id", "length", "label"]
FLOAT_COLS = [
"page",
"x0",
"y0",
"x1",
"y1",
"gross_amount",
"match",
"digitness",
"log_amount",
]
BOOL_COLS = ["is_dollar"]
def fix_type(df, col, na_value, dtype, downcast=False):
if col not in df.columns:
return
df[col] = df[col].fillna(na_value).astype(dtype)
if downcast:
try:
df[col] = | pd.to_numeric(df[col], downcast=dtype) | pandas.to_numeric |
from typing import Dict, List
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import wandb
api = wandb.Api()
entity = "proteins"
import matplotlib.ticker as ticker
class StupidLogFormatter(ticker.LogFormatter):
def __init__(
self,
base: float = 10.0,
labelOnlyBase=False,
minor_thresholds=None,
linthresh=None,
):
super().__init__(
base=base,
labelOnlyBase=labelOnlyBase,
minor_thresholds=minor_thresholds,
linthresh=linthresh,
)
def _num_to_string(self, x, vmin, vmax):
if x > 20000:
s = "%1.0e" % x
elif x < 1:
s = "%1.0e" % x
else:
s = self._pprint_val(x, vmax - vmin)
return s
def multimsa_pair_plot(df, k1, k2, m="auc"):
filtered_df_x = df[df.sweep_name == k1]
filtered_df_y = df[df.sweep_name == k2]
lsuffix = "_x"
rsuffix = "_y"
merged_df = pd.merge(
filtered_df_x, filtered_df_y, on="pdb_idx", suffixes=(lsuffix, rsuffix)
)
print("comparison families: ", len(merged_df))
lseqs = "num_seqs" + lsuffix
rseqs = "num_seqs" + rsuffix
if lseqs in merged_df.keys():
seqs_column = lseqs
elif rseqs in merged_df.keys():
seqs_column = rseqs
else:
print("no seqs found for ")
        print(filtered_df_x["model"].iloc[0])
        print(filtered_df_y["model"].iloc[0])
return
plt.plot([0, 1], [0, 1], c="k")
num_seqs = merged_df[seqs_column]
plt.scatter(
merged_df[m + lsuffix],
merged_df[m + rsuffix],
c=num_seqs,
s=9,
norm=colors.LogNorm(vmin=num_seqs.min(), vmax=num_seqs.max()),
cmap="viridis",
)
formatter = StupidLogFormatter(base=2.0)
cbar = plt.colorbar(format=formatter)
# cbar.locator = matplotlib.ticker.LogLocator(base=2)
# cbar.update_ticks()
plt.xlabel(k1)
plt.ylabel(k2)
cbar.set_label("\# of MSA sequences")
print("spoagef")
cbar.set_ticks(
ticker.LogLocator(base=2.0),
update_ticks=True,
)
cbar.minorticks_off()
def add_apc_default(df: pd.DataFrame, sweep_name: str) -> pd.DataFrame:
# Adds modified sweep whose default metrics are apc'd
    d = df[df.sweep_name == sweep_name].copy()  # copy so the edits below don't modify df
d.loc[:, "sweep_name"] = sweep_name + "-apc"
d.loc[:, "pr_at_L"] = d.loc[:, "pr_at_L_apc"]
d.loc[:, "pr_at_L_5"] = d.loc[:, "pr_at_L_5_apc"]
d.loc[:, "auc"] = d.loc[:, "auc_apc"]
d["apc"] = True
return df.append(d)
def parse_old_model(df):
d = df[
[
"sweep_name",
"pdb",
"pdb_idx",
"len_ref",
"num_seqs",
"run_state",
"Train_Precision_@_l/1",
"Train_Precision_apc_@_l/1",
"Train_Precision_@_l/5",
"Train_Precision_apc_@_l/5",
"Train_Auc",
"Train_Auc_apc",
]
]
d = d.rename(
columns={
"Train_Precision_@_l/1": "pr_at_L",
"Train_Precision_apc_@_l/1": "pr_at_L_apc",
"Train_Precision_@_l/5": "pr_at_L_5",
"Train_Precision_apc_@_l/5": "pr_at_L_5_apc",
"Train_Auc": "auc",
"Train_Auc_apc": "auc_apc",
"len_ref": "msa_length",
}
)
d["log_num_seqs"] = np.log2(d.num_seqs)
d["model"] = d["sweep_name"].map(lambda x: x.split("-")[0])
if "use_bias" in df.columns:
d["use_bias"] = df.use_bias
else:
d["use_bias"] = False
return d
def load_attention_msa_df(sweep_id, sweep_name, model_name, pdb_map):
# Loads sweep df for runs from old repo
project = "gremlin-contacts"
runs = api.runs(f"{entity}/{project}", {"sweep": f"{sweep_id}"}, per_page=1000)
print(f"{sweep_id} has {len(runs)} runs")
id_list = []
summary_list = []
config_list = []
name_list = []
model_list = []
state_list = []
tags_list = []
num_contacts_list = []
for run in tqdm(runs):
tags_list.append(run.tags)
state_list.append(run.state)
id_list.append(run.id)
# run.summary are the output key/values like accuracy. We call ._json_dict to omit large files
summary_list.append(run.summary._json_dict)
# run.config is the input metrics. We remove special values that start with _.
config_list.append(
{str(k): v for k, v in run.config.items() if not k.startswith("_")}
)
# run.name is the name of the run.
name_list.append(run.name)
model_list.append(model_name)
# currently unused, very slow to download true contact files
# num_contacts_list.append(get_num_true_contacts(run))
# num_contacts_df = pd.DataFrame({'num_true_contacts': num_contacts_list})
summary_df = pd.DataFrame.from_records(summary_list)
config_df = pd.DataFrame.from_dict(config_list)
pdb_id_df = pd.DataFrame({"pdb_idx": list(config_df.pdb.map(pdb_map))})
state_df = pd.DataFrame({"run_state": state_list})
name_df = | pd.DataFrame({"name": name_list}) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import numpy as np
import re
from sklearn.externals import joblib
# This program reads in both train and test data set
# and creates a dataset dictionary
# of cleaned and sanitized data.
# result format:
# {
# 'train': <pandas.DataFrame>
# 'test': <pandas.DataFrame>
# }
# extracts title from a name, i.e.
# extract_title('Caldwell, Mr. <NAME>') = 'Mr.'
def extract_title(name):
m = re.search('[^,]+, ([^\.]+)\..*', name)
return m.group(1)
@click.command()
@click.argument('train_filepath', type=click.Path(exists=True))
@click.argument('test_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(train_filepath, test_filepath, output_filepath):
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
df_train = | pd.read_csv(train_filepath, dtype={'Age': np.float64}) | pandas.read_csv |
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
# Evaluate price action for past hour, day, week, and month
General Rules:
- Never buy the top of a range...unless extreme volume and a quick trade
- Never buy a falling knife that has no support
- Never buy a product that is more than 10% above 14 period SMA
- Sell top of range if price has been ranging for less than 4 hours or more than 1 day
Questions:
- Is price going up, down, or ranging?
- up
Consider selling open positions
- down
Consider adding new positions
- ranging
            Consider a series of buy-low-sell-high trades until it doesn't work anymore.
    - Is volume weak, mediocre, or strong?
- weak
- on ranging price take ranging strategy
        - mediocre
- on up price consider taking profits, ranging/down price hold position
- strong
- on up price hold until there's a clear sign to sell or we have a very
healthy profit.
- on down price consider looking for past support and bottom-fishing
- on ranging price heavily consider adding a long
    - Is price at a healthy level near the 14 and 8 period SMAs?
- far above
Probably a little to risky to buy, good idea to close open positions
only exception is if price has CONSISTENTLY been above these SMAs
- right around
Probably a good idea to hold - not much you can get from this signal
- far below
Probably a good idea to bottom fish, especially on a volume spike and on past support.
- What is current price velocity?
- Fast
            - Consider quicker trading timeframes; too much in one direction can't be good.
        - Mediocre
- Consider mid-longer term timeframes with tight stops
- Low
- Consider longer term timeframes, not much to do here.
- Is 14 period RSI Overbought, Oversold, or in the Middle zone?
Overbought:
Consider taking profit unless embedded overbought
Oversold:
Consider bottom-fishing play
Middle Zone:
Consider holding and maybe adjusting stop to right below latest support.
How to determine an entry, target, and exit price:
1) Consider the above questions, do most of them say buy now?
- If no wait until they do
2) A healthy target is maybe 30% of the average range for the trading time period.
3) A stop loss should be set just under the next buy wall near the support price
- This may need to be monitored to follow the buy wall since those seem to move constantly.
4) A trading strategy should try 10 times and lose 2% each time with fewer than 5 wins before
requesting help and shutting down.
Preferably should be back-tested against past data to determine
profit/loss before being used in production.
"""
import os
from stockstats import StockDataFrame
import pandas as pd
from stocklook.crypto.gdax import GdaxChartData, Gdax
from stocklook.utils.timetools import now, now_minus, now_plus, timestamp_to_path
from stocklook.config import config
def get_velocity(df, price='price', date='date'):
    df.sort_values(date, ascending=True, inplace=True)
    # Assumed completion (not in the original source): per-row price change as a simple velocity proxy.
    return df[price].diff()
class TradeSet:
BUY = 'buy'
SELL = 'sell'
TYPES = [BUY, SELL]
def __init__(self, margin=False, funds=10000):
self.data = list()
self._df = None
self._pos_size = 0
self.margin = margin
self.funds = funds
self.start_funds = funds
self._trades = 0
@property
def df(self):
if self._df is None:
self.to_frame()
return self._df
@property
def trades(self):
return self._trades
@property
def position_size(self):
return self._pos_size
def add_trade(self, time, size, price, type):
assert type in self.TYPES
if type == self.SELL:
if size < 0:
size = abs(size)
if self._pos_size < size and not self.margin:
if self._pos_size <= 0:
                    # Can't sell coin you don't
# have without margin.
return False
size = self._pos_size
self._pos_size -= abs(size)
elif type == self.BUY:
if size > 0:
size = -size
total = size * price
if self.funds + total < 0:
if self.funds <= 0:
# Can't buy coin without funds.
return False
size = self.funds / price
size = -size
self._pos_size += abs(size)
self.funds += size * price
assert price > 0
contents = [time, size, price, type]
self.data.append(contents)
#print("Trade: {} - position: {} - funds: {}".format(contents, self._pos_size, self.funds))
self._trades += 1
return True
def buy(self, time, size, price):
return self.add_trade(time, size, price, self.BUY)
def sell(self, time, size, price):
return self.add_trade(time, size, price, self.SELL)
def clear(self):
self.data.clear()
def close_positions(self, time, price):
df = self.to_frame()
size_sum = df['size'].sum()
if size_sum < 0:
self.add_trade(time, size_sum, price, self.SELL)
elif size_sum > 0:
self.add_trade(time, size_sum, price, self.BUY)
def get_profit(self):
return self.funds - self.start_funds
def get_pnl(self):
return round(((self.funds / self.start_funds) * 100) - 100, 2)
def get_total_bought(self):
df = self.df
msk = df['type'].isin([self.BUY])
t = df.loc[msk, 'size'].sum()
return abs(t)
def get_total_sold(self):
df = self.df
msk = df['type'].isin([self.SELL])
t = df.loc[msk, 'size'].sum()
return abs(t)
def to_frame(self):
data = self.data
columns = ['time', 'size', 'price', 'type']
idx = range(len(data))
df = pd.DataFrame(data=data, columns=columns, index=idx)
df.loc[:, 'total'] = df['size'] * df['price']
self._df = df
return df
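# Usage sketch (added for illustration; not in the original source). Shows the
# intended TradeSet flow -- buy, sell, close out, then inspect PnL. The dates,
# sizes, and prices below are made-up example values.
def _demo_trade_set():
    ts = TradeSet(funds=1000)
    ts.buy('2017-09-01 10:00', size=2, price=100)   # spend 200, hold 2 units
    ts.sell('2017-09-01 12:00', size=1, price=110)  # realize part of the position
    ts.close_positions('2017-09-01 13:00', price=105)
    return ts.get_pnl(), ts.to_frame()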
class DecisionMaker:
"""
A decision maker analyzes a row of data
and decides whether or not to buy or sell.
"""
def __init__(self, stock_data_frame, trade_set, size, **kwargs):
self.sdf = stock_data_frame
self.tset = trade_set
self.kwargs = kwargs
self.size = size
self.trades = list()
def calculate(self):
raise NotImplementedError("Child classes should update "
"their sdf (StockDataFrame) here.")
def execute(self, row):
"""
Should buy or sell a position based on data found in the row.
:param row:
:return:
"""
raise NotImplementedError("Child classes should buy or sell here.")
def inputs(self):
raise NotImplementedError("Child classes should return "
"key class properties here.")
def register_trade(self, row, type):
row['type'] = type
self.trades.append(row)
def __repr__(self):
return "DecisionMaker(size='{}', trades='{}')".format(self.size, len(self.trades))
class MACDRSIMaker(DecisionMaker):
def __init__(self, *args, buy_ratio=1.7, sell_ratio=0.95):
DecisionMaker.__init__(self, *args)
self.buy_ratio = buy_ratio
self.sell_ratio = sell_ratio
self.macd_buy_point = None
self.macd_sell_point = None
self.rsi_buy_point = None
self.rsi_sell_point = None
if self.sdf is not None:
self.calculate()
def inputs(self):
return dict(buy_ratio=self.buy_ratio,
sell_ratio=self.sell_ratio,
macd_buy_point=self.macd_buy_point,
macd_sell_point=self.macd_sell_point,
rsi_buy_point=self.rsi_buy_point,
rsi_sell_point=self.rsi_sell_point)
def calculate(self):
self.sdf.get('macd')
self.sdf.get('rsi_6')
df = self.sdf.dropna(subset=['rsi_6', 'macd'], how='any')
df = df.loc[df['rsi_6'] > 0.1, :]
df = df.loc[df['macd'] > 0.1, :]
macd, rsi = df['macd'], df['rsi_6']
min_macd, max_macd = macd.min(), macd.max()
min_rsi, max_rsi = rsi.min(), rsi.max()
self.macd_buy_point = min_macd * self.buy_ratio
self.macd_sell_point = max_macd * self.sell_ratio
self.rsi_buy_point = min_rsi * self.buy_ratio
self.rsi_sell_point = max_rsi * self.sell_ratio
def execute(self, row):
tset, rec = self.tset, row
macd_buy = rec['macd'] <= self.macd_buy_point
rsi_buy = rec['rsi_6'] <= self.rsi_buy_point
macd_sell = rec['macd'] >= self.macd_sell_point
rsi_sell = rec['rsi_6'] >= self.rsi_sell_point
time = rec['time']
price = rec['close']
size = self.size
if macd_buy or rsi_buy:
buyable = tset.funds / price
if buyable > size * 3:
s = buyable / 3
else:
s = size
s = round(s, 0)
t = tset.buy(time, s, price)
if t:
self.register_trade(row, tset.BUY)
elif macd_sell or rsi_sell:
if tset.position_size > size * 3:
s = round(tset.position_size * .5, 0)
else:
s = size
t = tset.sell(time, s, price)
if t:
self.register_trade(row, tset.SELL)
def __repr__(self):
return ','.join("{}='{}'".format(k, v) for k, v in self.inputs().items())
class Strategy:
def __init__(self, stock_data_frame=None, margin=False, funds=1500, position_size=5):
self.tset = TradeSet(margin=margin, funds=funds)
self.stock_data_frame = stock_data_frame
self.position_size = position_size
self.makers = list()
def add_decision_maker(self, cls, **kwargs):
trade_set = TradeSet(margin=self.tset.margin,
funds=self.tset.funds)
maker = cls(self.stock_data_frame,
trade_set,
self.position_size,
**kwargs)
self.makers.append(maker)
def execute(self):
"""
        Executes every DecisionMaker against each row of the StockDataFrame in
        chronological order, then closes any open positions on the final row.
        Override in subclasses to customize this behavior.
        :return: None
"""
sdf = self.stock_data_frame
sdf.sort_values(['time'], ascending=[True], inplace=True)
[[maker.execute(rec) for maker in self.makers]
for _, rec in sdf.iterrows()]
rec = sdf.iloc[-1]
[maker.tset.close_positions(rec['time'], rec['close'])
for maker in self.makers]
def set_stock_df(self, df):
self.stock_data_frame = df
for maker in self.makers:
maker.sdf = df
try:
maker.calculate()
except:
pass
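# Usage sketch (added for illustration; not part of the original module). Wires a
# Strategy to the MACD/RSI decision maker; `sdf` is assumed to be a StockDataFrame
# with 'time' and 'close' columns plus the OHLC data stockstats needs.
def _demo_strategy(sdf):
    strat = Strategy(stock_data_frame=sdf, funds=1500, position_size=5)
    strat.add_decision_maker(MACDRSIMaker)
    strat.execute()
    return [(maker, maker.tset.get_pnl()) for maker in strat.makers]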
def run_macd_rsi_decisions(data_dir, product, start, end, granularity, overwrite=True, strat=None):
# File paths to be saved at the end.
out_name = '{}-BTEST-{}-{}.csv'.format(product, granularity, timestamp_to_path(end))
tout_name = out_name.split('.')[0] + '-TRADES.csv'
pout_name = out_name.split('.')[0] + '-PNL.csv'
out_path = os.path.join(data_dir, out_name) # Chart Data
tout_path = os.path.join(data_dir, tout_name) # Trade Data
pout_path = os.path.join(data_dir, pout_name) # PNL Data
if os.path.exists(tout_path) and not overwrite:
tdf = pd.read_csv(tout_path, parse_dates=['time'])
return tout_path, tdf
if os.path.exists(out_path) and not overwrite:
df = pd.read_csv(out_path, parse_dates=['time'])
else:
data = GdaxChartData(Gdax(), product, start, end, granularity=granularity)
try:
df = data.df
except ValueError:
return None, | pd.DataFrame() | pandas.DataFrame |
"""
This script is for analysing the outputs from the implementation of DeepAR in GluonTS
"""
import os, time
from pathlib import Path
import streamlit as st
import pandas as pd
import numpy as np
from gluonts.model.predictor import Predictor
from gluonts.dataset.common import ListDataset
from gluonts.transform import FieldName
from gluonts.evaluation.backtest import make_evaluation_predictions
import autodraft.visualization as viz
import autodraft.gluonts as glu
import autodraft.api as nhl
from bokeh.sampledata.perceptions import probly
# @st.cache
def load_model(file_path):
model = Predictor.deserialize(Path(file_path))
return model
@st.cache
def get_data(path='../../data/input/full_dataset_4_seasons.csv'):
data = pd.read_csv(path)
return data
# @st.cache
# def load_predictions(path='/home/ubuntu/AutoDraft/data/deepar_truncated_results_ne100_lre-4_bs64.csv'):
# data = pd.read_csv(path, index_col=2)
# return data
@st.cache
def load_predictions(path='../../data/output/deepar_truncated_results_unit_s_ne300_lr1e-3_bs64_nl3_cl3.csv'):
data = pd.read_csv(path, index_col=2)
model_name = path.split('/')[-1].split('.')[0]
return data, model_name
@st.cache
def load_joe():
joe = pd.read_csv('../../data/input/joe_schmo_4_seasons.csv')
return joe
@st.cache
def get_roster(path='../../data/input/full_roster_4_seasons.csv'):
data = pd.read_csv(path)
return data
@st.cache
def process_data(data, roster):
train, test, targets, targets_meta, stat_cat_features, dyn_cat_features, dyn_real_features, dyn_real_features_meta = glu.prep_df(data,
roster,
column_list=['name', 'date', 'gameNumber', 'cumStatpoints'],
streamlit=True,
scale=True)
return train, test, targets, targets_meta, stat_cat_features, dyn_cat_features, dyn_real_features, dyn_real_features_meta
# @st.cache
def run_prediction(model, data):
predictions = model.predict(dataset=data)
return list(predictions)
def process_prediction(prediction):
mean = prediction.mean_ts
mean = mean.reset_index()
mean = mean.rename(columns={'index': 'predictions'})
mean = mean.reset_index()
mean = mean.rename(columns={'index': 'gameNumber'})
mean = mean.drop(columns=[0])
mean.loc[:, 'gameNumber'] = mean.loc[:, 'gameNumber'] + 1
conf = pd.DataFrame()
conf.loc[:, 'low'] = prediction.quantile('0.05')
conf.loc[:, 'high'] = prediction.quantile('0.95')
full_df = pd.concat([mean, conf], axis=1)
return full_df
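# Note (added comment): the frame returned above has one row per forecasted game
# with columns ['gameNumber', 'predictions', 'low', 'high'], where 'low'/'high'
# are the 0.05/0.95 quantile bands taken from the GluonTS forecast object.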
def generate_prediction_df(predictions, train_data, test_data, drop=True, target='cumStatpoints', scaled=None, scaling_loc=None):
if scaled is not None:
scaling_meta = pd.read_pickle(scaling_loc)
st.write(scaling_meta)
names = test_data.loc[:, 'name'].unique()
full_predictions = pd.DataFrame()
for prediction, name in zip(predictions, names):
player_df = pd.DataFrame()
player_test_data = test_data.loc[test_data.loc[:, 'name'] == name].loc[:, ['name', 'date', target]]
player_train_data = train_data.loc[train_data.loc[:, 'name'] == name].loc[:, ['name', 'date', target]]
player_test_data.loc[:, 'date'] = | pd.to_datetime(player_test_data.loc[:, 'date']) | pandas.to_datetime |
import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = _join.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = _join.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
left = np.array(
[
0,
1,
0,
1,
1,
2,
3,
1,
0,
2,
1,
2,
0,
1,
1,
2,
3,
2,
3,
2,
1,
1,
3,
0,
3,
2,
3,
0,
0,
2,
3,
2,
0,
3,
1,
3,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
1,
1,
0,
2,
2,
2,
2,
2,
0,
3,
1,
2,
0,
0,
3,
1,
3,
2,
2,
0,
1,
3,
0,
2,
3,
2,
3,
3,
2,
3,
3,
1,
3,
2,
0,
0,
3,
1,
1,
1,
0,
2,
3,
3,
1,
2,
0,
3,
1,
2,
0,
2,
],
dtype=np.int64,
)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = | _join.left_join_indexer(idx2.values, idx.values) | pandas._libs.join.left_join_indexer |
# -*- coding: utf-8 -*-
"""
Covid-19 in São Paulo
Generates charts for tracking the Covid-19 pandemic
in the city and state of São Paulo.
@author: https://github.com/DaviSRodrigues
"""
from datetime import datetime, timedelta
from io import StringIO
import locale
import math
from tableauscraper import TableauScraper
import traceback
import unicodedata
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots
import requests
def main():
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
print('Carregando dados...')
hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = carrega_dados_cidade()
dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes = carrega_dados_estado()
print('\nLimpando e enriquecendo dos dados...')
dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes = pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
evolucao_cidade, evolucao_estado = gera_dados_evolucao_pandemia(dados_munic, dados_estado, isolamento, dados_vacinacao, internacoes)
evolucao_cidade, evolucao_estado = gera_dados_semana(evolucao_cidade, evolucao_estado, leitos_estaduais, isolamento, internacoes)
print('\nGerando gráficos e tabelas...')
gera_graficos(dados_munic, dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, evolucao_cidade, evolucao_estado, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes)
print('\nAtualizando serviceWorker.js...')
atualiza_service_worker(dados_estado)
print('\nFim')
def carrega_dados_cidade():
hospitais_campanha = pd.read_csv('dados/hospitais_campanha_sp.csv', sep=',')
leitos_municipais = pd.read_csv('dados/leitos_municipais.csv', sep=',')
leitos_municipais_privados = pd.read_csv('dados/leitos_municipais_privados.csv', sep=',')
leitos_municipais_total = pd.read_csv('dados/leitos_municipais_total.csv', sep=',')
return hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total
def carrega_dados_estado():
hoje = data_processamento
ano = hoje.strftime('%Y')
mes = hoje.strftime('%m')
data = hoje.strftime('%Y%m%d')
try:
print('\tAtualizando dados dos municípios...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv'
dados_munic = pd.read_csv(URL, sep=';', decimal=',')
opcoes_zip = dict(method='zip', archive_name='dados_munic.csv')
dados_munic.to_csv('dados/dados_munic.zip', sep=';', decimal=',', index=False, compression=opcoes_zip)
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_covid_sp.csv do GitHub: lendo arquivo local.\n')
dados_munic = pd.read_csv('dados/dados_munic.zip', sep=';', decimal=',')
try:
print('\tAtualizando dados estaduais...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/sp.csv'
dados_estado = pd.read_csv(URL, sep=';')
dados_estado.to_csv('dados/dados_estado_sp.csv', sep=';')
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_estado_sp.csv do GitHub: lendo arquivo local.\n')
dados_estado = pd.read_csv('dados/dados_estado_sp.csv', sep=';', decimal=',', encoding='latin-1', index_col=0)
try:
print('\tCarregando dados de isolamento social...')
isolamento = pd.read_csv('dados/isolamento_social.csv', sep=',')
except Exception as e:
print(f'\tErro ao buscar isolamento_social.csv\n\t{e}')
try:
print('\tAtualizando dados de internações...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/plano_sp_leitos_internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', decimal=',', thousands='.')
internacoes.to_csv('dados/internacoes.csv', sep=';', decimal=',')
except Exception as e:
try:
print(f'\tErro ao buscar internacoes.csv do GitHub: lendo arquivo da Seade.\n\t{e}')
URL = (f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/Leitos-e-Internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', encoding='latin-1', decimal=',', thousands='.', engine='python',
skipfooter=2)
except Exception as e:
print(f'\tErro ao buscar internacoes.csv da Seade: lendo arquivo local.\n\t{e}')
internacoes = pd.read_csv('dados/internacoes.csv', sep=';', decimal=',', thousands='.', index_col=0)
try:
print('\tAtualizando dados de doenças preexistentes...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/casos_obitos_doencas_preexistentes.csv.zip')
doencas = pd.read_csv(URL, sep=';')
if len(doencas.asma.unique()) == 3:
opcoes_zip = dict(method='zip', archive_name='doencas_preexistentes.csv')
doencas.to_csv('dados/doencas_preexistentes.zip', sep=';', compression=opcoes_zip)
else:
global processa_doencas
processa_doencas = False
raise Exception('O arquivo de doeças preexistentes não possui registros SIM/NÃO/IGNORADO para todas as doenças.')
except Exception as e:
try:
print(f'\tErro ao buscar doencas_preexistentes.csv do GitHub: lendo arquivo local.\n\t{e}')
doencas = pd.read_csv('dados/doencas_preexistentes.zip', sep=';', index_col=0)
except Exception as e:
print(f'\tErro ao buscar doencas_preexistentes.csv localmente: lendo arquivo da Seade.\n\t{e}')
URL = f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/casos_obitos_doencas_preexistentes.csv'
doencas = pd.read_csv(URL, sep=';', encoding='latin-1')
try:
print('\tAtualizando dados de casos/óbitos por raça e cor...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/casos_obitos_raca_cor.csv.zip')
dados_raciais = pd.read_csv(URL, sep=';')
opcoes_zip = dict(method='zip', archive_name='dados_raciais.csv')
dados_raciais.to_csv('dados/dados_raciais.zip', sep=';', compression=opcoes_zip)
except Exception as e:
print(f'\tErro ao buscar dados_raciais.csv do GitHub: lendo arquivo local.\n\t{e}')
dados_raciais = pd.read_csv('dados/dados_raciais.zip', sep=';', index_col=0)
print('\tAtualizando dados da campanha de vacinação...')
headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/88.0.4324.182 '
'Safari/537.36 '
'Edg/88.0.705.74'}
try:
print('\t\tDoses aplicadas por município...')
URL = f'https://www.saopaulo.sp.gov.br/wp-content/uploads/{ano}/{mes}/{data}_vacinometro.csv'
req = requests.get(URL, headers=headers, stream=True)
req.encoding = req.apparent_encoding
doses_aplicadas = pd.read_csv(StringIO(req.text), sep=';', encoding='utf-8-sig')
except Exception as e:
print(f'\t\tErro ao buscar {data}_vacinometro.csv da Seade: {e}')
doses_aplicadas = None
try:
print('\t\tDoses recebidas por cada município...')
URL = f'https://www.saopaulo.sp.gov.br/wp-content/uploads/{ano}/{mes}/{data}_painel_distribuicao_doses.csv'
req = requests.get(URL, headers=headers, stream=True)
req.encoding = req.apparent_encoding
doses_recebidas = pd.read_csv(StringIO(req.text), sep=';', encoding='utf-8-sig')
except Exception as e:
print(f'\t\tErro ao buscar {data}_painel_distribuicao_doses.csv da Seade: {e}')
doses_recebidas = None
try:
print('\t\tAtualizando doses aplicadas por vacina...')
url = 'https://www2.simi.sp.gov.br/views/PaineldeEstatsticasGerais_14_09_2021_16316423974680/PaineldeEstatsticasGerais'
scraper = TableauScraper()
scraper.loads(url)
sheet = scraper.getWorkbook().getWorksheet('donuts imunibiológico')
atualizacao_imunizantes = sheet.data.copy()
atualizacao_imunizantes['data'] = data_processamento
atualizacao_imunizantes = atualizacao_imunizantes[['data', 'Imunibiológico_Ajustado-alias', 'CNT(Imunibiológico_Ajustado)-alias']]
atualizacao_imunizantes.columns = ['data', 'vacina', 'aplicadas']
atualizacao_imunizantes.sort_values(by='vacina', inplace=True)
except Exception as e:
print(f'\t\tErro ao buscar dados de vacinas do Tableau: {e}')
traceback.print_exception(type(e), e, e.__traceback__)
atualizacao_imunizantes = None
leitos_estaduais = pd.read_csv('dados/leitos_estaduais.csv', index_col=0)
dados_vacinacao = pd.read_csv('dados/dados_vacinacao.zip')
dados_imunizantes = pd.read_csv('dados/dados_imunizantes.csv')
return dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes
def pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes):
print('\tDados municipais...')
dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = pre_processamento_cidade(dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total)
print('\tDados estaduais...')
dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_munic, dados_imunizantes = pre_processamento_estado(dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
return dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes
def pre_processamento_cidade(dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total):
dados_cidade = dados_munic.loc[dados_munic.nome_munic == 'São Paulo', ['datahora', 'casos', 'casos_novos', 'obitos', 'obitos_novos', 'letalidade']]
dados_cidade.columns = ['data', 'confirmados', 'casos_dia', 'óbitos', 'óbitos_dia', 'letalidade']
dados_cidade['letalidade'] = dados_cidade.letalidade * 100
dados_cidade['data'] = pd.to_datetime(dados_cidade.data)
dados_cidade['dia'] = dados_cidade.data.apply(lambda d: d.strftime('%d %b %y'))
hospitais_campanha['data'] = pd.to_datetime(hospitais_campanha.data, format='%d/%m/%Y')
hospitais_campanha['dia'] = hospitais_campanha.data.apply(lambda d: d.strftime('%d %b %y'))
leitos_municipais['data'] = pd.to_datetime(leitos_municipais.data, format='%d/%m/%Y')
leitos_municipais['dia'] = leitos_municipais.data.apply(lambda d: d.strftime('%d %b %y'))
leitos_municipais_privados['data'] = | pd.to_datetime(leitos_municipais_privados.data, format='%d/%m/%Y') | pandas.to_datetime |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def test_pickle_compat_construction(self):
# need an object to create with
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)")
with pytest.raises(TypeError, match=msg):
self._holder()
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_to_frame_datetime_tz(self):
# GH 25809
idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D')
idx = idx.tz_localize('UTC')
result = idx.to_frame()
expected = pd.DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = "Not supported for type {}".format(type(idx).__name__)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(' __', ' __r')
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match='cannot perform all'):
idx.all()
with pytest.raises(TypeError, match='cannot perform any'):
idx.any()
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
with pytest.raises(ValueError, match='The truth value of a'):
if idx:
pass
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match='Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
            if isinstance(ind, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, order=('a', 'b'))
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
for name, idx in compat.iteritems(self.indices):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
for name, idx in compat.iteritems(self.indices):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second, sort)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.difference(case, sort)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.sort_values().asi8,
answer.sort_values().asi8)
else:
result = first.difference(case, sort)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
def test_insert_base(self):
for name, idx in compat.iteritems(self.indices):
result = idx[1:4]
if not len(idx):
continue
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
for name, idx in compat.iteritems(self.indices):
if not len(idx):
continue
if isinstance(idx, RangeIndex):
# tested in class
continue
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
idx.delete(len(idx))
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
if isinstance(index_a, PeriodIndex):
pytest.skip('Skip check for PeriodIndex')
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_numpy_ufuncs(self):
# test ufuncs of numpy, see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
for name, idx in compat.iteritems(self.indices):
for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
np.rad2deg]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
# PeriodIndex behavior should be changed in future version
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# coerces to float (e.g. np.sin)
with np.errstate(all='ignore'):
result = func(idx)
exp = Index(func(idx.values), name=idx.name)
tm.assert_index_equal(result, exp)
assert isinstance(result, pd.Float64Index)
else:
# raise AttributeError or TypeError
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# Results in bool array
result = func(idx)
assert isinstance(result, np.ndarray)
assert not isinstance(result, Index)
else:
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
func(idx)
def test_hasnans_isnans(self):
# GH 11343, added tests for hasnans / isnans
for name, index in self.indices.items():
if isinstance(index, MultiIndex):
pass
else:
idx = index.copy()
# cases in indices doesn't include NaN
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy()
values = np.asarray(idx.values)
if len(index) == 0:
continue
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self):
# GH 11343
for name, index in self.indices.items():
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy()
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
for name, index in self.indices.items():
if len(index) == 0:
tm.assert_numpy_array_equal(
index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
else:
if not index.hasnans:
tm.assert_numpy_array_equal(
index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(
index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype('int64')
else:
expected = index
result = index.map(lambda x: x)
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
import pickle
import argparse
import common_utils
import itertools
import tqdm
import re
import collections
import random
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def get_top_frags(zipped, threshold):
assert threshold > 0.0
import operator
ig = operator.itemgetter(1)
temp = sorted(zipped, key=ig, reverse=True)
frags = []
cumm = 0.0
for ion, ion_int in temp:
if cumm > threshold:
break
frags.append(ion)
cumm += ion_int
return frags
def jacc(x, y, threshold=0.9, verbose=False):
assert isinstance(x, zip)
assert isinstance(y, zip)
xfrags = get_top_frags(x, threshold)
yfrags = get_top_frags(y, threshold)
assert len(xfrags) > 0
assert len(yfrags) > 0
if verbose:
print(f'experimental: {xfrags}')
print(f'predicted: {yfrags}')
intersect = len(set.intersection(set(xfrags), set(yfrags)))
union = len(set.union(set(xfrags), set(yfrags)))
return intersect / float(union)
def _bin_z(charge):
if int(charge) < 4:
return charge
else:
return '4+'
def _getbin(l):
if 7 <= l < 12:
return '7-11'
elif 12 <= l < 17:
return '12-16'
elif 17 <= l < 22:
return '17-21'
elif 22 <= l < 27:
return '22-26'
elif 27 <= l:
return '27+'
def baseline():
def _validateSpectraPair(spectra, minlength):
prev = None
assert len(spectra) == 2
for s in spectra:
try:
score, yi, yw, bi, bw, yf = s
if len(yi) < minlength:
return False
if prev == s:
return False
except Exception as e:
print(f"Unexpected number of tokens found!")
e.args += (str(s),)
raise
prev = s
return True
def _getSpectraPair(seq, data):
valid_spectra = data
ntry = 1
minlen = 5
spectra = random.sample(valid_spectra, 2)
try:
while not _validateSpectraPair(spectra, minlen):
ntry += 1
if ntry > 5:
valid_spectra = [d for d in data if len(d[1]) > minlen]
if len(valid_spectra) < 2:
print(f"Not enough valid spectra for {seq}")
return None
spectra = random.sample(valid_spectra, 2)
except Exception as e:
print(f"Exception occurred during processing {seq}")
e.args += (str(data),)
raise
return spectra
def _getParser(infile):
print('Processing the spectra...')
from MQParser import MQParser
with open(infile, 'r') as f:
parser = MQParser()
reader = pd.read_csv(f, delimiter='\t', iterator=True, chunksize=100000)
for index, chunk in enumerate(reader):
df = | pd.DataFrame(chunk) | pandas.DataFrame |
"""The postprocessing metric computation."""
import os # type: ignore
import numpy as np # type: ignore
import PySAM
import pandas as pd # type: ignore
import PySAM.PySSC as pssc # type: ignore
import PySAM.Singleowner as pysam_singleowner_financial_model # type: ignore
from copy import deepcopy # type: ignore
from typing import List, Tuple, Union # type: ignore
from pathlib import Path # type: ignore
from functools import partial # type: ignore
from itertools import product # type: ignore
from wombat.core import FixedCosts
from wombat.core.library import load_yaml
def _calculate_time_availability(
availability: np.ndarray, by_turbine=False
) -> Union[float, np.ndarray]:
"""Calculates the availability ratio of the whole timeseries or the whole timeseries, by turbine.
Parameters
----------
availability : np.ndarray
Timeseries array of operating ratios.
by_turbine : bool, optional
If True, calculates the availability rate of each column, otherwise across the whole array, by default False.
Returns
-------
Union[float, np.ndarray]
Availability ratio across the whole timeseries, or broken out by column (turbine).
"""
availability = availability > 0
if by_turbine:
return availability.sum(axis=0) / availability.shape[0]
return availability.sum() / availability.size
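# Worked example (added as a comment): for a 3-step, 2-turbine operating-level
# array such as np.array([[1.0, 0.0], [0.5, 0.0], [1.0, 1.0]]), every strictly
# positive entry counts as available, so the overall ratio is 4/6, while
# by_turbine=True gives the per-column ratios [3/3, 1/3].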
def _process_single(
events: pd.DataFrame, request_filter: np.ndarray
) -> Tuple[str, float, float, float, int]:
"""Computes the timing values for a single ``request_id``.
Parameters
----------
events : pd.DataFrame
The NaN-filtered events ``pd.DataFrame``.
request_filter : np.ndarray
The indicies to include for the calculation of the timings.
Returns
-------
Tuple[str, float, float, float, int]
The timing values. See ``process_times``.
"""
request = events.iloc[request_filter]
downtime = request[request.system_operating_level < 1]
vals = (
request.reason[0],
request.env_time.max() - request.env_time.min(), # total time
request.duration.sum(), # actual process time
downtime.env_time.max()
- downtime.env_time.min(), # downtime (duration of operations < 1)
1, # N processes
)
return vals
class Metrics:
"""The metric computation class that will store the logged outputs and compile results."""
_hourly_cost = "hourly_labor_cost"
_salary_cost = "salary_labor_cost"
_labor_cost = "total_labor_cost"
_equipment_cost = "equipment_cost"
_materials_cost = "materials_cost"
_total_cost = "total_cost"
_cost_columns = [
_hourly_cost,
_salary_cost,
_labor_cost,
_equipment_cost,
_materials_cost,
_total_cost,
]
def __init__(
self,
data_dir: Union[str, Path],
events: Union[str, pd.DataFrame],
operations: Union[str, pd.DataFrame],
potential: Union[str, pd.DataFrame],
production: Union[str, pd.DataFrame],
inflation_rate: float,
project_capacity: float,
turbine_capacities: List[float],
fixed_costs: str,
substation_id: Union[str, List[str]],
turbine_id: Union[str, List[str]],
service_equipment_names: Union[str, List[str]],
SAM_settings: Union[str, None] = None,
) -> None:
"""Initializes the Metrics class.
Parameters
----------
data_dir : Union[str, Path]
This should be the same as was used for running the analysis.
events : Union[str, pd.DataFrame]
Either a pandas ``DataFrame`` or filename to be used to read the csv log data.
operations : Union[str, pd.DataFrame]
Either a pandas ``DataFrame`` or filename to be used to read the csv log data.
potential : Union[str, pd.DataFrame]
Either a pandas ``DataFrame`` or a filename to be used to read the csv
potential power production data.
production : Union[str, pd.DataFrame]
Either a pandas ``DataFrame`` or a filename to be used to read the csv power
production data.
inflation_rate : float
The inflation rate to be applied to all dollar amounts from the analysis
starting year to ending year.
project_capacity : float
The project's rated capacity, in MW.
turbine_capacities : Union[float, List[float]]
The capacity of each individual turbine corresponding to ``turbine_id``, in kW.
fixed_costs : str
The filename of the project's fixed costs.
substation_id : Union[str, List[str]]
The substation id(s).
turbine_id : Union[str, List[str]]
The turbine id(s).
service_equipment_names : Union[str, List[str]]
The names of the servicing equipment, corresponding to
``ServiceEquipment.settings.name`` for each ``ServiceEquipment`` in the
simulation.
SAM_settings : Union[str, None]
The SAM settings YAML file located in <data_dir>/windfarm/<SAM_settings>
that should end in ".yaml". If no input is provided, then the model will
raise a ``NotImplementedError`` when the SAM-powered metrics are attempted to
be accessed.
"""
self.data_dir = Path(data_dir)
if not os.path.isdir(self.data_dir):
raise FileNotFoundError(f"{self.data_dir} does not exist")
self.inflation_rate = 1 + inflation_rate
self.project_capacity = project_capacity
fixed_costs = load_yaml(self.data_dir / "windfarm", fixed_costs)
self.fixed_costs = FixedCosts(**fixed_costs) # type: ignore
if isinstance(substation_id, str):
substation_id = [substation_id]
self.substation_id = substation_id
if isinstance(turbine_id, str):
turbine_id = [turbine_id]
self.turbine_id = turbine_id
if isinstance(service_equipment_names, str):
service_equipment_names = [service_equipment_names]
self.service_equipment_names = sorted(list(set(service_equipment_names)))
if isinstance(turbine_capacities, (float, int)):
turbine_capacities = [turbine_capacities]
self.turbine_capacities = turbine_capacities
if isinstance(events, str):
events = self._read_data(events)
self.events = self._apply_inflation_rate(self._tidy_data(events, kind="events"))
if isinstance(operations, str):
operations = self._read_data(operations)
self.operations = self._tidy_data(operations, kind="operations")
if isinstance(potential, str):
potential = self._read_data(potential)
self.potential = self._tidy_data(potential, kind="potential")
if isinstance(production, str):
production = self._read_data(production)
self.production = self._tidy_data(production, kind="production")
if SAM_settings is not None:
SAM_settings = "SAM_Singleowner_defaults.yaml"
self.sam_settings = load_yaml(self.data_dir / "windfarm", SAM_settings)
self._setup_pysam()
else:
self.sam_settings = None
self.financial_model = None
def _tidy_data(self, data: pd.DataFrame, kind: str) -> pd.DataFrame:
"""Tidies the "raw" csv-converted data to be able to be used among the ``Metrics``
class.
Parameters
----------
data : pd.DataFrame
The freshly imported csv log data.
kind : str
The category of the input provided to ``data``. Should be one of:
- "operations"
- "events"
- "potential"
- "production"
Returns
-------
pd.DataFrame
A tidied data frame to be used for all the operations in this class.
"""
if data.index.name != "datetime":
try:
data.datetime = pd.to_datetime(data.datetime)
except AttributeError:
data["datetime"] = pd.to_datetime(data.env_datetime)
data.index = data.datetime
data = data.drop(labels="datetime", axis=1)
data.env_datetime = pd.to_datetime(data.env_datetime)
data["year"] = data.env_datetime.dt.year
data["month"] = data.env_datetime.dt.month
data["day"] = data.env_datetime.dt.day
if kind == "operations":
data["windfarm"] = data[self.substation_id].mean(axis=1) * data[
self.turbine_id
].mean(axis=1)
elif kind in ("potential", "production"):
data[self.turbine_id] = data[self.turbine_id].astype(float)
return data
def _read_data(self, fname: str) -> pd.DataFrame:
"""Reads the csv log data from library. This is intended to be used for the
events or operations data.
Parameters
----------
fname : str
Filename of the csv data.
Returns
-------
pd.DataFrame
Dataframe of either the events or operations data.
"""
data = pd.read_csv(self.data_dir / "outputs" / "logs" / fname)
return data
def _apply_inflation_rate(self, events: pd.DataFrame) -> pd.DataFrame:
"""Adjusts the cost data for compounding inflation.
Parameters
----------
events : pd.DataFrame
The events dataframe containing the project cost data.
Returns
-------
pd.DataFrame
The events dataframe with costs adjusted for inflation.
"""
adjusted_inflation = deepcopy(self.inflation_rate)
years = events.year.unique()
years.sort()
for year in years:
row_filter = events.year == year
if year > years[0]:
events.loc[row_filter, self._cost_columns] *= adjusted_inflation
adjusted_inflation *= self.inflation_rate
return events
def _setup_pysam(self) -> None:
"""Creates and executes the PySAM model for financial metrics."""
# Define the model and import the SAM settings file.
self.financial_model = pysam_singleowner_financial_model.default(
"WindPowerSingleOwner"
)
model_data = pssc.dict_to_ssc_table(self.sam_settings, "singleowner")
self.financial_model = pysam_singleowner_financial_model.wrap(model_data)
# Remove the leap year production
leap_year_ix = self.production.index.month == 2
leap_year_ix &= self.production.index.day == 29
generation = self.production.loc[~leap_year_ix].windfarm.values
# Create a years variable for later use with the PySAM outputs
self.years = sorted(self.production.year.unique())
# Let mypy know that I know what I'm doing
assert isinstance(self.financial_model, PySAM.Singleowner.Singleowner)
# Replace the coded generation with modeled generation
self.financial_model.FinancialParameters.analysis_period = len(self.years)
self.financial_model.SystemOutput.gen = generation
# Reset the system capacity, in kW
self.financial_model.FinancialParameters.system_capacity = (
self.project_capacity * 1000
)
# Run the financial model
self.financial_model.execute()
def time_based_availability( # type: ignore
self, frequency: str, by: str
) -> Union[float, pd.DataFrame]:
"""Calculates the time-based availabiliy over a project's lifetime as a single
value, annual average, or monthly average for the whole windfarm or by turbine.
        .. note:: This currently assumes that if there are multiple substations, all
            turbines are connected to every substation.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by : str
One of "windfarm" or "turbine".
Returns
-------
Union[float, pd.DataFrame]
The time-based availability at the desired aggregation level.
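        Examples
        --------
        Illustrative usage sketch; ``metrics`` is assumed to be an already
        initialized ``Metrics`` instance and is not defined here:
        >>> project_availability = metrics.time_based_availability("project", "windfarm")
        >>> annual_by_turbine = metrics.time_based_availability(
        ...     frequency="annual", by="turbine"
        ... )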
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
'``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
by = by.lower().strip()
if by not in ("windfarm", "turbine"):
raise ValueError('``by`` must be one of "windfarm" or "turbine".')
by_turbine = by == "turbine"
operations = deepcopy(self.operations)
        # Scale each turbine's hourly operating level by the product of the substation
        # operating levels, since a turbine can only export power when its substation(s)
        # are operating.
        substation = np.prod(operations[self.substation_id], axis=1).values.reshape(
            (-1, 1)
        )
        hourly = substation * operations.loc[:, self.turbine_id].values
if frequency == "project":
availability = _calculate_time_availability(hourly, by_turbine=by_turbine)
if by == "windfarm":
return availability
availability = pd.DataFrame(
availability.reshape(1, -1), columns=self.turbine_id # type: ignore
)
return availability
elif frequency == "annual":
date_time = operations[["year"]]
counts = operations.groupby(by="year").count()
counts = counts[self.turbine_id] if by_turbine else counts[["windfarm"]]
annual = [
_calculate_time_availability(
hourly[date_time.year == year], by_turbine=by_turbine
)
for year in counts.index
]
return pd.DataFrame(
annual, index=counts.index, columns=counts.columns
).reset_index()
elif frequency == "monthly":
date_time = operations[["month"]]
counts = operations.groupby(by="month").count()
counts = counts[self.turbine_id] if by_turbine else counts[["windfarm"]]
monthly = [
_calculate_time_availability(
hourly[date_time.month == month], by_turbine=by_turbine
)
for month in counts.index
]
return pd.DataFrame(
monthly, index=counts.index, columns=counts.columns
).reset_index()
elif frequency == "month-year":
date_time = operations[["year", "month"]]
counts = operations.groupby(by=["year", "month"]).count()
counts = counts[self.turbine_id] if by_turbine else counts[["windfarm"]]
month_year = [
_calculate_time_availability(
hourly[(date_time.year == year) & (date_time.month == month)],
by_turbine=by_turbine,
)
for year, month in counts.index
]
return pd.DataFrame(
month_year, index=counts.index, columns=counts.columns
).reset_index()
def production_based_availability( # type: ignore
self, frequency: str, by: str
) -> Union[float, pd.DataFrame]:
"""Calculates the production-based availabiliy over a project's lifetime as a
single value, annual average, or monthly average for the whole windfarm or by
turbine.
        .. note:: This currently assumes that if there are multiple substations, all
            turbines are connected to every substation.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by : str
One of "windfarm" or "turbine".
Returns
-------
Union[float, pd.DataFrame]
The production-based availability at the desired aggregation level.
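        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> monthly_availability = metrics.production_based_availability(
        ...     frequency="monthly", by="windfarm"
        ... )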
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
'``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
by = by.lower().strip()
if by not in ("windfarm", "turbine"):
raise ValueError('``by`` must be one of "windfarm" or "turbine".')
by_turbine = by == "turbine"
production = self.production.loc[:, self.turbine_id]
potential = self.potential.loc[:, self.turbine_id]
if frequency == "project":
production = production.values
potential = potential.values
if (potential == 0).sum() > 0:
potential[potential == 0] = 1
if not by_turbine:
return production.sum() / potential.sum()
availability = pd.DataFrame(
(production.sum(axis=0) / potential.sum(axis=0)).reshape(1, -1),
columns=self.turbine_id,
)
return availability
production[["year", "month"]] = [
production.index.year.values.reshape(-1, 1),
production.index.month.values.reshape(-1, 1),
]
potential[["year", "month"]] = [
potential.index.year.values.reshape(-1, 1),
potential.index.month.values.reshape(-1, 1),
]
if frequency == "annual":
production = production.groupby("year").sum()[self.turbine_id]
potential = potential.groupby("year").sum()[self.turbine_id]
elif frequency == "monthly":
production = production.groupby("month").sum()[self.turbine_id]
potential = potential.groupby("month").sum()[self.turbine_id]
elif frequency == "month-year":
production = production.groupby(["year", "month"]).sum()[self.turbine_id]
potential = potential.groupby(["year", "month"]).sum()[self.turbine_id]
if (potential.values == 0).sum() > 0:
potential.loc[potential.values == 0] = 1
columns = self.turbine_id
if not by_turbine:
production = production.sum(axis=1)
potential = potential.sum(axis=1)
columns = [by]
return pd.DataFrame(production / potential, columns=columns)
def capacity_factor( # type: ignore
self, which: str, frequency: str, by: str
) -> Union[float, pd.DataFrame]:
"""Calculates the capacity factor over a project's lifetime as a single value,
annual average, or monthly average for the whole windfarm or by turbine.
        .. note:: This currently assumes that if there are multiple substations, all
            turbines are connected to every substation.
Parameters
----------
which : str
One of "net" or "gross".
frequency : str
One of "project", "annual", "monthly", or "month-year".
by : str
One of "windfarm" or "turbine".
Returns
-------
Union[float, pd.DataFrame]
The capacity factor at the desired aggregation level.
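        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance);
        "net" uses realized production while "gross" uses potential production:
        >>> net_cf = metrics.capacity_factor("net", "project", "windfarm")
        >>> gross_cf_by_turbine = metrics.capacity_factor(
        ...     which="gross", frequency="month-year", by="turbine"
        ... )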
"""
which = which.lower().strip()
if which not in ("net", "gross"):
raise ValueError('``which`` must be one of "net" or "gross".')
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
'``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
by = by.lower().strip()
if by not in ("windfarm", "turbine"):
raise ValueError('``by`` must be one of "windfarm" or "turbine".')
by_turbine = by == "turbine"
capacity = (
np.array(self.turbine_capacities) if by_turbine else self.project_capacity
)
production = self.production if which == "net" else self.potential
production = production.loc[:, self.turbine_id]
if frequency == "project":
potential = production.shape[0]
if not by_turbine:
production = production.values.sum() / 1000 # convert to MWh
return production / (potential * capacity)
potential = potential * capacity / 1000
cf = pd.DataFrame((production.sum(axis=0) / 1000 / potential)).T
return cf
production[["year", "month"]] = [
production.index.year.values.reshape(-1, 1),
production.index.month.values.reshape(-1, 1),
]
if frequency == "annual":
potential = production.groupby("year").count()[self.turbine_id]
production = production.groupby("year").sum()[self.turbine_id]
elif frequency == "monthly":
potential = production.groupby("month").count()[self.turbine_id]
production = production.groupby("month").sum()[self.turbine_id]
elif frequency == "month-year":
potential = production.groupby(["year", "month"]).count()[self.turbine_id]
production = production.groupby(["year", "month"]).sum()[self.turbine_id]
if by_turbine:
columns = self.turbine_id
potential = potential.iloc[:, 0].values.reshape(-1, 1) * (
capacity / 1000
).reshape(1, -1)
else:
production = production.sum(axis=1)
potential = potential.iloc[:, 0] * capacity
columns = [by]
return pd.DataFrame(production / 1000 / potential, columns=columns)
def task_completion_rate(
self, which: str, frequency: str
) -> Union[float, pd.DataFrame]:
"""Calculates the task completion rate over a project's lifetime as a single value,
annual average, or monthly average for the whole windfarm or by turbine.
.. note:: This currently assumes that if there are multiple substations, that
the turbines are all connected to multiple.
Parameters
----------
which : str
One of "scheduled", "unscheduled", or "both".
frequency : str
One of "project", "annual", "monthly", or "month-year".
Returns
-------
Union[float, pd.DataFrame]
The task completion rate at the desired aggregation level.
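        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> scheduled_rate = metrics.task_completion_rate("scheduled", "project")
        >>> monthly_rate = metrics.task_completion_rate(which="both", frequency="monthly")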
"""
which = which.lower().strip()
if which not in ("scheduled", "unscheduled", "both"):
raise ValueError(
'``which`` must be one of "scheduled", "unscheduled", or "both".'
)
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
'``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
if which == "scheduled":
task_filter = ["maintenance"]
elif which == "unscheduled":
task_filter = ["repair"]
else:
task_filter = ["maintenance", "repair"]
cols = ["env_datetime", "request_id"]
completion_filter = [f"{el} complete" for el in task_filter]
request_filter = [f"{el} request" for el in task_filter]
requests = self.events.loc[
self.events.action.isin(request_filter), cols
].reset_index(drop=True)
completions = self.events.loc[
self.events.action.isin(completion_filter), cols
].reset_index(drop=True)
if frequency == "project":
return completions.shape[0] / requests.shape[0]
requests[["year", "month"]] = [
requests.env_datetime.dt.year.values.reshape(-1, 1),
requests.env_datetime.dt.month.values.reshape(-1, 1),
]
completions[["year", "month"]] = [
completions.env_datetime.dt.year.values.reshape(-1, 1),
completions.env_datetime.dt.month.values.reshape(-1, 1),
]
if frequency == "annual":
group_filter = ["year"]
indices = self.operations.year.unique()
elif frequency == "monthly":
group_filter = ["month"]
indices = self.operations.month.unique()
elif frequency == "month-year":
group_filter = ["year", "month"]
indices = list(
product(self.operations.year.unique(), self.operations.month.unique())
)
requests = requests.groupby(group_filter).count()["request_id"]
requests.loc[requests == 0] = 1
completions = completions.groupby(group_filter).count()["request_id"]
missing = [ix for ix in indices if ix not in requests]
requests = requests.append(pd.Series(np.ones(len(missing)), index=missing))
requests = requests.sort_index()
missing = [ix for ix in indices if ix not in completions]
completions = completions.append(
pd.Series(np.zeros(len(missing)), index=missing)
)
completions = completions.sort_index()
completion_rate = pd.DataFrame(completions / requests).reset_index()
return completion_rate.rename(
columns={
"request_id": "Completion Rate",
0: "Completion Rate",
"index": "year",
}
)
def equipment_costs(
self, frequency: str, by_equipment: bool = False
) -> Union[float, pd.DataFrame]:
"""Calculates the equipment costs for the simulation at a project, annual, or
monthly level with (or without) respect to equipment utilized in the simulation.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by_equipment : bool, optional
            Indicates whether the values are with respect to the equipment utilized
(True) or not (False), by default False.
Returns
-------
Union[float, pd.DataFrame]
Returns either a float for whole project-level costs or a pandas ``DataFrame``
with columns:
- year (if appropriate for frequency)
- month (if appropriate for frequency)
- then any equipment names as they appear in the logs
Raises
------
ValueError
If ``frequency`` is not one of "project", "annual", "monthly", or "month-year".
ValueError
If ``by_equipment`` is not one of ``True`` or ``False``.
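        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> total_equipment_cost = metrics.equipment_costs(frequency="project")
        >>> annual_by_vessel = metrics.equipment_costs(frequency="annual", by_equipment=True)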
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
                '``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
if not isinstance(by_equipment, bool):
raise ValueError("``by_equipment`` must be one of ``True`` or ``False``")
if frequency == "annual":
col_filter = ["year"]
elif frequency == "monthly":
col_filter = ["month"]
elif frequency == "month-year":
col_filter = ["year", "month"]
if by_equipment:
if frequency == "project":
costs = (
self.events[self.events[self._equipment_cost] > 0]
.groupby(["agent"])
.sum()[[self._equipment_cost]]
.fillna(0)
.reset_index(level=0)
)
costs = costs.fillna(costs.max(axis=0)).T
costs = (
costs.rename(columns=costs.iloc[0])
.drop(index="agent")
.reset_index(drop=True)
)
return costs
col_filter = ["agent"] + col_filter
costs = (
self.events[self.events[self._equipment_cost] > 0]
.groupby(col_filter)
.sum()[[self._equipment_cost]]
.reset_index(level=0)
)
costs = pd.concat(
[
costs[costs.agent == eq][[self._equipment_cost]].rename(
columns={self._equipment_cost: eq}
)
for eq in costs.agent.unique()
],
axis=1,
)
return costs.fillna(value=0).reset_index()
if frequency == "project":
return self.events[self._equipment_cost].sum()
costs = self.events.groupby(col_filter).sum()[[self._equipment_cost]]
return costs.fillna(0).reset_index()
def service_equipment_utilization(self, frequency: str) -> pd.DataFrame:
"""Calculates the utilization rate for each of the service equipment in the
simulation.
Parameters
----------
frequency : str
One of "project" or "annual".
Returns
-------
pd.DataFrame
            The utilization rate of each of the simulation ``ServiceEquipment``.
Raises
------
ValueError
If ``frequency`` is not one of "project" or "annual".
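        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> annual_utilization = metrics.service_equipment_utilization(frequency="annual")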
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual"):
raise ValueError('``frequency`` must be one of "project" or "annual".')
no_requests = []
total_days = []
no_request_filter = self.events.reason == "no requests"
return_filter = self.events.action == "delay"
return_filter &= self.events.reason == "work is complete"
return_filter &= self.events.additional == "will return next year"
for name in self.service_equipment_names:
equipment_filter = self.events.agent == name
_events = self.events[equipment_filter & no_request_filter]
_events = _events.groupby("year").count()[["agent"]]
no_requests.append(_events.rename(columns={"agent": name}))
ix_filter = equipment_filter & ~return_filter
total = self.events[ix_filter].groupby(["year", "month", "day"]).size()
total = total.reset_index().groupby("year").count()[["day"]]
total_days.append(total.rename(columns={"day": name}))
if len(self.service_equipment_names) > 1:
no_requests_df = no_requests[0].join(no_requests[1:]).fillna(0)
total_df = total_days[0].join(total_days[1:]).fillna(1)
else:
no_requests_df = pd.DataFrame(no_requests[0])
total_df = pd.DataFrame(total_days[0])
for year in self.events.year.unique():
if year not in no_requests_df.index:
missing = pd.DataFrame(
np.zeros((1, no_requests_df.shape[1])),
index=[year],
columns=no_requests_df.columns,
)
no_requests_df = no_requests_df.append(missing).sort_index()
if year not in total_df.index:
missing = pd.DataFrame(
np.ones((1, total_df.shape[1])),
index=[year],
                    columns=total_df.columns,
)
total_df = total_df.append(missing).sort_index()
if frequency == "project":
no_requests_df = no_requests_df.reset_index().sum()[
self.service_equipment_names
]
total_df = total_df.reset_index().sum()[self.service_equipment_names]
return pd.DataFrame((total_df - no_requests_df) / total_df).T
return (total_df - no_requests_df) / total_df
def labor_costs(
self, frequency: str, by_type: bool = False
) -> Union[float, pd.DataFrame]:
"""Calculates the labor costs for the simulation at a project, annual, or
monthly level that can be broken out by hourly and salary labor costs.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by_type : bool, optional
            Indicates whether the values are with respect to the labor types
(True) or not (False), by default False.
Returns
-------
Union[float, pd.DataFrame]
Returns either a float for whole project-level costs or a pandas ``DataFrame``
with columns:
- year (if appropriate for frequency)
- month (if appropriate for frequency)
- total_labor_cost
- hourly_labor_cost (if broken out)
- salary_labor_cost (if broken out)
Raises
------
ValueError
If ``frequency`` is not one of "project", "annual", "monthly", or "month-year".
ValueError
If ``by_type`` is not one of ``True`` or ``False``.
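        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> total_labor = metrics.labor_costs(frequency="project")
        >>> monthly_by_type = metrics.labor_costs(frequency="monthly", by_type=True)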
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
'``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
if not isinstance(by_type, bool):
raise ValueError("``by_equipment`` must be one of ``True`` or ``False``")
labor_cols = [self._hourly_cost, self._salary_cost, self._labor_cost]
if frequency == "project":
costs = pd.DataFrame(
self.events[labor_cols].sum(axis=0).values.reshape(1, -1),
columns=labor_cols,
)
if not by_type:
return costs[self._labor_cost].values[0]
return costs
if frequency == "annual":
group_filter = ["year"]
elif frequency == "monthly":
group_filter = ["month"]
elif frequency == "month-year":
group_filter = ["year", "month"]
costs = (
self.events.groupby(group_filter)
.sum()[labor_cols]
.reset_index()
.fillna(value=0)
)
if not by_type:
return costs[group_filter + [self._labor_cost]]
return costs
def equipment_labor_cost_breakdowns(
self, frequency: str, by_category: bool = False
) -> pd.DataFrame:
"""Calculates the producitivty cost breakdowns for the simulation at a project, annual, or
monthly level that can be broken out to include the equipment and labor components.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by_category : bool, optional
Indicates whether to include the equipment and labor categories (True) or
not (False), by default False.
.. note:: Does not produce a value if there is no cost associated with a "reason".
Returns
-------
pd.DataFrame
Returns pandas ``DataFrame`` with columns:
- year (if appropriate for frequency)
- month (if appropriate for frequency)
- reason
- hourly_labor_cost (if by_category == ``True``)
- salary_labor_cost (if by_category == ``True``)
- total_labor_cost (if by_category == ``True``)
- equipment_cost (if by_category == ``True``)
- total_cost (if broken out)
Raises
------
ValueError
If ``frequency`` is not one of "project", "annual", "monthly", or "month-year".
ValueError
If ``by_category`` is not one of ``True`` or ``False``.
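        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> breakdown = metrics.equipment_labor_cost_breakdowns(
        ...     frequency="month-year", by_category=True
        ... )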
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
                '``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
if not isinstance(by_category, bool):
raise ValueError("``by_equipment`` must be one of ``True`` or ``False``")
group_filter = ["action", "reason", "additional"]
if frequency in ("annual", "month-year"):
group_filter.insert(0, "year")
elif frequency == "monthly":
group_filter.insert(0, "month")
if frequency == "month-year":
group_filter.insert(1, "month")
equipment = self.events[self.events[self._equipment_cost] > 0].agent.unique()
costs = (
self.events[
self.events.agent.isin(equipment)
& self.events.action.isin(
("delay", "repair", "maintenance", "mobilization")
)
& ~self.events.additional.isin(["work is complete"])
]
.groupby(group_filter)
.sum()[self._cost_columns]
.reset_index()
)
costs["display_reason"] = [""] * costs.shape[0]
group_filter.append("display_reason")
non_shift_hours = (
"not in working hours",
"work shift has ended; waiting for next shift to start",
"no more return visits will be made",
"will return next year",
)
costs.loc[
(costs.action == "delay") & (costs.additional.isin(non_shift_hours)),
"display_reason",
] = "Not in Shift"
costs.loc[costs.action == "repair", "display_reason"] = "Repair"
costs.loc[costs.action == "maintenance", "display_reason"] = "Maintenance"
costs.loc[costs.action == "mobilization", "display_reason"] = "Mobilization"
costs.loc[
costs.additional == "weather delay", "display_reason"
] = "Weather Delay"
costs.loc[costs.reason == "no requests", "display_reason"] = "No Requests"
costs.reason = costs.display_reason
group_filter.pop(group_filter.index("action"))
group_filter.pop(group_filter.index("display_reason"))
group_filter.pop(group_filter.index("additional"))
drop_columns = [self._materials_cost]
if not by_category:
drop_columns.extend(
[
self._hourly_cost,
self._salary_cost,
self._labor_cost,
self._equipment_cost,
]
)
costs = costs.drop(columns=drop_columns)
costs = costs.groupby(group_filter).sum().reset_index()
month_year = frequency == "month-year"
if frequency in ("annual", "month-year"):
years = costs.year.unique()
reasons = costs.reason.unique()
comparison_values = product(years, reasons)
if month_year:
months = costs.month.unique()
comparison_values = product(years, months, reasons)
zeros = np.zeros(costs.shape[1] - 2).tolist()
for _year, *_month, _reason in comparison_values:
row_filter = costs.year.values == _year
row = [_year, _reason] + zeros
if month_year:
_month = _month[0]
row_filter &= costs.month.values == _month
row = [_year, _month, _reason] + zeros[:-1]
row_filter &= costs.reason.values == _reason
if costs.loc[row_filter].size > 0:
continue
costs.loc[costs.shape[0]] = row
elif frequency == "monthly":
months = costs.month.unique()
reasons = costs.reason.unique()
comparison_values = product(months, reasons)
zeros = np.zeros(costs.shape[1] - 2).tolist()
for _month, _reason in comparison_values:
row_filter = costs.month.values == _month
row_filter &= costs.reason.values == _reason
row = [_month, _reason] + zeros
if costs.loc[row_filter].size > 0:
continue
costs.loc[costs.shape[0]] = row
new_sort = [
"Maintenance",
"Repair",
"Mobilization",
"Weather Delay",
"No Requests",
"Not in Shift",
]
costs.reason = pd.Categorical(costs.reason, new_sort)
if frequency == "project":
return costs.sort_values(by="reason").reset_index(drop=True)
if frequency == "annual":
return costs.sort_values(by=["year", "reason"]).reset_index(drop=True)
if frequency == "monthly":
return costs.sort_values(by=["month", "reason"]).reset_index(drop=True)
return costs.sort_values(by=["year", "month", "reason"]).reset_index(drop=True)
def component_costs(
self, frequency: str, by_category: bool = False, by_action: bool = False
) -> pd.DataFrame:
"""Calculates the component costs for the simulation at a project, annual, or
monthly level that can be broken out by cost categories. This will not sum to
        the total cost because it does not include times where there is no work being
done, but costs are being accrued.
        .. note:: The costs reported here include costs accrued from both weather
            delays and shift-to-shift delays. In the future these will be
            disentangled.
Parameters
----------
frequency : str
One of "project", "annual", "monthly", or "month-year".
by_category : bool, optional
            Indicates whether the values are with respect to the various cost
categories (True) or not (False), by default False.
by_action : bool, optional
Indicates whether component costs are going to be further broken out by the
action being performed--repair, maintenance, and delay--(True) or not
(False), by default False.
Returns
-------
Union[float, pd.DataFrame]
Returns either a float for whole project-level costs or a pandas ``DataFrame``
with columns:
- year (if appropriate for frequency)
- month (if appropriate for frequency)
- component
- action (if broken out)
- materials_cost (if broken out)
- total_labor_cost (if broken out)
- equipment_cost (if broken out)
- total_cost
Raises
------
ValueError
If ``frequency`` is not one of "project", "annual", "monthly", or "month-year".
ValueError
If ``by_category`` is not one of ``True`` or ``False``.
ValueError
If ``by_action`` is not one of ``True`` or ``False``.
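        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> component_totals = metrics.component_costs(frequency="project")
        >>> detailed = metrics.component_costs(
        ...     frequency="annual", by_category=True, by_action=True
        ... )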
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual", "monthly", "month-year"):
raise ValueError(
                '``frequency`` must be one of "project", "annual", "monthly", or "month-year".'
)
if not isinstance(by_category, bool):
raise ValueError("``by_equipment`` must be one of ``True`` or ``False``")
if not isinstance(by_action, bool):
raise ValueError("``by_equipment`` must be one of ``True`` or ``False``")
events = deepcopy(self.events)
events = events[~events.part_id.isna()]
# Need to simplify the cable identifiers to not include the connection information
events["component"] = [el.split("::")[0] for el in events.part_id.values]
cost_cols = ["total_cost"]
if by_category:
cost_cols[0:0] = ["materials_cost", "total_labor_cost", "equipment_cost"]
group_filter = []
if frequency == "annual":
group_filter = ["year"]
elif frequency == "monthly":
group_filter = ["month"]
elif frequency == "month-year":
group_filter = ["year", "month"]
group_filter.append("component")
if by_action:
repair_map = {
val: "repair" for val in ("repair request", "repair", "repair_complete")
}
maintenance_map = {
val: "maintenance"
for val in (
"maintenance request",
"maintenance",
"maintenance_complete",
)
}
delay_map = {"delay": "delay"}
action_map = {**repair_map, **maintenance_map, **delay_map}
events.action = events.action.map(action_map)
group_filter.append("action")
month_year = frequency == "month-year"
zeros = np.zeros(len(cost_cols)).tolist()
costs = events.groupby(group_filter).sum()[cost_cols].reset_index()
if not by_action:
costs["action"] = np.zeros(costs.shape[0])
cols = costs.columns.to_list()
_ix = cols.index("component") + 1
cols[_ix:_ix] = ["action"]
cols.pop(-1)
costs = costs[cols]
if frequency in ("annual", "month-year"):
years = costs.year.unique()
components = costs.component.unique()
actions = costs.action.unique()
comparison_values = product(years, components, actions)
if month_year:
months = costs.month.unique()
comparison_values = product(years, months, components, actions)
for _year, *_month, _component, _action in comparison_values:
row_filter = costs.year.values == _year
row_filter &= costs.component.values == _component
row_filter &= costs.action.values == _action
row = [_year, _component, _action] + zeros
if month_year:
_month = _month[0]
row_filter &= costs.month.values == _month
row = [_year, _month, _component, _action] + zeros
if costs.loc[row_filter].size > 0:
continue
costs.loc[costs.shape[0]] = row
elif frequency == "monthly":
months = costs.month.unique()
components = costs.component.unique()
actions = costs.action.unique()
comparison_values = product(months, actions, components)
for _month, _action, _component in comparison_values:
row_filter = costs.month.values == _month
row_filter &= costs.component.values == _component
row_filter &= costs.action.values == _action
row = [_month, _component, _action] + zeros
if costs.loc[row_filter].size > 0:
continue
costs.loc[costs.shape[0]] = row
elif frequency == "project":
components = costs.component.unique()
actions = costs.action.unique()
comparison_values = product(actions, components)
for _action, _component in comparison_values:
row_filter = costs.component.values == _component
row_filter &= costs.action.values == _action
row = [_component, _action] + zeros
if costs.loc[row_filter].size > 0:
continue
costs.loc[costs.shape[0]] = row
costs = costs.sort_values(group_filter)[group_filter + cost_cols]
return costs.reset_index(drop=True)
def project_fixed_costs(self, frequency: str, resolution: str) -> pd.DataFrame:
"""Calculates the fixed costs of a project at the project and annual frequencies
at a given cost breakdown resolution.
Parameters
----------
frequency : str
One of "project" or "annual".
resolution : str
One of "low", "medium", or "high", where the values correspond to:
- low: ``FixedCosts.resolution["low"]``, corresponding to the itemized costs.
- medium: ``FixedCosts.resolution["medium"]``, corresponding to the
overarching cost categories.
- high: ``FixedCosts.resolution["high"]``, corresponding to a lump sum.
These values can also be seen through the ``FixedCosts.hierarchy``
Returns
-------
pd.DataFrame
The project's fixed costs as a sum or annualized with high, medium, and low
resolution as desired.
Raises
------
ValueError
            If ``frequency`` is not one of "project" or "annual".
        ValueError
            If ``resolution`` is not one of "low", "medium", or "high".
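        Examples
        --------
        Illustrative usage sketch (``metrics`` is an assumed, initialized instance):
        >>> lump_sum = metrics.project_fixed_costs(frequency="project", resolution="high")
        >>> itemized_annual = metrics.project_fixed_costs(frequency="annual", resolution="low")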
"""
frequency = frequency.lower().strip()
if frequency not in ("project", "annual"):
raise ValueError('``frequency`` must be one of "project" or "annual".')
resolution = resolution.lower().strip()
if resolution not in ("low", "medium", "high"):
raise ValueError(
'``resolution`` must be one of "low", "medium", or "high".'
)
keys = self.fixed_costs.resolution[resolution]
vals = [[getattr(self.fixed_costs, key) for key in keys]]
costs = | pd.DataFrame(vals, columns=keys) | pandas.DataFrame |
#%%
import pandas as pd
import numpy as np
import holoviews as hv
import hvplot.pandas
from scipy.sparse.linalg import svds
from scipy.stats import chisquare, chi2_contingency
from sklearn.decomposition import TruncatedSVD
from umoja.ca import CA
hv.extension('bokeh')
#%%
X = context.io.load('xente_train')
Y = context.io.load('xente_sample_submission')
Y_wide = (Y
.loc[:, 'Account X date X PID']
.str.split(' X ', expand=True)
.rename(columns={0:'acc', 1:'date',2:'PID'})
.assign(test = True)
)
context.io.save('xente_sample_submission_wide', Y_wide)
data = (pd.concat([Y_wide, X.assign(test = False)], axis=0)
.reset_index()
.rename(columns={'index':'old_index'})
.assign(acc = lambda df: df.acc.astype('int64')))
context.io.save('xente_merged', data)
# %%
# Seconds elapsed since the previous transaction within each (acc, PID) group.
a = (data.drop(columns=['old_index'])
.groupby(['acc', 'PID'])
.date
.apply(lambda df: pd.to_datetime(df).sort_values().diff().dt.total_seconds()
.fillna(0))
.reset_index()
.rename(columns={'level_2': 'index'})
.set_index('index')
.rename(columns={'date': 'time_since_last'}))
b = data.loc[:,['date']].assign(date = lambda df: pd.to_datetime(df.date))
#%%
time_since_last = (a.join(b).sort_values('date'))
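#%%
# Self-contained sketch of the same "seconds since the previous event per (acc, PID)
# group" pattern on toy data; the values below are illustrative assumptions only and
# are not part of the Xente dataset or pipeline.
toy = pd.DataFrame(
    {
        'acc': [1, 1, 1, 2, 2],
        'PID': ['a', 'a', 'b', 'a', 'a'],
        'date': pd.to_datetime(['2019-01-01 00:00', '2019-01-01 06:00',
                                '2019-01-02 00:00', '2019-01-03 00:00',
                                '2019-01-03 12:00']),
    }
)
toy_gaps = (toy
            .groupby(['acc', 'PID'])
            .date
            .apply(lambda df: df.sort_values().diff().dt.total_seconds().fillna(0))
            .reset_index()
            .rename(columns={'date': 'time_since_last'}))
toy_gaps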
#%%
dummies = | pd.get_dummies(time_since_last.PID) | pandas.get_dummies |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
        # reading the query all at once
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
        # test index_col argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
| tm.assert_frame_equal(iris_frame1, iris_frame2) | pandas._testing.assert_frame_equal |
#!usr/bin/env python
"""
Evaluate the performance of the generative model on multiple aspects:
to be filled
"""
import pandas as pd
import numpy as np
from post_processing import data
from rdkit import Chem, DataStructs
import scipy.stats as ss
import math
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import SVG
import time
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
def internal_sim(smi_lst):
"""
Compute internal similarity within generated SMILES
Args:
smi_lst: list of generated unique SMILE structures
    Returns: Average internal molecular similarity within the input list
"""
setV = len(smi_lst)
mols = data.get_mols(smi_lst)
fps_morgan, _ = data.get_fingerprints(mols)
total_smi = 0
for i in range(len(fps_morgan)):
for j in range(len(fps_morgan)):
total_smi += DataStructs.DiceSimilarity(fps_morgan[i], fps_morgan[j])
Din = total_smi/(setV*setV)
return Din
def external_sim(smi_lst, reference):
"""
Compute the external similarity against the source data, i.e. the average similarity between the
generated molecules and their nearest neighbours in the training set.
Args:
smi_lst: list of generated unique SMILE structures
reference: list of SMILES used for training the generation
    Returns: Average external molecular similarity between the generated and original lists
"""
    gen_mols, ori_mols = data.get_mols(smi_lst), data.get_mols(reference)
    fps_gen, _ = data.get_fingerprints(gen_mols)
    #print(len(smi_lst), len(fps_gen))
    fps_ori, _ = data.get_fingerprints(ori_mols)
similarity_maxs = []
neighbours = []
for i in range(len(fps_gen)):
similarity_with = []
gen_smi = smi_lst[i]
for j in range(len(fps_ori)):
similarity_with.append(DataStructs.DiceSimilarity(fps_gen[i], fps_ori[j]))
similarity_maxs.append(max(similarity_with))
k = np.argmax(similarity_with)
ref_neighbour = similarity_with[k]
neighbours.extend([reference[k], ref_neighbour])
assert (len(similarity_maxs) == len(fps_gen))
Dext = np.sum(similarity_maxs)/len(fps_gen)
return Dext, neighbours
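# Note added for clarity: `neighbours` is a flat list of alternating
# [nearest_training_smiles, similarity] entries, one pair per generated molecule,
# e.g. ['CCO', 0.84, 'CCN', 0.61, ...] (values here are made up).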
def KL_divergence(gen_arr, reference_arr):
"""
Args:
gen_arr: array of numeric parameters of generated molecules
reference_arr: array of original numeric parameters of training molecules
Returns: KL-divergence of value_arr against reference_arr
"""
epsilon = 0.0001
min_val = math.floor(min(min(gen_arr), min(reference_arr)))
max_val = math.ceil(max(max(gen_arr), max(reference_arr)))
gen_arr_dis = np.histogram(gen_arr, bins=12,range=(min_val, max_val), density=True)[0] + epsilon
reference_arr_dis = np.histogram(reference_arr, bins=12, range=(min_val, max_val), density=True)[0] + epsilon
entropy = ss.entropy(reference_arr_dis, gen_arr_dis)
return entropy
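# Illustrative sketch (not part of the original pipeline): a quick sanity check of
# the histogram-based KL estimate above, using made-up normal samples.
def _kl_divergence_demo():
    rng = np.random.default_rng(0)
    reference = rng.normal(loc=2.0, scale=1.0, size=500)   # stand-in for experimental values
    generated = rng.normal(loc=2.5, scale=1.2, size=500)   # stand-in for generated values
    # Identical distributions give a value near 0; a shift or stretch gives a positive divergence.
    print('estimated KL(reference || generated):', KL_divergence(generated, reference))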
def generate_metric_df():
all_exp_df = pd.read_csv('exp_df_merged.csv')
all_gen_df = pd.read_csv('novel_sampled_merged.csv')
eval_df = pd.DataFrame()
eval_df['Group'] = ['all', 'class3', 'prom']
internal_sims = []; external_sims = []; gaps_kls = []; dips_kls= []
for group in ['all', 'class3', 'prom']:
gen_smi = all_gen_df[all_gen_df['Label'] == group]['SMILES'].tolist()
exp_smi = all_exp_df[all_exp_df['Label'] == group]['SMILES'].tolist()
gen_gap = all_gen_df[all_gen_df['Label'] == group]['Gaps']
exp_gap = all_exp_df[all_exp_df['Label'] == group]['gaps']
gen_dip = all_gen_df[all_gen_df['Label'] == group]['Dips']
exp_dip = all_exp_df[all_exp_df['Label'] == group]['dips']
internal_ = internal_sim(gen_smi)
internal_sims.append(internal_)
external_ , _= external_sim(gen_smi, exp_smi)
external_sims.append(external_)
gaps_kl = KL_divergence(gen_gap, exp_gap)
dips_kl = KL_divergence(gen_dip, exp_dip)
gaps_kls.append(gaps_kl)
dips_kls.append(dips_kl)
print('Internal similarity for group {}: {}'.format(group, internal_))
print('External similarity for group {}: {}'.format(group, external_))
print('KL divergence for H-L gaps for group {}: {}'.format(group, gaps_kl))
print('KL divergence for dips for group {}: {}'.format(group, dips_kl))
eval_df['Internal_similarity'] = internal_sims
eval_df['External_similarity'] = external_sims
eval_df['KL_gaps'] = gaps_kls
eval_df['KL_dips'] = dips_kls
return eval_df
def find_neighbour(smi, b_lst, n=5):
"""
get n neighbours (most similar molecules) of smi from b_lst
IMPORTANT: all smiles must be valid.
Args:
smi: target smile representation of molecule
b_lst: list of smiles
n: number of neighbours to obtain
Returns: list of smiles of the n neighbours
"""
    smi_mol, lst_mols = data.get_mols([smi]), data.get_mols(b_lst)
    fps_lst, _ = data.get_fingerprints(lst_mols)
    smi_fp, _ = data.get_fingerprints(smi_mol)
assert len(fps_lst) == len(b_lst), "Invalid SMILES representation present."
similarity = []
for i in range(len(fps_lst)):
tmp_sim = DataStructs.DiceSimilarity(smi_fp[0], fps_lst[i])
similarity.append((b_lst[i], tmp_sim))
    # sort by similarity, most similar first
    sorted_sim = sorted(similarity, key=lambda tup: tup[1], reverse=True)
return sorted_sim[:n]
def moltosvg(mol,molSize=(450,150),kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0],molSize[1])
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
return svg.replace('svg:','')
def get_prom_neighbours():
"""
Get the closest neighbours of gen prom molecules in the reference set
Returns:
"""
all_exp_df = pd.read_csv('exp_df_merged.csv')
all_gen_df = pd.read_csv('novel_sampled_merged.csv')
    exp_prom = all_exp_df[(all_exp_df['Gaps'] <= 2) & (all_exp_df['Dips'] <= 2)]
if __name__ == '__main__':
m1_train = | pd.read_csv('Training') | pandas.read_csv |
import numpy as np
from numpy.random import randn
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_frame(frame, name):
frame_result = getattr(frame.ewm(com=10), name)()
assert isinstance(frame_result, DataFrame)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
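def test_ewma_cases_manual_recursion():
    # Illustrative cross-check (not from the original suite): reproduce the
    # adjust=False expectation above with the explicit recursion
    # y_t = (1 - alpha) * y_{t-1} + alpha * x_t, where alpha = 1 / (1 + com).
    x = [1.0, 2.0, 4.0, 8.0]
    alpha = 1.0 / (1.0 + 2.0)
    manual = [x[0]]
    for value in x[1:]:
        manual.append((1 - alpha) * manual[-1] + alpha * value)
    result = Series(x).ewm(com=2.0, adjust=False).mean()
    tm.assert_series_equal(result, Series(manual))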
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
| Series([1.0, np.nan, 101.0]) | pandas.Series |
# write_Division_Codes_from_Census.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Grabs Census Region and Division codes from a static URL.
- Writes reshaped file to datapath as csv.
"""
import pandas as pd
import numpy as np
from flowsa.settings import datapath
url = "https://www2.census.gov/programs-surveys/popest/geographies/2017/state-geocodes-v2017.xlsx"
if __name__ == '__main__':
# Read directly into a pandas df,
raw_df = | pd.read_excel(url) | pandas.read_excel |
"""
Module for collecting metrics values from GCE datastore
generated with the cloud functions located in feature_engineering
USAGE:
$python3 collect_from_datastore.py
This will create a .csv file in the current folder containing
the values of all the metrics available in the database for later
use in the jupyter notebooks of this section
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
# [START datastore_build_service]
from google.cloud import datastore
# Function to create datastore client
def create_client(project_id):
return datastore.Client(project_id)
# [END datastore_build_service]
# Function to gather all properties for a kind in a pandas dataframe
def get_jobs_df(kind, namespace):
query = client.query(kind=kind, namespace=namespace)
query_iter = query.fetch()
i = 0
number_of_pages = 300
jobs_df = pd.DataFrame()
for page in tqdm(query_iter.pages):
i += 1
tasks = list(page)
page_df = pd.DataFrame(data=tasks)
print(i * number_of_pages, ' videos retrieved so far')
jobs_df = pd.concat([jobs_df, page_df], axis=0, sort=True)
print('Data retrieval completed {} videos retrieved, {} features extracted'.format(jobs_df.shape[0],jobs_df.shape[1]))
return jobs_df
def initialize():
global client
print('Initializing...')
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 23:14:33 2020
@author: arti
"""
import pandas as pd
import seaborn as sns
df = pd.read_csv('./titanic.csv')
pd.set_option('display.max_columns', 15)
rdf = df.drop(['deck', 'embark_town'], axis=1)
rdf = rdf.dropna(subset=['age'], how='any', axis=0)
most_freq = rdf['embarked'].value_counts(dropna=True).idxmax()
rdf['embarked'].fillna(most_freq, inplace=True)
ndf = rdf[['survived', 'pclass', 'sex', 'age', 'sibsp', 'parch', 'embarked']]
onehot_sex = pd.get_dummies(ndf['sex'])
ndf = pd.concat([ndf, onehot_sex], axis=1)
onehot_embarked = pd.get_dummies(ndf['embarked'], prefix='town')
ndf = | pd.concat([ndf, onehot_embarked], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import json
import argparse
import os
def get_args():
""" Allows users to input arguments
Returns:
argparse.ArgumentParser.parse_args
Object containing options input by user
"""
def isFile(string: str):
if os.path.isfile(string):
return string
else:
            raise argparse.ArgumentTypeError(f"{string} is not a valid file path")
parser = argparse.ArgumentParser()
parser.description = "Counts the amount of MAG, and MeSH terms in a JSON newline delmited file."
parser.add_argument("input_file", type=isFile,
help="Path to JSON newline delimited file")
return parser.parse_args()
def load_dataframe(input_path: str):
""" Loads json newline delimited file into a dataframe and trims down to mesh and mag terms
Args:
input_path: str
Full or relative path to the input file
Returns:
pandas.DataFrame
Dataframe containing only mag and mesh terms
"""
paper_list = []
with open(input_path, "r") as cleaned_file:
for line in cleaned_file:
paper_list.append(json.loads(line))
dataframe = | pd.DataFrame(paper_list) | pandas.DataFrame |
import pandas as pd
from numpy import datetime64
from pandas_datareader import data
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from yahoofinancials import YahooFinancials
# holding period return in percents
def get_holding_period_return(df: DataFrame, start, end, col) -> float:
start_price = df.at[pd.to_datetime(start), col]
end_price = df.at[pd.to_datetime(end), col]
return round((end_price - start_price) * 100 / start_price, 2)
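# Worked example (made-up numbers): a position bought at 100.0 and priced at 112.0
# at the end of the window has a holding period return of
# (112.0 - 100.0) * 100 / 100.0 = 12.0 percent, which is what the helper returns.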
# calculates return for the last x days in percents
def get_rolling_x_day_return(df: DataFrame, col: str, days: int) -> DataFrame:
def f(x: Series):
        return round((x.iloc[-1] - x.iloc[0]) * 100 / x.iloc[0], 2)
df['Rolling Return'] = df[col].rolling(days).apply(f)
return df
# The rolling-return rank is the percentile that the stock's rolling returns fall into.
def get_market_timing(df: DataFrame, col: str) -> DataFrame:
df['{} Rank'.format(col)] = round(df[col].rank(pct=True) * 100, 2)
return df
def format_quater(dt: datetime64) -> str:
return '{}Q{}'.format(int((dt.month - 1) / 3) + 1, dt.year)
# read stock data from yahoo finance and return adjusted close prices
def read_stock_dataframe(start: str, end: str, symbol: str) -> DataFrame:
df = data.get_data_yahoo(symbol, start, end)
df['Quarter'] = df.index
df['Quarter'] = df['Quarter'].apply(format_quater)
df = df[['Quarter', 'Adj Close']]
return df
# return df with the following columns
# Quarter, Shares Outstanding, Net Income, Total Sales, Book Value
def read_quarter_res(symbol: str) -> DataFrame:
df = {
'Quarter': [],
'Book Value': [],
'Net Income': [],
'Total Sales': [],
'Shares Outstanding': [],
}
stock = YahooFinancials(symbol)
res = stock.get_financial_stmts('quarterly', ['income', 'balance'])
income = res['incomeStatementHistoryQuarterly'][symbol]
balance = res['balanceSheetHistoryQuarterly'][symbol]
for dt in [list(d.items())[0][0] for d in income]:
dt_income = [x[dt] for x in income if dt in x.keys()][0]
dt_balance = [x[dt] for x in balance if dt in x.keys()][0]
df['Quarter'].append(format_quater(pd.to_datetime(dt)))
df['Book Value'].append(dt_balance['netTangibleAssets'])
df['Net Income'].append(dt_income['netIncome'])
df['Total Sales'].append(dt_income['totalRevenue'])
df['Shares Outstanding'].append(dt_balance['commonStock'])
return | pd.DataFrame.from_dict(df) | pandas.DataFrame.from_dict |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.tools.post_processing.post_processing_factory import PostProcessingFactory
from sos_trades_core.study_manager.study_manager import StudyManager
from os.path import join, dirname
from numpy import asarray, arange, array
import pandas as pd
import numpy as np
from sos_trades_core.execution_engine.func_manager.func_manager import FunctionManager
from sos_trades_core.execution_engine.func_manager.func_manager_disc import FunctionManagerDisc
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def update_dspace_with(dspace_dict, name, value, lower, upper):
''' type(value) has to be ndarray
'''
if not isinstance(lower, (list, np.ndarray)):
lower = [lower] * len(value)
if not isinstance(upper, (list, np.ndarray)):
upper = [upper] * len(value)
dspace_dict['variable'].append(name)
dspace_dict['value'].append(value.tolist())
dspace_dict['lower_bnd'].append(lower)
dspace_dict['upper_bnd'].append(upper)
dspace_dict['dspace_size'] += len(value)
def update_dspace_dict_with(dspace_dict, name, value, lower, upper, activated_elem=None, enable_variable=True):
if not isinstance(lower, (list, np.ndarray)):
lower = [lower] * len(value)
if not isinstance(upper, (list, np.ndarray)):
upper = [upper] * len(value)
if activated_elem is None:
activated_elem = [True] * len(value)
dspace_dict[name] = {'value': value,
'lower_bnd': lower, 'upper_bnd': upper, 'enable_variable': enable_variable, 'activated_elem': activated_elem}
dspace_dict['dspace_size'] += len(value)
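def _update_dspace_demo():
    """Illustrative only (not part of the original study): what the helper above
    produces for a single two-element design variable."""
    dspace = {'dspace_size': 0}
    update_dspace_dict_with(dspace, 'x', np.array([1.0, 2.0]), 0.0, 10.0)
    # dspace['x'] now holds value, lower_bnd, upper_bnd, enable_variable and
    # activated_elem, and dspace['dspace_size'] == 2.
    return dspace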
class Study(StudyManager):
def __init__(self, year_start=2000, year_end=2020, time_step=1, name='', execution_engine=None):
super().__init__(__file__, execution_engine=execution_engine)
self.study_name = 'usecase'
self.macro_name = '.Macroeconomics'
self.year_start = year_start
self.year_end = year_end
self.time_step = time_step
self.nb_poles = 8
def setup_usecase(self):
setup_data_list = []
years = np.arange(self.year_start, self.year_end + 1, 1)
self.nb_per = round(self.year_end - self.year_start + 1)
# data dir
data_dir = join(
dirname(dirname(dirname(dirname(dirname(__file__))))), 'tests', 'data')
if self.year_start == 2000 and self.year_end == 2020:
data_dir = join(
dirname(dirname(dirname(dirname(dirname(__file__))))), 'tests', 'data/sectorization_fitting')
#Invest
hist_invest = pd.read_csv(join(data_dir, 'hist_invest_sectors.csv'))
agri_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Agriculture']})
services_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Services']})
indus_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Industry']})
#Energy
hist_energy = pd.read_csv(join(data_dir, 'hist_energy_sect.csv'))
agri_energy = pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Agriculture']})
services_energy = pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Services']})
indus_energy = | pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Industry']}) | pandas.DataFrame |
"""
**hep_ml.speedup** is a module for obtaining formulas with machine learning,
which can be applied very fast (with a speed comparable to simple selections),
while keeping a high quality of classification.
In many applications (e.g. triggers in HEP) it is crucial to have a really fast formula.
This module contains tools to prepare formulas which can be applied with a speed comparable to cuts.
Example
-------
Let's show how one can use some really heavy classifier and still have fast predictions:
>>> from sklearn.ensemble import RandomForestClassifier
>>> from hep_ml.speedup import LookupClassifier
>>> base_classifier = RandomForestClassifier(n_estimators=1000, max_depth=25)
>>> classifier = LookupClassifier(base_estimator=base_classifier, keep_trained_estimator=False)
>>> classifier.fit(X, y, sample_weight=sample_weight)
Though training takes much time, all predictions are precomputed and saved to a lookup table,
so you are able to predict millions of events per second using a single CPU.
>>> classifier.predict_proba(testX)
"""
from __future__ import division, print_function, absolute_import
import numpy
import pandas
from collections import OrderedDict
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from .commonutils import to_pandas_dataframe, check_xyw, check_sample_weight, weighted_quantile
__author__ = '<NAME>'
class LookupClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, base_estimator, n_bins=16, max_cells=500000000, keep_trained_estimator=True):
"""
        LookupClassifier splits each feature into bins and trains a base_estimator on the binned data.
        To predict the class of a new observation, the predictions of base_estimator are precomputed
        for all possible combinations of bins and saved in a lookup table.
:param n_bins:
* int: how many bins to use for each axis
* dict: feature_name -> int, specialize how many bins to use for each axis
* dict: feature_name -> list of floats, set manually edges of bins
By default, the (weighted) quantiles are used to compute bin edges.
:type n_bins: int | dict
:param int max_cells: raise error if lookup table will have more items.
:param bool keep_trained_estimator: if True, trained estimator will be saved.
See also: this idea is used inside LHCb triggers, see <NAME>, <NAME>, 'Bonsai BDT'
        The resulting formula is very simple and can be rewritten in another language or environment (C++, CUDA, etc.).
"""
self.base_estimator = base_estimator
self.n_bins = n_bins
self.max_cells = max_cells
self.keep_trained_estimator = keep_trained_estimator
def fit(self, X, y, sample_weight=None):
"""Train a classifier and collect predictions for all possible combinations.
:param X: pandas.DataFrame or numpy.array with data of shape [n_samples, n_features]
:param y: array with labels of shape [n_samples]
:param sample_weight: None or array of shape [n_samples] with weights of events
:return: self
"""
self.classes_ = numpy.unique(y)
X, y, normed_weights = check_xyw(X, y, sample_weight=sample_weight, classification=True)
X = to_pandas_dataframe(X)
normed_weights = check_sample_weight(y, sample_weight=normed_weights, normalize_by_class=True, normalize=True)
self.bin_edges = self._compute_bin_edges(X, normed_weights=normed_weights)
n_parameter_combinations = numpy.prod([len(bin_edge) + 1 for name, bin_edge in self.bin_edges.items()])
assert n_parameter_combinations <= self.max_cells, \
'the total size of lookup table exceeds {}, ' \
'reduce n_bins or number of features in use'.format(self.max_cells)
transformed_data = self.transform(X)
trained_estimator = clone(self.base_estimator)
fit_params = {}
if sample_weight is not None:
            fit_params['sample_weight'] = sample_weight
trained_estimator.fit(transformed_data, y, **fit_params)
all_lookup_indices = numpy.arange(int(n_parameter_combinations))
all_combinations = self.convert_lookup_index_to_bins(all_lookup_indices)
self._lookup_table = trained_estimator.predict_proba(all_combinations)
if self.keep_trained_estimator:
self.trained_estimator = trained_estimator
return self
def _compute_bin_edges(self, X, normed_weights):
"""
        Compute the edges of the bins; weighted quantiles are used.
"""
bins_over_axis = OrderedDict()
for column in X.columns:
if isinstance(self.n_bins, int):
bins_over_axis[column] = self.n_bins
else:
bins_over_axis[column] = self.n_bins[column]
bin_edges = OrderedDict()
for column, column_bins in bins_over_axis.items():
if isinstance(column_bins, int):
quantiles = numpy.linspace(0., 1., column_bins + 1)[1:-1]
bin_edges[column] = weighted_quantile(X[column], quantiles=quantiles, sample_weight=normed_weights)
else:
bin_edges[column] = numpy.array(list(column_bins))
return bin_edges
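    # Example of the two accepted n_bins forms handled above (column names are
    # illustrative): n_bins=16 places 15 inner quantile edges on every column,
    # while n_bins={'pt': 8, 'eta': [0.0, 1.5, 2.5]} uses 7 quantile edges for
    # 'pt' and the explicit edges given for 'eta'.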
def convert_bins_to_lookup_index(self, bins_indices):
"""
:param bins_indices: numpy.array of shape [n_samples, n_columns], filled with indices of bins.
:return: numpy.array of shape [n_samples] with corresponding index in lookup table
"""
lookup_indices = numpy.zeros(len(bins_indices), dtype=int)
bins_indices = numpy.array(bins_indices)
assert bins_indices.shape[1] == len(self.bin_edges)
for i, (column_name, bin_edges) in enumerate(self.bin_edges.items()):
lookup_indices *= len(bin_edges) + 1
lookup_indices += bins_indices[:, i]
return lookup_indices
def convert_lookup_index_to_bins(self, lookup_indices):
"""
:param lookup_indices: array of shape [n_samples] with positions at lookup table
:return: array of shape [n_samples, n_features] with indices of bins.
"""
result = numpy.zeros([len(lookup_indices), len(self.bin_edges)], dtype='uint8')
for i, (column_name, bin_edges) in list(enumerate(self.bin_edges.items()))[::-1]:
n_columns = len(bin_edges) + 1
result[:, i] = lookup_indices % n_columns
lookup_indices = lookup_indices // n_columns
return result
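    # Worked example (illustrative): with two features split into 3 and 4 bins
    # (i.e. 2 and 3 inner edges), bin indices (i, j) map to the flat index
    # i * 4 + j, so (2, 1) -> 9; convert_lookup_index_to_bins inverts this with
    # repeated divmod: 9 -> (9 // 4, 9 % 4) = (2, 1).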
def transform(self, X):
"""Convert data to bin indices.
:param X: pandas.DataFrame or numpy.array with data
:return: pandas.DataFrame, where each column is replaced with index of bin
"""
X = to_pandas_dataframe(X)
assert list(X.columns) == list(self.bin_edges.keys()), 'passed dataset with wrong columns'
result = numpy.zeros(X.shape, dtype='uint8')
for i, column in enumerate(X.columns):
edges = self.bin_edges[column]
result[:, i] = numpy.searchsorted(edges, X[column])
return | pandas.DataFrame(result, columns=X.columns) | pandas.DataFrame |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
        tup1 = (per.year, per.month, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "S") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "S") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("H", "S") == ival_D_to_H_start
assert ival_D.asfreq("H", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "S") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("S", "S") == ival_D_to_S_start
assert ival_D.asfreq("S", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_end_of_bus = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_to_A = Period(freq="A", year=2007)
ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_H_to_M = Period(freq="M", year=2007, month=1)
ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_H_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_H_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_H_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_H_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_H_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_H_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
assert ival_H.asfreq("A") == ival_H_to_A
assert ival_H_end_of_year.asfreq("A") == ival_H_to_A
assert ival_H.asfreq("Q") == ival_H_to_Q
assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q
assert ival_H.asfreq("M") == ival_H_to_M
assert ival_H_end_of_month.asfreq("M") == ival_H_to_M
assert ival_H.asfreq("W") == ival_H_to_W
assert ival_H_end_of_week.asfreq("W") == ival_H_to_W
assert ival_H.asfreq("D") == ival_H_to_D
assert ival_H_end_of_day.asfreq("D") == ival_H_to_D
assert ival_H.asfreq("B") == ival_H_to_B
assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B
assert ival_H.asfreq("Min", "S") == ival_H_to_T_start
assert ival_H.asfreq("Min", "E") == ival_H_to_T_end
assert ival_H.asfreq("S", "S") == ival_H_to_S_start
assert ival_H.asfreq("S", "E") == ival_H_to_S_end
assert ival_H.asfreq("H") == ival_H
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
ival_T_end_of_year = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_T_end_of_quarter = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_T_end_of_month = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_T_end_of_week = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_T_end_of_day = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_bus = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_hour = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_T_to_A = Period(freq="A", year=2007)
ival_T_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_T_to_M = Period(freq="M", year=2007, month=1)
ival_T_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_T_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_T_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_T_to_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_T_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
assert ival_T.asfreq("A") == ival_T_to_A
assert ival_T_end_of_year.asfreq("A") == ival_T_to_A
assert ival_T.asfreq("Q") == ival_T_to_Q
assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q
assert ival_T.asfreq("M") == ival_T_to_M
assert ival_T_end_of_month.asfreq("M") == ival_T_to_M
assert ival_T.asfreq("W") == ival_T_to_W
assert ival_T_end_of_week.asfreq("W") == ival_T_to_W
assert ival_T.asfreq("D") == ival_T_to_D
assert ival_T_end_of_day.asfreq("D") == ival_T_to_D
assert ival_T.asfreq("B") == ival_T_to_B
assert ival_T_end_of_bus.asfreq("B") == ival_T_to_B
assert ival_T.asfreq("H") == ival_T_to_H
assert ival_T_end_of_hour.asfreq("H") == ival_T_to_H
assert ival_T.asfreq("S", "S") == ival_T_to_S_start
assert ival_T.asfreq("S", "E") == ival_T_to_S_end
assert ival_T.asfreq("Min") == ival_T
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0)
ival_S_end_of_year = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_quarter = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_month = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_week = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
ival_S_end_of_day = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_bus = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_hour = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
ival_S_end_of_minute = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
ival_S_to_A = Period(freq="A", year=2007)
ival_S_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_S_to_M = Period(freq="M", year=2007, month=1)
ival_S_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_S_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_S_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_S_to_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_S_to_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
assert ival_S.asfreq("A") == ival_S_to_A
assert ival_S_end_of_year.asfreq("A") == ival_S_to_A
assert ival_S.asfreq("Q") == ival_S_to_Q
assert ival_S_end_of_quarter.asfreq("Q") == ival_S_to_Q
assert ival_S.asfreq("M") == ival_S_to_M
assert ival_S_end_of_month.asfreq("M") == ival_S_to_M
assert ival_S.asfreq("W") == ival_S_to_W
assert ival_S_end_of_week.asfreq("W") == ival_S_to_W
assert ival_S.asfreq("D") == ival_S_to_D
assert ival_S_end_of_day.asfreq("D") == ival_S_to_D
assert ival_S.asfreq("B") == ival_S_to_B
assert ival_S_end_of_bus.asfreq("B") == ival_S_to_B
assert ival_S.asfreq("H") == ival_S_to_H
assert ival_S_end_of_hour.asfreq("H") == ival_S_to_H
assert ival_S.asfreq("Min") == ival_S_to_T
assert ival_S_end_of_minute.asfreq("Min") == ival_S_to_T
assert ival_S.asfreq("S") == ival_S
def test_conv_microsecond(self):
# GH#31475 Avoid floating point errors dropping the start_time to
# before the beginning of the Period
per = | Period("2020-01-30 15:57:27.576166", freq="U") | pandas.Period |
"""Tests for the sdv.constraints.base module."""
import warnings
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianUnivariate
from rdt.hyper_transformer import HyperTransformer
from sdv.constraints.base import Constraint, _get_qualified_name, get_subclasses, import_object
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import ColumnFormula, UniqueCombinations
def test__get_qualified_name_class():
"""Test the ``_get_qualified_name`` function, if a class is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a class.
Input:
- A class.
Output:
- The class qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(Constraint)
# Assert
expected_name = 'sdv.constraints.base.Constraint'
assert fully_qualified_name == expected_name
def test__get_qualified_name_function():
"""Test the ``_get_qualified_name`` function, if a function is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a function.
Input:
- A function.
Output:
- The function qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(_get_qualified_name)
# Assert
expected_name = 'sdv.constraints.base._get_qualified_name'
assert fully_qualified_name == expected_name
def test_get_subclasses():
"""Test the ``get_subclasses`` function.
The ``get_subclasses`` function is expected to:
- Recursively find subclasses for the class object passed.
Setup:
- Create three classes, Parent, Child and GrandChild,
which inherit of each other hierarchically.
Input:
- The Parent class.
Output:
- Dict of the subclasses of the class: ``Child`` and ``GrandChild`` classes.
"""
# Setup
class Parent:
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
# Run
subclasses = get_subclasses(Parent)
# Assert
expected_subclasses = {
'Child': Child,
'GrandChild': GrandChild
}
assert subclasses == expected_subclasses
def test_import_object_class():
"""Test the ``import_object`` function, when importing a class.
The ``import_object`` function is expected to:
- Import a class from its qualifed name.
Input:
- Qualified name of the class.
Output:
- The imported class.
"""
# Run
obj = import_object('sdv.constraints.base.Constraint')
# Assert
assert obj is Constraint
def test_import_object_function():
"""Test the ``import_object`` function, when importing a function.
The ``import_object`` function is expected to:
- Import a function from its qualifed name.
Input:
- Qualified name of the function.
Output:
- The imported function.
"""
# Run
imported = import_object('sdv.constraints.base.import_object')
# Assert
assert imported is import_object
class TestConstraint():
def test__identity(self):
"""Test ```Constraint._identity`` method.
``_identity`` method should return whatever it is passed.
Input:
- anything
Output:
- Input
"""
# Run
instance = Constraint('all')
output = instance._identity('input')
# Asserts
assert output == 'input'
def test___init___transform(self):
"""Test ```Constraint.__init__`` method when 'transform' is passed.
If 'transform' is given, the ``__init__`` method should replace the ``is_valid`` method
with an identity and leave ``transform`` and ``reverse_transform`` untouched.
Input:
- transform
Side effects:
- is_valid == identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='transform')
# Asserts
assert instance.filter_valid == instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___reject_sampling(self):
"""Test ``Constraint.__init__`` method when 'reject_sampling' is passed.
If 'reject_sampling' is given, the ``__init__`` method should replace the ``transform``
and ``reverse_transform`` methods with an identity and leave ``is_valid`` untouched.
Input:
- reject_sampling
Side effects:
- is_valid != identity
- transform == identity
- reverse_transform == identity
"""
# Run
instance = Constraint(handling_strategy='reject_sampling')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init___all(self):
"""Test ``Constraint.__init__`` method when 'all' is passed.
If 'all' is given, the ``__init__`` method should leave ``transform``,
``reverse_transform`` and ``is_valid`` untouched.
Input:
- all
Side effects:
- is_valid != identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='all')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___not_known(self):
"""Test ``Constraint.__init__`` method when an unknown ``handling_strategy`` is passed.
If an unknown ``handling_strategy`` is given, a ValueError is raised.
Input:
- not_known
Side effects:
- ValueError
"""
# Run
with pytest.raises(ValueError):
Constraint(handling_strategy='not_known')
def test_fit(self):
"""Test the ``Constraint.fit`` method.
The base ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._fit = Mock()
# Run
instance.fit(table_data)
# Assert
instance._fit.assert_called_once_with(table_data)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
def test_fit_gaussian_multivariate_correct_distribution(self, gm_mock):
"""Test the ``GaussianMultivariate`` from the ``Constraint.fit`` method.
The ``GaussianMultivariate`` is expected to be called with default distribution
set as ``GaussianUnivariate``.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.assert_called_once_with(distribution=GaussianUnivariate)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
@patch('sdv.constraints.base.HyperTransformer', spec_set=HyperTransformer)
def test_fit_trains_column_model(self, ht_mock, gm_mock):
"""Test the ``Constraint.fit`` method trains the column model.
When ``fit_columns_model`` is True and there are multiple ``constraint_columns``,
the ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
- Create ``_hyper_transformer``.
- Create ``_column_model`` and train it.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.return_value.fit.assert_called_once()
calls = ht_mock.return_value.fit_transform.mock_calls
args = calls[0][1]
assert len(calls) == 1
pd.testing.assert_frame_equal(args[0], table_data)
def test_transform(self):
"""Test the ``Constraint.transform`` method.
It is an identity method for completion, to be optionally
overwritten by subclasses.
The ``Constraint.transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.transform('input')
# Assert
assert output == 'input'
def test_transform_calls__transform(self):
"""Test that the ``Constraint.transform`` method calls ``_transform``.
The ``Constraint.transform`` method is expected to:
- Return value returned by ``_transform``.
Input:
- Anything
Output:
- Result of ``_transform(input)``
"""
# Setup
constraint_mock = Mock()
constraint_mock.fit_columns_model = False
constraint_mock._transform.return_value = 'the_transformed_data'
constraint_mock._validate_columns.return_value = pd.DataFrame()
# Run
output = Constraint.transform(constraint_mock, 'input')
# Assert
assert output == 'the_transformed_data'
def test_transform_model_disabled_any_columns_missing(self):
"""Test the ``Constraint.transform`` method with invalid data.
If ``table_data`` is missing any columns and ``fit_columns_model``
is False, it should raise a ``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c']))
def test_transform_model_enabled_all_columns_missing(self):
"""Test the ``Constraint.transform`` method with missing columns.
If ``table_data`` is missing all of the ``constraint_columns`` and
``fit_columns_model`` is True, it should raise a
``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame())
def test_transform_model_enabled_some_columns_missing(self):
"""Test that the ``Constraint.transform`` method uses column model.
If ``table_data`` is missing some of the ``constraint_columns``,
the ``_column_model`` should be used to sample the rest and the
data should be transformed.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
conditions = [
pd.DataFrame([[5, 1, 2]], columns=['a', 'b', 'c']),
pd.DataFrame([[6, 3, 4]], columns=['a', 'b', 'c'])
]
transformed_conditions = [
pd.DataFrame([[1]], columns=['b']),
pd.DataFrame([[3]], columns=['b'])
]
instance._columns_model.sample.return_value = pd.DataFrame([
[1, 2, 3]
], columns=['b', 'c', 'a'])
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform.side_effect = conditions
# Run
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
transformed_data = instance.transform(data)
# Assert
expected_transformed_data = pd.DataFrame([[1, 2, 3]], columns=['b', 'c', 'a'])
expected_result = pd.DataFrame([
[5, 1, 2],
[6, 3, 4]
], columns=['a', 'b', 'c'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 1})
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 3})
reverse_transform_calls = instance._hyper_transformer.reverse_transform.mock_calls
pd.testing.assert_frame_equal(reverse_transform_calls[0][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(reverse_transform_calls[1][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling(self):
"""Test the ``Constraint.transform`` method's reject sampling.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows.
Setup:
- The ``_columns_model`` returns some valid_rows the first time,
and then the rest with the next call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b']),
pd.DataFrame([
[1, 4],
[1, 5],
[1, 6],
[1, 7]
], columns=['a', 'b']),
]
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
assert model_calls[1][2]['num_rows'] > 3
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling_error(self):
"""Test that the ``Constraint.transform`` method raises an error appropriately.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If it doesn't
get any valid rows in 100 tries, a ``ValueError`` is raised.
Setup:
- The ``_columns_model`` is fixed to always return an empty ``DataFrame``.
Input:
- Table with some missing columns.
Side Effect:
- ``ValueError`` raised.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = pd.DataFrame([[1]], columns=['b'])
instance._columns_model.sample.return_value = pd.DataFrame()
instance._hyper_transformer.transform.return_value = transformed_conditions
instance._hyper_transformer.reverse_transform.return_value = | pd.DataFrame() | pandas.DataFrame |
# Import required libraries
import pandas as pd
import nest_asyncio
import numpy as np
import warnings
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
# Configurations
warnings.filterwarnings('ignore')
# This function takes a card and transforms it to the shape required by the models
def transform_card(insert):
# Create the empty dataset to populate with our card
set_df = pd.DataFrame(columns=['name', 'lang', 'released_at', 'mana_cost', 'cmc', 'type_line',
'oracle_text', 'power', 'toughness', 'colors', 'color_identity',
'keywords', 'legalities', 'games', 'set', 'set_name', 'set_type',
'digital', 'rarity', 'flavor_text', 'artist', 'edhrec_rank', 'prices',
'loyalty', 'prints','image_uris', 'card_faces', 'oracle_text_1', 'oracle_text_2',
'image_uris_1', 'image_uris_2'])
# Insert the new card into the empty dataframe from before
set_df = set_df.append(insert,ignore_index=True)
# If it has text in "oracle_text_1", then it's a Double Faced Card
set_df['DFC'] = np.where(set_df['oracle_text_1'] != "None", 1, 0)
# Transform the data in double faced cards
# Let's first create a dataframe that just has the card name and the column 'card_faces'
double_cards_df = set_df[['name','card_faces']].dropna()
# We also filter it so we get cards that actually have 2 sides
double_cards_df = double_cards_df[double_cards_df['card_faces']!="none"]
# If we actually have information about the 2 faces, we separate them into 2 columns
try:
double_cards_df[['face1','face2']] = pd.DataFrame(double_cards_df['card_faces'].tolist(), index= double_cards_df.index)
except:
double_cards_df[['oracle_text_1','oracle_text_2']] = "None"
# Now let's drop the column 'card_faces'
double_cards_df.drop("card_faces",axis=1, inplace=True)
# We now go into each key within the dictionary of face1 and face2 and separate them into columns
try:
double_cards_df[double_cards_df['face1'].apply(pd.Series).columns + "_1"] = double_cards_df['face1'].apply(pd.Series)
double_cards_df[double_cards_df['face2'].apply(pd.Series).columns + "_2"] = double_cards_df['face2'].apply(pd.Series)
except:
pass
# Define a list of columns we want to keep from the 2 sided cards
cols_to_keep = ['name','oracle_text_1','oracle_text_2','image_uris_1','image_uris_2', 'colors_1',
'power_1', 'toughness_1', 'loyalty_1']
# For each column in the dataframe, if it's not a selected column, we drop it
for i in double_cards_df.columns:
if i not in cols_to_keep:
double_cards_df.drop(i, axis=1, inplace=True)
# We now need to consolidate the 2 oracle texts into 1, we join them together
double_cards_df['oracle_text_dobles'] = double_cards_df['oracle_text_1'] + "\n" + double_cards_df['oracle_text_2']
# Reset the indexes
double_cards_df = double_cards_df.reset_index(drop=True)
# Merge the 2 faces info into our main df
# We now merge them by card name
set_df = set_df.merge(double_cards_df, on=["name","oracle_text_1","oracle_text_2"], how="left").drop("card_faces",axis=1)
# We use this script to replace Nulls with "None"
set_df[['oracle_text_1','oracle_text_2']] = set_df[['oracle_text_1','oracle_text_2']].fillna("None")
try:
set_df[['image_uris_1','image_uris_2', 'colors_1',
'power_1', 'toughness_1','loyalty_1']] = set_df[['image_uris_1','image_uris_2', 'colors_1', 'power_1', 'toughness_1','loyalty_1']].fillna("None")
except:
pass
# Now that we have our oracle text from the 2 card sides joined together, we want to use it to replace
# the actual "oracle_text" from the original dataframe, which is actually empty
# If oracle_text is empty (meaning it's a double faced card), we replace it with our 'oracle_text_dobles' column
set_df['oracle_text'] = np.where(set_df['oracle_text'].isna(),set_df['oracle_text_dobles'],set_df['oracle_text'])
# And now that column is useless so we drop it
set_df = set_df.drop("oracle_text_dobles",axis=1)
# We need to do the same for all the other columns. However, for these columns, we bring the results
# of the front card:
# Color of the card
try:
set_df['colors'] = np.where(set_df['colors'].isna(),set_df['colors_1'],set_df['colors'])
set_df = set_df.drop("colors_1",axis=1)
except:
pass
# Power of the creature
try:
set_df['power'] = np.where(set_df['power'].isna(),set_df['power_1'],set_df['power'])
set_df = set_df.drop("power_1",axis=1)
except:
pass
# Toughness of the creature
try:
set_df['toughness'] = np.where(set_df['toughness'].isna(),set_df['toughness_1'],set_df['toughness'])
set_df = set_df.drop("toughness_1",axis=1)
except:
pass
# Loyalty of the planeswalker
try:
set_df['loyalty'] = np.where(set_df['loyalty'].isna(),set_df['loyalty_1'],set_df['loyalty'])
set_df = set_df.drop("loyalty_1",axis=1)
except:
pass
# Finally, refresh the DFC flag (first created near the top of this function) now that the face columns are consolidated
set_df['DFC'] = np.where(set_df['oracle_text_1'] != "None", 1, 0)
# CMC grouping
# Create groupings for the cmc. For 7 or above, we group them together
set_df['cmc_grp'] = np.where(set_df['cmc'] <= 6.0, (set_df['cmc'].astype('int').astype('str'))+"_drop", "7plus_drop")
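# Hedged sketch (illustrative values only, not part of the pipeline): the np.where above
# buckets the converted mana cost into coarse string labels, e.g.
# cmc_toy = pd.DataFrame({'cmc': [0.0, 3.0, 6.0, 9.0]})
# np.where(cmc_toy['cmc'] <= 6.0,
#          cmc_toy['cmc'].astype('int').astype('str') + "_drop",
#          "7plus_drop")
# -> ['0_drop', '3_drop', '6_drop', '7plus_drop']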
# Separate the Keywords column into unique keyword columns
# Create a list to use as column names for the keyword columns
my_list = list(set_df['keywords'].apply(pd.Series).columns)
string = 'keyword_'
kw_list = [string + str(x+1) for x in my_list]
print("Keyword Columns:")
print(kw_list)
#Apply the separation to our dataset
set_df[kw_list] = set_df['keywords'].apply(pd.Series).fillna('99999')
# Separate the Legalities column into unique legality columns
#Apply the separation to our dataset
set_df[set_df['legalities'].apply(pd.Series).columns] = set_df['legalities'].apply(pd.Series)
# Separate the prices column into unique price columns
#Apply the separation to our dataset
set_df[set_df['prices'].apply(pd.Series).columns] = set_df['prices'].apply(pd.Series)
# Let's check the shape of our dataframe once again
print(f"Shape of dataframe: {set_df.shape}")
# Colors
print(f"Max colors in a card: {len(list(set_df['colors'].apply(pd.Series).fillna('99999').columns))}")
# Lets create a dataframe that joins the colors to create all possible color combinations
color_df = set_df['colors'].apply(pd.Series).fillna('')
color_df['color'] = color_df.apply(lambda x: ''.join(sorted(x)), axis=1).replace('','Colorless')
color_df = color_df[['color']]
# Replace the "colors" column in the dataframe with our new column
set_df['colors'] = color_df
print(f"Different color in data: {len(set_df['colors'].unique())}")
# Repeat the process for the "color_identity" column
color_id_df = set_df['color_identity'].apply(pd.Series).fillna('')
color_id_df['color_identity'] = color_id_df.apply(lambda x: ''.join(sorted(x)), axis=1).replace('','Colorless')
color_id_df = color_id_df[['color_identity']]
set_df['color_identity'] = color_id_df
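# Hedged sketch (toy values): joining the sorted colour letters gives one canonical key per
# card, so ['W', 'G'] and ['G', 'W'] both collapse to 'GW' and an empty row becomes 'Colorless':
# pd.Series([['W', 'G'], ['G', 'W'], []]).apply(lambda x: ''.join(sorted(x))).replace('', 'Colorless')
# -> 'GW', 'GW', 'Colorless'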
### Remove useless columns
# List of columns we no longer need
cols_to_drop = ['keywords','legalities','games','prices','usd_etched']
# Drop the columns
set_df.drop(cols_to_drop,axis=1,inplace=True)
# Creating the keywords columns
# Let's create a sub dataframe with just the name of the card and the keyword columns
temp_df = set_df[['name'] + kw_list]
# We now want to melt this dataframe so we have the name repeated as many times as keywords, but just 1 keywords column
temp_df2 = pd.melt(temp_df, id_vars=['name'], value_vars=kw_list).drop('variable',axis=1)
# Now we can pivot this sub dataframe and get a column for each keyword, with 0s and 1s depending on each card
kw_df = temp_df2.pivot(columns="value", values="value").fillna(0)
try:
kw_df = kw_df.drop('99999',axis=1)
except:
pass
try:
kw_df = kw_df.replace(regex={r'\D': 1})
except:
pass
# Let's add the name of the card to this new sub dataframe
result = pd.concat([temp_df2[['name']], kw_df], axis=1)
# Summing and resetting index will help to condense the data
final_df = result.groupby(['name']).sum().reset_index()
# We can now merge this sub dataframe with our main dataframe and get all the keywords!
set_df_kw = set_df.merge(final_df, on=['name'], how="left").drop(kw_list, axis=1)
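# Hedged sketch (hypothetical card names) of the melt -> pivot -> groupby-sum keyword expansion
# used above: one row per (name, keyword) becomes one 0/1 column per keyword, one row per card.
# toy = pd.DataFrame({'name': ['A', 'A', 'B'], 'value': ['Flying', 'Haste', 'Flying']})
# kw = toy.pivot(columns='value', values='value').notna().astype(int)
# pd.concat([toy[['name']], kw], axis=1).groupby('name').sum()
# ->        Flying  Haste
#    A           1      1
#    B           1      0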
### Replace nulls in `flavor_text`
# If a card does not have a flavor text, let's put "no flavor text" instead
set_df_kw['flavor_text'] = set_df_kw['flavor_text'].fillna("no_flavor_text")
### Replace nulls in `edhrec_rank`
# If a card does not have an edhrec_rank, let's replace it with int 999999
set_df_kw['edhrec_rank'] = set_df_kw['edhrec_rank'].fillna(999999).astype(int)
# Separate column ``type_line``
# We first separate the card type of the front from the card type of the back
try:
set_df_kw[['face','back']] = set_df_kw['type_line'].str.split(' // ',expand=True).fillna('None')
except:
set_df_kw[['face','back']] = [set_df_kw['type_line'],"None"]
# We then separate the face type using the "-" as separator
try:
set_df_kw[['face_type','face_subtype']] = set_df_kw['face'].str.split(' — ',expand=True).fillna('None')
except:
set_df_kw['face_type'] = set_df_kw['face']
set_df_kw['face_subtype'] = "None"
# If a card has a back, we then separate the back type using the "-" as separator
try:
set_df_kw[['back_type','back_subtype']] = set_df_kw['back'].str.split(' — ',expand=True).fillna('None')
except:
set_df_kw['back_type'] = set_df_kw['back']
set_df_kw['back_subtype'] = "None"
# Separate ``face_type`` in each possible token
# Let's obtain the max quantity of words within "face_type" column
max_word_len = []
for i in range(len(set_df_kw['face_type'].unique())):
append_length = len(set_df_kw['face_type'].unique()[i].split())
max_word_len.append(append_length)
face_type_max = max(max_word_len)
print(f"Max words in face_type: {face_type_max}")
# Using our result of max words in face_type, create as many face_type_N columns
face_type_cols = []
for i in range(face_type_max):
face_type_col = f"face_type_{i+1}"
face_type_cols.append(face_type_col)
# Use these columns to split the face_type column
set_df_kw[face_type_cols] = set_df_kw['face_type'].str.split(' ',expand=True).fillna('None')
# Separate ``face_subtype`` in each possible token
# Let's obtain the max quantity of words within "face_subtype" column
max_word_len = []
for i in range(len(set_df_kw['face_subtype'].unique())):
append_length = len(set_df_kw['face_subtype'].unique()[i].split())
max_word_len.append(append_length)
face_subtype_max = max(max_word_len)
print(f"Max words in face_subtype: {face_subtype_max}")
# Using our result of max words in face_subtype, create as many face_subtype_N columns
face_subtype_cols = []
for i in range(face_subtype_max):
face_subtype_col = f"face_subtype_{i+1}"
face_subtype_cols.append(face_subtype_col)
# Use these columns to split the face_subtype column
set_df_kw[face_subtype_cols] = set_df_kw['face_subtype'].str.split(' ',expand=True).fillna('None')
# Separate ``back_type`` in each possible token
# Let's obtain the max quantity of words within "back_type" column
max_word_len = []
for i in range(len(set_df_kw['back_type'].unique())):
append_length = len(set_df_kw['back_type'].unique()[i].split())
max_word_len.append(append_length)
back_type_max = max(max_word_len)
print(f"Max words in back_type: {back_type_max}")
# Using our result of max words in back_type, create as many back_type_N columns
back_type_cols = []
for i in range(back_type_max):
back_type_col = f"back_type_{i+1}"
back_type_cols.append(back_type_col)
# Use these columns to split the back_type column
set_df_kw[back_type_cols] = set_df_kw['back_type'].str.split(' ',expand=True).fillna('None')
# Separate ``back_subtype`` in each possible token
# Let's obtain the max quantity of words within "back_subtype" column
max_word_len = []
for i in range(len(set_df_kw['back_subtype'].unique())):
append_length = len(set_df_kw['back_subtype'].unique()[i].split())
max_word_len.append(append_length)
back_subtype_max = max(max_word_len)
print(f"Max words in back_subtype: {back_subtype_max}")
# Using our result of max words in back_subtype, create as many back_subtype_N columns
back_subtype_cols = []
for i in range(back_subtype_max):
back_subtype_col = f"back_subtype_{i+1}"
back_subtype_cols.append(back_subtype_col)
# Use these columns to split the back_subtype column
set_df_kw[back_subtype_cols] = set_df_kw['back_subtype'].str.split(' ',expand=True).fillna('None')
# Abilities Count
# Define a function that will split the oracle text using \n as delimiter
def count_abilities(string):
try:
abilities_count = len(string.split('\n'))
except:
abilities_count = 0
return abilities_count
# Apply the function and create the "abilities_count" column
set_df_kw['abilities_count'] = set_df_kw.apply(lambda x: count_abilities(x['oracle_text']),axis=1)
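# Hedged usage sketch (made-up oracle text): the helper just counts newline-separated lines,
# falling back to 0 when the split fails (e.g. on a non-string value).
# count_abilities("Flying\nWhen CARDNAME enters the battlefield, draw a card.")  # -> 2
# count_abilities(None)                                                          # -> 0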
# Cleave fix
# Cleave transformation
# If a card has cleave, remove the literal "[" and "]" characters and append the same oracle text with the bracketed words removed
try:
set_df_kw['oracle_text'] = np.where(set_df_kw['Cleave']==1,
set_df_kw['oracle_text'].str.replace("[", "", regex=False).str.replace("]", "", regex=False)+'\n'+set_df_kw['oracle_text'].str.replace(r"[\(\[].*?[\)\]] ", "", regex=True),
set_df_kw['oracle_text'])
except:
pass
# Monocolored, Multicolored and others
# If color column has just 1 character, it's monocolored (eg. "B" or "W")
set_df_kw['monocolored'] = np.where(set_df_kw['colors'].str.len() == 1,1,0)
# If it has more than 1 charater and it does not say Colorless, then it's multicolored
set_df_kw['multicolored'] = np.where((set_df_kw['colors'].str.len() > 1) & (set_df_kw['colors'] != "Colorless"),1,0)
# And these other variants
set_df_kw['two_colors'] = np.where(set_df_kw['colors'].str.len() == 2,1,0)
set_df_kw['three_colors'] = np.where(set_df_kw['colors'].str.len() == 3,1,0)
set_df_kw['four_colors'] = np.where(set_df_kw['colors'].str.len() == 4,1,0)
set_df_kw['five_colors'] = np.where(set_df_kw['colors'].str.len() == 5,1,0)
set_df_kw['colorless'] = np.where(set_df_kw['colors'] == "Colorless",1,0)
# Devotion
# We count how many mana symbols we find in a card CMC
set_df_kw['mana_symbols_cost'] = set_df_kw['mana_cost'].str.count('W|U|B|R|G').fillna(0)
# We also count how many specific mana symbols
set_df_kw['devotion_W'] = set_df_kw['mana_cost'].str.count('W').fillna(0)
set_df_kw['devotion_U'] = set_df_kw['mana_cost'].str.count('U').fillna(0)
set_df_kw['devotion_B'] = set_df_kw['mana_cost'].str.count('B').fillna(0)
set_df_kw['devotion_R'] = set_df_kw['mana_cost'].str.count('R').fillna(0)
set_df_kw['devotion_G'] = set_df_kw['mana_cost'].str.count('G').fillna(0)
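# Hedged sketch (hypothetical mana cost string): str.count over the symbol pattern simply
# counts coloured pips, which is what the devotion columns capture.
# pd.Series(['{1}{W}{W}{G}']).str.count('W|U|B|R|G')  # -> 3 coloured symbols
# pd.Series(['{1}{W}{W}{G}']).str.count('W')          # -> 2 (devotion to white)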
# Prices
# We create some columns to detect if we have missing prices
set_df_kw['missing_usd'] = np.where(set_df_kw['usd'].isna(), 1, 0)
set_df_kw['missing_usd_foil'] = np.where(set_df_kw['usd_foil'].isna(), 1, 0)
set_df_kw['missing_eur'] = np.where(set_df_kw['eur'].isna(), 1, 0)
set_df_kw['missing_eur_foil'] = np.where(set_df_kw['eur_foil'].isna(), 1, 0)
set_df_kw['missing_tix'] = np.where(set_df_kw['tix'].isna(), 1, 0)
# If there are missings, we fill them with 0
set_df_kw['usd'] = set_df_kw['usd'].fillna(0)
set_df_kw['eur'] = set_df_kw['eur'].fillna(0)
set_df_kw['usd_foil'] = set_df_kw['usd_foil'].fillna(0)
set_df_kw['eur_foil'] = set_df_kw['eur_foil'].fillna(0)
set_df_kw['tix'] = set_df_kw['tix'].fillna(0)
# Power & Toughness
# We just want to fill NaNs with "None" to fix any card that is not a creature
set_df_kw['power'] = set_df_kw['power'].fillna("None")
# Loyalty
# We just want to fill NaNs with "None" to fix any card that is not a planeswalker
set_df_kw['loyalty'] = set_df_kw['loyalty'].fillna('None')
# X spells
# Create a column that is 1 if it's a card with X in it's mana cost
set_df_kw['X_spell'] = np.where(set_df_kw['mana_cost'].str.contains('{X}'),1,0)
# Text `(to be removed)`
# Remove text between brackets in oracle_text
set_df_kw['oracle_text'] = set_df_kw['oracle_text'].str.replace(r"\(.*\)", "", regex=True)
# Mana symbols in oracle text
# We create a column tha that is 1 if there are mana symbols inside the oracle text
set_df_kw['mana_symbols_oracle'] = np.where(set_df_kw['oracle_text'].str.contains('{W}|{U}|{B}|{R}|{G}'),1,0)
# We count how many different mana symbols are in the oracle text
set_df_kw['mana_symbols_oracle_nbr'] = set_df_kw['oracle_text'].str.count('{W}|{U}|{B}|{R}|{G}')
# Includes tapping ability
# We create a column that is 1 if the card has {T} in the oracle_text
set_df_kw['tapping_ability'] = np.where(set_df_kw['oracle_text'].str.contains('{T}'),1,0)
# Includes multiple choice
# We create a column that is 1 if the card has '• ' in the oracle_text
set_df_kw['multiple_choice'] = np.where(set_df_kw['oracle_text'].str.contains('• '),1,0)
# Replace card name
#EXACT MATCH
for i in range(len(set_df_kw)):
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].split(" // ")[0], 'CARDNAME')
#this is to also replace cardnames from back cards
try:
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].split(" // ")[1], 'CARDNAME')
except:
pass
#FIRST NAME MATCH
for i in range(len(set_df_kw)):
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].replace(",","").split(" // ")[0].split(" ")[0], 'CARDNAME')
#this is to also replace cardnames from back cards
try:
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].replace(",","").split(" // ")[1].split(" ")[0], 'CARDNAME')
except:
pass
# Tokenize Oracle Text
# Define a function that takes the oracle text, removes undesired characters, stopwords and tokenizes it
def process_oracle(oracle):
"""Process oracle function.
Input:
oracle: a string containing an oracle
Output:
oracle_clean: a list of words containing the processed oracle
"""
import string
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
oracle = re.sub(r'\$\w*', '', oracle)
oracle = re.sub(r'^RT[\s]+', '', oracle)
oracle = re.sub(r'#', '', oracle)
oracle = re.sub("\d+", '', oracle)
# tokenize the oracle text
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
oracle_tokens = tokenizer.tokenize(oracle)
oracle_clean = []
for word in oracle_tokens:
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
# oracle_clean.append(word)
stem_word = stemmer.stem(word) # stemming word
oracle_clean.append(stem_word)
return oracle_clean
# Apply the function and create a new column
set_df_kw['oracle_tokens'] = set_df_kw['oracle_text'].apply(lambda x: process_oracle(x))
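# Hedged sketch (invented oracle line): stopwords and punctuation are dropped and the remaining
# tokens are stemmed, so a line such as
# process_oracle("Flying. When CARDNAME enters the battlefield, draw a card.")
# would come back roughly as ['fli', 'cardnam', 'enter', 'battlefield', 'draw', 'card'].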
# Create columns for each token
# Separate the tokens into columns
tokens_df = set_df_kw['oracle_tokens'].apply(pd.Series).fillna("None")
# Create a list with all the different tokens
tokens_set_list = []
remove_tokens = ['iii','None','•','x','c','r','−','g','iv','}:',
'eight','nine','ten','—','ii','u','b','w','p']
for i in tokens_df.columns:
tokens_set_list = list(set(tokens_set_list+list(tokens_df[i].unique())))
tokens_set_list = [x for x in tokens_set_list if x not in remove_tokens]
print(f"Number of tokens: {len(tokens_set_list)}")
# Create a new df with as many columns as tokens and 1s or 0s if the card has that token or not
empty_df = | pd.DataFrame(columns=tokens_set_list) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 01:48:49 2018
@author: ozkan
"""
import pandas as pd
import numpy as np
#from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from scipy import stats
from contextlib import contextmanager
import time
import gc
def nonUnique(x):
return x.nunique()
def modeValue(x):
return stats.mode(x)[0][0]
def totalBadCredit(x):
badCredit = 0
for value in x:
if(value==2 or value==3):
badCredit+=1
return badCredit
def creditOverdue(x):
overdue=0
for value in x:
if(value>0):
overdue+=1
return overdue
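# Hedged usage sketch (toy values), assuming the credit status columns have already been
# label-encoded as they are in the merge helpers below:
# s = pd.Series([0, 2, 2, 3, 1])
# nonUnique(s)                         # -> 4 distinct values
# modeValue(s)                         # -> 2, the most frequent value
# totalBadCredit(s)                    # -> 3 entries flagged as bad credit (values 2 or 3)
# creditOverdue(pd.Series([0, 5, 0]))  # -> 1 credit with days overdue > 0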
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na=True)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
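# Hedged usage sketch (toy frame, illustrative column names only):
# toy = pd.DataFrame({'NAME_CONTRACT_TYPE': ['Cash loans', 'Revolving loans'], 'AMT_CREDIT': [1, 2]})
# toy_encoded, new_cols = one_hot_encoder(toy)
# new_cols
# -> ['NAME_CONTRACT_TYPE_Cash loans', 'NAME_CONTRACT_TYPE_Revolving loans', 'NAME_CONTRACT_TYPE_nan']
# (dummy_na=True adds the trailing NaN-indicator column even when no value is missing)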
def getAppData():
data = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
len_train = len(data)
app_data = pd.concat([data, test])
print('Combined train & test input shape before any merging = {}'.format(app_data.shape))
return app_data, len_train
def generateAppFeatures(app_data):
app_data['LOAN_INCOME_RATIO'] = app_data['AMT_CREDIT'] / app_data['AMT_INCOME_TOTAL']
app_data['ANNUITY_INCOME_RATIO'] = app_data['AMT_ANNUITY'] / app_data['AMT_INCOME_TOTAL']
app_data['ANNUITY LENGTH'] = app_data['AMT_CREDIT'] / app_data['AMT_ANNUITY']
app_data['WORKING_LIFE_RATIO'] = app_data['DAYS_EMPLOYED'] / app_data['DAYS_BIRTH']
app_data['INCOME_PER_FAM'] = app_data['AMT_INCOME_TOTAL'] / app_data['CNT_FAM_MEMBERS']
app_data['CHILDREN_RATIO'] = app_data['CNT_CHILDREN'] / app_data['CNT_FAM_MEMBERS']
#app_data['INCOME_CREDIT_PCT'] = app_data['AMT_INCOME_TOTAL'] / app_data['AMT_CREDIT']
print('Shape after extra features = {}'.format(app_data.shape))
return app_data
def generateAppFeatures_v4(app_data):
app_data.loc[app_data['CODE_GENDER']=='XNA','CODE_GENDER'] = 'F'
docs = [_f for _f in app_data.columns if 'FLAG_DOC' in _f]
live = [_f for _f in app_data.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
# NaN values for DAYS_EMPLOYED: 365.243 -> nan
app_data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
inc_by_org = app_data[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
app_data['NEW_CREDIT_TO_ANNUITY_RATIO'] = app_data['AMT_CREDIT'] / app_data['AMT_ANNUITY']
app_data['NEW_CREDIT_TO_GOODS_RATIO'] = app_data['AMT_CREDIT'] / app_data['AMT_GOODS_PRICE']
app_data['NEW_DOC_IND_KURT'] = app_data[docs].kurtosis(axis=1)
app_data['NEW_LIVE_IND_SUM'] = app_data[live].sum(axis=1)
app_data['NEW_INC_PER_CHLD'] = app_data['AMT_INCOME_TOTAL'] / (1 + app_data['CNT_CHILDREN'])
app_data['NEW_INC_BY_ORG'] = app_data['ORGANIZATION_TYPE'].map(inc_by_org)
app_data['NEW_EMPLOY_TO_BIRTH_RATIO'] = app_data['DAYS_EMPLOYED'] / app_data['DAYS_BIRTH']
app_data['NEW_ANNUITY_TO_INCOME_RATIO'] = app_data['AMT_ANNUITY'] / (1 + app_data['AMT_INCOME_TOTAL'])
app_data['NEW_SOURCES_PROD'] = (app_data['EXT_SOURCE_1']+1) * (app_data['EXT_SOURCE_2']+1) * (app_data['EXT_SOURCE_3']+1)
app_data['NEW_EXT_SOURCES_MEAN'] = app_data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
app_data['NEW_SCORES_STD'] = app_data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
app_data['NEW_SCORES_STD'] = app_data['NEW_SCORES_STD'].fillna(app_data['NEW_SCORES_STD'].mean())
app_data['NEW_CAR_TO_BIRTH_RATIO'] = app_data['OWN_CAR_AGE'] / app_data['DAYS_BIRTH']
app_data['NEW_CAR_TO_EMPLOY_RATIO'] = app_data['OWN_CAR_AGE'] / app_data['DAYS_EMPLOYED']
app_data['NEW_PHONE_TO_BIRTH_RATIO'] = app_data['DAYS_LAST_PHONE_CHANGE'] / app_data['DAYS_BIRTH']
app_data['NEW_PHONE_TO_BIRTH_RATIO_EMPLOYER'] = app_data['DAYS_LAST_PHONE_CHANGE'] / app_data['DAYS_EMPLOYED']
app_data['NEW_CREDIT_TO_INCOME_RATIO'] = app_data['AMT_CREDIT'] / app_data['AMT_INCOME_TOTAL']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
app_data[bin_feature], uniques = pd.factorize(app_data[bin_feature])
# Categorical features with One-Hot encode
app_data, cat_cols = one_hot_encoder(app_data)
print('Shape after generating extra features = {}'.format(app_data.shape))
return app_data
def handlePrev(app_data):
prev = pd.read_csv('../input/previous_application.csv')
prev_group = prev.groupby('SK_ID_CURR').agg({'SK_ID_CURR': 'count',
'AMT_CREDIT': ['sum', 'mean', 'max', 'min']})
prev_group.columns = [' '.join(col).strip() for col in prev_group.columns.values]
for column in prev_group.columns:
prev_group = prev_group.rename(columns={column:'PREV_'+column})
merged_app_data = app_data.merge(prev_group, left_on='SK_ID_CURR', right_index=True, how='left')
categorical_feats = [f for f in prev.columns if prev[f].dtype == 'object']
for f_ in categorical_feats:
prev[f_], indexer = pd.factorize(prev[f_])
prev_apps_cat_mode = prev.groupby('SK_ID_CURR').agg({categorical_feats[0]:modeValue,
categorical_feats[1]:modeValue,
categorical_feats[2]:modeValue,
categorical_feats[3]:modeValue,
categorical_feats[4]:modeValue,
categorical_feats[5]:modeValue,
categorical_feats[6]:modeValue,
categorical_feats[7]:modeValue,
categorical_feats[8]:modeValue,
categorical_feats[9]:modeValue,
categorical_feats[10]:modeValue,
categorical_feats[11]:modeValue,
categorical_feats[12]:modeValue,
categorical_feats[13]:modeValue,
categorical_feats[14]:modeValue,
categorical_feats[15]:modeValue})
merged_app_data = merged_app_data.merge(prev_apps_cat_mode, left_on='SK_ID_CURR', right_index=True,
how='left', suffixes=['', '_PRVMODE'])
print('Shape after merging with PREV = {}'.format(merged_app_data.shape))
return merged_app_data
def handlePrev_v2(app_data):
prev = pd.read_csv('../input/previous_application.csv')
prev, cat_cols = one_hot_encoder(prev)
# Days 365.243 values -> nan
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
# Add feature: value ask / value received percentage
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
# Previous applications numeric features
num_aggregations = {
'AMT_ANNUITY': [ 'max', 'mean'],
'AMT_APPLICATION': [ 'max','mean'],
'AMT_CREDIT': [ 'max', 'mean'],
'APP_CREDIT_PERC': [ 'max', 'mean'],
'AMT_DOWN_PAYMENT': [ 'max', 'mean'],
'AMT_GOODS_PRICE': [ 'max', 'mean'],
'HOUR_APPR_PROCESS_START': [ 'max', 'mean'],
'RATE_DOWN_PAYMENT': [ 'max', 'mean'],
'DAYS_DECISION': [ 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
# Previous applications categorical features
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
# Previous Applications: Approved Applications - only numerical features
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
prev_agg = prev_agg.join(approved_agg, how='left')
# Previous Applications: Refused Applications - only numerical features
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
prev_agg = prev_agg.join(refused_agg, how='left')
merged_app_data = app_data.merge(prev_agg, left_on='SK_ID_CURR', right_index=True, how='left')
return merged_app_data
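# Hedged sketch of the column-flattening idiom shared by these merge helpers: groupby().agg()
# with a dict of lists yields MultiIndex columns, which are renamed into flat, prefixed names
# before joining back onto app_data.
# toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_CREDIT': [10, 20, 30]})
# agg = toy.groupby('SK_ID_CURR').agg({'AMT_CREDIT': ['max', 'mean']})
# agg.columns = pd.Index(['PREV_' + c + '_' + s.upper() for c, s in agg.columns.tolist()])
# agg.columns  # -> Index(['PREV_AMT_CREDIT_MAX', 'PREV_AMT_CREDIT_MEAN'])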
def handlePrev_v4(app_data):
prev = pd.read_csv('../input/previous_application.csv')
prev, cat_cols = one_hot_encoder(prev)
# Days 365.243 values -> nan
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
# Add feature: value ask / value received percentage
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
# Previous applications numeric features
num_aggregations = {
'AMT_ANNUITY' : [ 'max', 'mean', 'min', 'sum', 'std'],
'AMT_APPLICATION' : [ 'max', 'mean', 'min', 'sum', 'std'],
'AMT_CREDIT' : [ 'max', 'mean', 'min', 'sum', 'std'],
'APP_CREDIT_PERC' : [ 'max', 'mean', 'min', 'sum', 'std'],
'AMT_DOWN_PAYMENT' : [ 'max', 'mean', 'min', 'sum', 'std'],
'AMT_GOODS_PRICE' : [ 'max', 'mean', 'min', 'sum', 'std'],
'HOUR_APPR_PROCESS_START' : [ 'max', 'mean', 'min', 'sum', 'std'],
'RATE_DOWN_PAYMENT' : [ 'max', 'mean', 'min', 'sum', 'std'],
'DAYS_DECISION' : [ 'max', 'mean', 'min', 'sum', 'std'],
'CNT_PAYMENT' : [ 'max', 'mean', 'min', 'sum', 'std'],
}
# Previous applications categorical features
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean', 'sum']
prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
# Previous Applications: Approved Applications - only numerical features
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
prev_agg = prev_agg.join(approved_agg, how='left')
# Previous Applications: Refused Applications - only numerical features
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
prev_agg = prev_agg.join(refused_agg, how='left')
app_data = app_data.merge(prev_agg, left_on='SK_ID_CURR', right_index=True, how='left')
print('Shape after merging with PREV = {}'.format(app_data.shape))
return app_data
def handleCreditCard(app_data):
credit_card = pd.read_csv('../input/credit_card_balance.csv')
# Value Counts
app_data = app_data.merge(pd.DataFrame(credit_card['SK_ID_CURR'].value_counts()), left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_CNT_CRED_CARD'])
# Last Values
most_recent_index = credit_card.groupby('SK_ID_CURR')['MONTHS_BALANCE'].idxmax()
cat_feats = credit_card.columns[credit_card.dtypes == 'object'].tolist() + ['SK_ID_CURR']
app_data = app_data.merge(credit_card.loc[most_recent_index, cat_feats], left_on='SK_ID_CURR',
right_on='SK_ID_CURR', how='left', suffixes=['', '_CCLAST'])
print('Shape after merging with credit card data = {}'.format(app_data.shape))
return app_data
def handleCreditCard_v2(app_data):
credit_card = pd.read_csv('../input/credit_card_balance.csv')
idColumns = ['SK_ID_CURR', 'SK_ID_PREV']
cat_feats = [f for f in credit_card.columns if credit_card[f].dtype == 'object']
for f_ in cat_feats:
credit_card[f_], indexer = pd.factorize(credit_card[f_])
cat_feats = cat_feats + ['MONTHS_BALANCE']
nonNum_feats = idColumns + cat_feats
num_feats = [f for f in credit_card.columns if f not in nonNum_feats]
# Numeric Features
trans = ['sum', 'mean', 'max', 'min']
aggs = {}
for feat in num_feats:
aggs[feat]=trans
aggs['SK_ID_CURR']='count'
cc_numeric_group = credit_card.groupby('SK_ID_CURR').agg(aggs)
cc_numeric_group.columns = [' '.join(col).strip() for col in cc_numeric_group.columns.values]
for column in cc_numeric_group.columns:
cc_numeric_group = cc_numeric_group.rename(columns={column:'CC_'+column})
app_data = app_data.merge(cc_numeric_group, left_on='SK_ID_CURR', right_index=True,
how='left', suffixes=['','_CC'])
# Categorical Features
trans = modeValue
aggs = {}
for feat in cat_feats:
aggs[feat]=trans
cc_cat_group = credit_card.groupby('SK_ID_CURR').agg(aggs)
for column in cc_cat_group.columns:
cc_cat_group = cc_cat_group.rename(columns={column:'CC_'+column})
app_data = app_data.merge(cc_cat_group, left_on='SK_ID_CURR', right_index=True,
how='left', suffixes=['', '_CCMODE'])
# Last Features
most_recent_index = credit_card.groupby('SK_ID_CURR')['MONTHS_BALANCE'].idxmax()
app_data = app_data.merge(credit_card.loc[most_recent_index], on='SK_ID_CURR',
how='left', suffixes=['','_CCLAST'])
print('Shape after merging with credit card data = {}'.format(app_data.shape))
return app_data
def handleCreditCard_v4(app_data):
credit_card = pd.read_csv('../input/credit_card_balance.csv')
credit_card.drop('SK_ID_PREV', inplace=True, axis=1)
idColumns = ['SK_ID_CURR']
credit_card, cat_cols = one_hot_encoder(credit_card)
nonNum_cols = idColumns + cat_cols
num_cols = [f for f in credit_card.columns if f not in nonNum_cols]
numAggs = {}
catAggs = {}
numMethods = ['max', 'mean', 'min', 'sum', 'std']
catMethods = ['mean','sum']
for feat in num_cols:
numAggs[feat] = numMethods
for feat in cat_cols:
catAggs[feat] = catMethods
credit_agg = credit_card.groupby('SK_ID_CURR').agg({**numAggs, **catAggs})
credit_agg.columns = pd.Index(['CreditCard_' + e[0] + "_" + e[1].upper() for e in credit_agg.columns.tolist()])
app_data = app_data.merge(credit_agg, left_on='SK_ID_CURR', right_index=True, how='left')
print('Shape after merging with credit card data = {}'.format(app_data.shape))
return app_data
def handleBuro(app_data):
buro = pd.read_csv('../input/bureau.csv')
# Value Counts
app_data = app_data.merge(pd.DataFrame(buro['SK_ID_CURR'].value_counts()), left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_CNT_BUREAU'])
# Aggregate Values on All Credits
buro_group = buro.groupby('SK_ID_CURR').agg({'SK_ID_BUREAU':'count',
'AMT_CREDIT_SUM':'sum',
'AMT_CREDIT_SUM_DEBT':'sum',
'CREDIT_CURRENCY': [nonUnique, modeValue],
'CREDIT_TYPE': [nonUnique, modeValue],
'CNT_CREDIT_PROLONG': 'sum',
'CREDIT_ACTIVE': totalBadCredit,
'CREDIT_DAY_OVERDUE': creditOverdue
})
buro_group.columns = [' '.join(col).strip() for col in buro_group.columns.values]
for column in buro_group.columns:
buro_group = buro_group.rename(columns={column:'BURO_ALL_'+column})
app_data = app_data.merge(buro_group, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_BURO'])
# Aggregate Values on Active Credits
buro_active = buro.loc[buro['CREDIT_ACTIVE']==1]
buro_group_active = buro_active.groupby('SK_ID_CURR').agg({'AMT_CREDIT_SUM': ['sum', 'count'],
'AMT_CREDIT_SUM_DEBT': 'sum',
'AMT_CREDIT_SUM_LIMIT': 'sum'
})
buro_group_active.columns = [' '.join(col).strip() for col in buro_group_active.columns.values]
for column in buro_group_active.columns:
buro_group_active = buro_group_active.rename(columns={column:'BURO_ACT_'+column})
app_data = app_data.merge(buro_group_active, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_BURO_ACT'])
# Buro_Last['LastBalanceOnCreditBuro'] = Buro_Last['Active_Debt_Amount'] / Buro_Last['Active_Credit_Amount']
# Getting last credit for each user
idx = buro.groupby('SK_ID_CURR')['SK_ID_BUREAU'].transform(max) == buro['SK_ID_BUREAU']
Buro_Last = buro[idx][['SK_ID_CURR','CREDIT_TYPE','DAYS_CREDIT_UPDATE','DAYS_CREDIT',
'DAYS_CREDIT_ENDDATE','DAYS_ENDDATE_FACT', 'SK_ID_BUREAU']]
app_data = app_data.merge(Buro_Last, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_BURO_LAST'])
print('Shape after merging with credit bureau data = {}'.format(app_data.shape))
return app_data
def handleBuro_v2(app_data):
bureau = pd.read_csv('../input/bureau.csv')
bb = pd.read_csv('../input/bureau_balance.csv')
bb, bb_cat = one_hot_encoder(bb)
bureau, bureau_cat = one_hot_encoder(bureau)
# Bureau balance: Perform aggregations and merge with bureau.csv
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
# Bureau and bureau_balance numeric features
num_aggregations = {
'DAYS_CREDIT': [ 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': [ 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
# Bureau and bureau_balance categorical features
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
# Bureau: Active credits - using only numerical aggregations
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
bureau_agg = bureau_agg.join(active_agg, how='left')
del active, active_agg
gc.collect()
# Bureau: Closed credits - using only numerical aggregations
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
bureau_agg = bureau_agg.join(closed_agg, how='left')
return bureau_agg
def handleBuro_v4(app_data):
bureau = pd.read_csv('../input/bureau.csv')
bb = pd.read_csv('../input/bureau_balance.csv')
bb, bb_cat = one_hot_encoder(bb)
bureau, bureau_cat = one_hot_encoder(bureau)
# Bureau balance: Perform aggregations and merge with bureau.csv
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
# Bureau and bureau_balance numeric features
num_aggregations = {
'DAYS_CREDIT': [ 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': [ 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
# Bureau and bureau_balance categorical features
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
# Bureau: Active credits - using only numerical aggregations
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
bureau_agg = bureau_agg.join(active_agg, how='left')
del active, active_agg
gc.collect()
# Bureau: Closed credits - using only numerical aggregations
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
bureau_agg = bureau_agg.join(closed_agg, how='left')
app_data = app_data.merge(bureau_agg, left_on='SK_ID_CURR', right_index=True, how='left')
return app_data
def handleBuroBalance(app_data):
buro_balance = pd.read_csv('../input/bureau_balance.csv')
# Historical Buro Balance
buro_balance.loc[buro_balance['STATUS']=='C', 'STATUS'] = '0'
buro_balance.loc[buro_balance['STATUS']=='X', 'STATUS'] = '0'
buro_balance['STATUS'] = buro_balance['STATUS'].astype('int64')
buro_balance_group = buro_balance.groupby('SK_ID_BUREAU').agg({'STATUS':['max','mean'], 'MONTHS_BALANCE':'max'})
buro_balance_group.columns = [' '.join(col).strip() for col in buro_balance_group.columns.values]
app_data = app_data.merge(buro_balance_group, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_BALANCE_HIST'])
# Last Buro Balance
idx = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].transform(max) == buro_balance['MONTHS_BALANCE']
Buro_Balance_Last = buro_balance[idx][['SK_ID_BUREAU','STATUS']]
app_data = app_data.merge(Buro_Balance_Last, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_BALANCE_HIST'])
print('Shape after merging with Bureau Balance Data = {}'.format(app_data.shape))
return app_data
def handleBuroBalance_v2(app_data):
buro_balance = pd.read_csv('../input/bureau_balance.csv')
buro = pd.read_csv('../input/bureau.csv')
buro = buro[['SK_ID_CURR','SK_ID_BUREAU']]
# Add Historical Buro Balance
buro_balance.loc[buro_balance['STATUS']=='C', 'STATUS'] = '0'
buro_balance.loc[buro_balance['STATUS']=='X', 'STATUS'] = '0'
buro_balance['STATUS'] = buro_balance['STATUS'].astype('int64')
buro_balance_group = buro_balance.groupby('SK_ID_BUREAU').agg({'STATUS':['max','mean','min','sum'],
'MONTHS_BALANCE':['count']})
buro_balance_group.columns = [' '.join(col).strip() for col in buro_balance_group.columns.values]
buro = buro.merge(buro_balance_group, left_on='SK_ID_BUREAU',
right_index=True, how='left', suffixes=['', '_BALANCE_HIST'])
# Add Last Buro Balance
most_recent_index = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].idxmax()
Buro_Balance_Last = buro_balance.loc[most_recent_index]
buro = buro.merge(Buro_Balance_Last, on='SK_ID_BUREAU', how='left', suffixes=['', '_BALANCE_LAST'])
# All historical data for each credit is now one line
# Buro Balance summary merged with all credits in Buro
trans = ['sum', 'mean', 'max', 'min']
aggs = {}
aggregateColumns = ['STATUS max','STATUS mean','STATUS min','STATUS sum','MONTHS_BALANCE count']
for col in aggregateColumns:
aggs[col]=trans
BuroBal_AllHist_group = buro.groupby('SK_ID_CURR').agg(aggs)
BuroBal_AllHist_group.columns = [' '.join(col).strip() for col in BuroBal_AllHist_group.columns.values]
app_data = app_data.merge(BuroBal_AllHist_group, left_on='SK_ID_CURR', right_index=True,
how='left', suffixes=['','_BBHist'])
# Buro Balance summary merged with active credits in Buro
# Posponed for now
# Buro Balance summary merged with last credit in Buro
most_recent_index = buro.groupby('SK_ID_CURR')['SK_ID_BUREAU'].idxmax()
buroLast = buro.loc[most_recent_index]
buroLastBeforeMerge = buroLast.drop('SK_ID_BUREAU', axis=1)
app_data = app_data.merge(buroLastBeforeMerge, on='SK_ID_CURR',how='left', suffixes=['','_BuroBalLAST'])
print('Shape after merging with Bureau Balance Data = {}'.format(app_data.shape))
return app_data
def handlePosCash(app_data):
POS_CASH = pd.read_csv('../input/POS_CASH_balance.csv')
# Weighted by recency
wm = lambda x: np.average(x, weights=-1/POS_CASH.loc[x.index, 'MONTHS_BALANCE'])
f = {'CNT_INSTALMENT': wm, 'CNT_INSTALMENT_FUTURE': wm, 'SK_DPD': wm, 'SK_DPD_DEF':wm}
cash_avg = POS_CASH.groupby('SK_ID_CURR')[['CNT_INSTALMENT', 'CNT_INSTALMENT_FUTURE',
'SK_DPD', 'SK_DPD_DEF']].agg(f)
app_data = app_data.merge(cash_avg, left_on='SK_ID_CURR', right_index=True,
how='left', suffixes=['', '_CashAVG'])
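# Hedged sketch of the recency weighting above (toy numbers): MONTHS_BALANCE is negative
# (months before application), so weights of -1/MONTHS_BALANCE grow as the month gets closer
# to the application date. For balances observed at months -1 and -10:
# np.average([100, 40], weights=[-1 / -1, -1 / -10])  # -> ~94.5, dominated by the recent month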
# Historical Data
PosCashGroup = POS_CASH.groupby('SK_ID_CURR')[['CNT_INSTALMENT', 'CNT_INSTALMENT_FUTURE', 'SK_DPD',
'SK_DPD_DEF']].agg({
'CNT_INSTALMENT':['mean', 'max', 'min'],
'CNT_INSTALMENT_FUTURE':['mean', 'max', 'min'],
'SK_DPD':['mean', 'max', 'min'],
'SK_DPD_DEF':['mean', 'max', 'min']})
PosCashGroup.columns = [' '.join(col).strip() for col in PosCashGroup.columns.values]
for column in PosCashGroup.columns:
PosCashGroup = PosCashGroup.rename(columns={column:'PosCash_'+column})
app_data = app_data.merge(PosCashGroup, left_on='SK_ID_CURR',
right_index=True, how='left', suffixes=['', '_POSCASH'])
# Last Values
most_recent_index = POS_CASH.groupby('SK_ID_CURR')['MONTHS_BALANCE'].idxmax()
cat_feats = POS_CASH.columns[POS_CASH.dtypes == 'object'].tolist() + ['SK_ID_CURR']
app_data = app_data.merge(POS_CASH.loc[most_recent_index, cat_feats], on='SK_ID_CURR',
how='left', suffixes=['', '_PosCashLast'])
print('Shape after merging with pos cash data = {}'.format(app_data.shape))
return app_data
def handlePosCash_v2(app_data):
POS_CASH = pd.read_csv('../input/POS_CASH_balance.csv')
idColumns = ['SK_ID_CURR', 'SK_ID_PREV']
cat_feats = [f for f in POS_CASH.columns if POS_CASH[f].dtype == 'object']
for f_ in cat_feats:
POS_CASH[f_], indexer = | pd.factorize(POS_CASH[f_]) | pandas.factorize |
"""
This module implements visualizations for EOPatch
Credits:
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import numpy as np
import pandas as pd
import geopandas as gpd
import xarray as xr
import holoviews as hv
import geoviews as gv
import hvplot # pylint: disable=unused-import
import hvplot.xarray # pylint: disable=unused-import
import hvplot.pandas # pylint: disable=unused-import
from cartopy import crs as ccrs
from shapely.geometry import Polygon
import shapely
shapely.speedups.disable()
from sentinelhub import CRS
## TODO!
# make using sys obsolete
import sys
sys.path.append("D:/Code/eotopia/repo_core")
from constants import FeatureType, FeatureTypeSet
from utilities import FeatureParser
sys.path.append("D:/Code/eotopia/repo_visualization_eopatchcore")
from xarray_utils_viz import array_to_dataframe, new_coordinates, string_to_variable
PLOT_WIDTH = 800
PLOT_HEIGHT = 500
class EOPatchVisualization:
"""
Plot class for making visulizations.
:param eopatch: eopatch
:type eopatch: EOPatch
:param feature: feature of eopatch
:type feature: (FeatureType, str)
:param rgb: bands for creating RGB image
:type rgb: [int, int, int]
:param rgb_factor: multiplication factor for constructing rgb image
:type rgb_factor: float
:param vdims: value dimensions for plotting geopandas.GeoDataFrame
:type vdims: str
:param timestamp_column: geopandas.GeoDataFrame columns with timestamps
:type timestamp_column: str
:param geometry_column: geopandas.GeoDataFrame columns with geometry
:type geometry_column: geometry
    :param pixel: whether to plot data for each pixel (line), for FeatureType.DATA and FeatureType.MASK
:type pixel: bool
:param mask: name of the FeatureType.MASK to apply to data
:type mask: str
"""
def __init__(self, eopatch, feature, rgb=None, rgb_factor=3.5, vdims=None,
timestamp_column='TIMESTAMP', geometry_column='geometry', pixel=False, mask=None):
self.eopatch = eopatch
self.feature = feature
self.rgb = list(rgb) if isinstance(rgb, tuple) else rgb
self.rgb_factor = rgb_factor
self.vdims = vdims
self.timestamp_column = timestamp_column
self.geometry_column = geometry_column
self.pixel = pixel
self.mask = mask
def plot(self):
""" Plots eopatch
:return: plot
        :rtype: holoviews/bokeh
"""
features = list(FeatureParser(self.feature))
feature_type, feature_name = features[0]
if self.pixel and feature_type in FeatureTypeSet.RASTER_TYPES_4D:
vis = self.plot_pixel(feature_type, feature_name)
elif feature_type in (FeatureType.MASK, *FeatureTypeSet.RASTER_TYPES_3D):
vis = self.plot_raster(feature_type, feature_name)
elif self.rgb and feature_type is FeatureType.DATA:
vis = self.plot_data_rgb(feature_name)
elif feature_type is FeatureType.VECTOR:
vis = self.plot_vector(feature_name)
elif feature_type is FeatureType.VECTOR_TIMELESS:
vis = self.plot_vector_timeless(feature_name)
else: # elif feature_type in (FeatureType.SCALAR, FeatureType.LABEL):
vis = self.plot_scalar_label(feature_type, feature_name)
return vis.opts(plot=dict(width=PLOT_WIDTH, height=PLOT_HEIGHT))
def plot_data_rgb(self, feature_name):
""" Plots the FeatureType.DATA of eopatch.
:param feature_name: name of the eopatch feature
:type feature_name: str
:return: visualization
:rtype: holoview/geoviews/bokeh
"""
crs = self.eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(self.eopatch, (FeatureType.DATA, feature_name), crs=crs)
if self.mask:
data_da = self.mask_data(data_da)
timestamps = self.eopatch.timestamp
crs = self.eopatch.bbox.crs
if not self.rgb:
return data_da.hvplot(x='x', y='y', crs=ccrs.epsg(crs.epsg))
data_rgb = self.eopatch_da_to_rgb(data_da, feature_name, crs)
rgb_dict = {timestamp_: self.plot_rgb_one(data_rgb, timestamp_) for timestamp_ in timestamps}
return hv.HoloMap(rgb_dict, kdims=['time'])
@staticmethod
def plot_rgb_one(eopatch_da, timestamp): # OK
""" Returns visualization for one timestamp for FeatureType.DATA
:param eopatch_da: eopatch converted to xarray DataArray
:type eopatch_da: xarray DataArray
:param timestamp: timestamp to make plot for
:type timestamp: datetime
:return: visualization
:rtype: holoviews/geoviews/bokeh
"""
return eopatch_da.sel(time=timestamp).drop('time').hvplot(x='x', y='y')
def plot_raster(self, feature_type, feature_name):
""" Makes visualization for raster data (except for FeatureType.DATA)
:param feature_type: type of eopatch feature
:type feature_type: FeatureType
:param feature_name: name of eopatch feature
:type feature_name: str
:return: visualization
:rtype: holoviews/geoviews/bokeh
"""
crs = self.eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(self.eopatch, (feature_type, feature_name), crs=crs)
data_min = data_da.values.min()
data_max = data_da.values.max()
data_levels = len(np.unique(data_da))
data_levels = 11 if data_levels > 11 else data_levels
data_da = data_da.where(data_da > 0).fillna(-1)
vis = data_da.hvplot(x='x', y='y',
crs=ccrs.epsg(crs.epsg)).opts(clim=(data_min, data_max),
clipping_colors={'min': 'transparent'},
color_levels=data_levels)
return vis
def plot_vector(self, feature_name):
""" Visualizaton for vector (FeatureType.VECTOR) data
:param feature_name: name of eopatch feature
:type feature_name: str
:return: visualization
:rtype: holoviews/geoviews/bokeh
"""
crs = self.eopatch.bbox.crs
timestamps = self.eopatch.timestamp
data_gpd = self.fill_vector(FeatureType.VECTOR, feature_name)
if crs is CRS.WGS84:
crs = CRS.POP_WEB
data_gpd = data_gpd.to_crs(crs.pyproj_crs())
shapes_dict = {timestamp_: self.plot_shapes_one(data_gpd, timestamp_, crs)
for timestamp_ in timestamps}
return hv.HoloMap(shapes_dict, kdims=['time'])
def fill_vector(self, feature_type, feature_name):
""" Adds timestamps from eopatch to GeoDataFrame.
:param feature_type: type of eopatch feature
:type feature_type: FeatureType
:param feature_name: name of eopatch feature
:type feature_name: str
:return: GeoDataFrame with added data
:rtype: geopandas.GeoDataFrame
"""
vector = self.eopatch[feature_type][feature_name].copy()
vector['valid'] = True
eopatch_timestamps = self.eopatch.timestamp
vector_timestamps = set(vector[self.timestamp_column])
blank_timestamps = [timestamp for timestamp in eopatch_timestamps if timestamp not in vector_timestamps]
dummy_geometry = self.create_dummy_polygon(0.0000001)
temp_df = self.create_dummy_dataframe(vector,
blank_timestamps=blank_timestamps,
dummy_geometry=dummy_geometry)
final_vector = gpd.GeoDataFrame(pd.concat((vector, temp_df), ignore_index=True),
crs=vector.crs)
return final_vector
def create_dummy_dataframe(self, geodataframe, blank_timestamps, dummy_geometry,
fill_str='', fill_numeric=1):
""" Creates geopadnas GeoDataFrame to fill with dummy data (for visualization)
:param geodataframe: dataframe to append rows to
:type geodataframe: geopandas.GeoDataFrame
:param blank_timestamps: timestamps for constructing dataframe
:type blank_timestamps: list of timestamps
:param dummy_geometry: geometry to plot when there is no data
:type dummy_geometry: shapely.geometry.Polygon
:param fill_str: insert when there is no value in str column
:type fill_str: str
        :param fill_numeric: insert when there is no value in numeric column
:type fill_numeric: float
:return: dataframe with dummy data
:rtype: geopandas.GeoDataFrame
"""
dataframe = | pd.DataFrame(data=blank_timestamps, columns=[self.timestamp_column]) | pandas.DataFrame |
# Imports: standard library
import re
import copy
import datetime
from typing import Dict, List, Tuple, Union, Optional
# Imports: third party
import numpy as np
import pandas as pd
# Imports: first party
from ml4c3.metrics import weighted_crossentropy
from definitions.ecg import ECG_PREFIX
from definitions.ici import ICI_PREFIX, ICI_DATE_COLUMN
from definitions.sts import STS_PREFIX, STS_SURGERY_DATE_COLUMN
from definitions.echo import ECHO_PREFIX, ECHO_DATETIME_COLUMN
from definitions.globals import SECONDS_IN_DAY
from tensormap.TensorMap import (
Dates,
TensorMap,
PatientData,
Interpretation,
make_default_time_series_filter,
)
def update_tmaps_weighted_loss(
tmap_name: str,
tmaps: Dict[str, TensorMap],
) -> Dict[str, TensorMap]:
"""Make new tmap from base name, modifying loss weight"""
if "_weighted_loss_" not in tmap_name:
return tmaps
base_name, weight = tmap_name.split("_weighted_loss_")
if base_name not in tmaps:
raise ValueError(
f"Base tmap {base_name} not in existing tmaps. "
f"Cannot modify weighted loss.",
)
weight = weight.split("_")[0]
tmap = copy.deepcopy(tmaps[base_name])
new_tmap_name = f"{base_name}_weighted_loss_{weight}"
tmap.name = new_tmap_name
tmap.loss = weighted_crossentropy([1.0, float(weight)], new_tmap_name)
tmaps[new_tmap_name] = tmap
return tmaps
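# Illustrative usage sketch: "ecg_label" is a hypothetical base tmap name and must
# already be registered in `tmaps` for the lookup to succeed.
def _example_weighted_loss_usage(tmaps: Dict[str, TensorMap]) -> TensorMap:
    """Derive a copy of an existing tmap whose loss up-weights the positive class 5x."""
    updated = update_tmaps_weighted_loss("ecg_label_weighted_loss_5", tmaps)
    # The derived tmap keeps the base shape/interpretation but uses
    # weighted_crossentropy([1.0, 5.0]) as its loss.
    return updated["ecg_label_weighted_loss_5"]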
random_date_selections: Dict[str, Union[List[str], pd.Series]] = dict()
def update_tmaps_time_series(
tmap_name: str,
tmaps: Dict[str, TensorMap],
time_series_limit: Optional[int] = None,
) -> Dict[str, TensorMap]:
"""Given the name of a needed tensor maps, e.g. ["ecg_age_newest"], and its base
TMap, e.g. tmaps["ecg_age"], this function creates new tmap with the name of the
needed tmap and the correct shape, but otherwise inherits properties from the base
tmap. Next, updates new tmap to tmaps dict.
"""
if "_newest" in tmap_name:
base_split = "_newest"
elif "_oldest" in tmap_name:
base_split = "_oldest"
elif "_random" in tmap_name:
base_split = "_random"
else:
return tmaps
base_name, _ = tmap_name.split(base_split)
if base_name not in tmaps:
raise ValueError(
f"Base tmap {base_name} not in existing tmaps. Cannot modify time series.",
)
base_tmap = tmaps[base_name]
def updated_time_series_filter(data: PatientData) -> Dates:
_dates = base_tmap.time_series_filter(data)
_dates = (
_dates.sort_values() if isinstance(_dates, pd.Series) else sorted(_dates)
)
tsl = 1 if time_series_limit is None else time_series_limit
if "_random" in tmap_name:
if data.id in random_date_selections:
return random_date_selections[data.id]
if len(_dates) < tsl:
tsl = len(_dates)
_dates = (
_dates.sample(tsl, replace=False)
if isinstance(_dates, pd.Series)
else np.random.choice(_dates, tsl, replace=False)
)
random_date_selections[data.id] = _dates
return _dates
if "_oldest" in tmap_name:
return _dates[:tsl]
if "_newest" in tmap_name:
return _dates[-tsl:]
raise ValueError(f"Unknown time series ordering: {tmap_name}")
new_tmap = copy.deepcopy(base_tmap)
new_tmap_name = f"{base_name}{base_split}"
new_tmap.name = new_tmap_name
new_tmap.time_series_limit = time_series_limit
new_tmap.time_series_filter = updated_time_series_filter
tmaps[new_tmap_name] = new_tmap
return tmaps
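# Illustrative usage sketch: "ecg_age" is a hypothetical base tmap that must already
# be registered; the suffix only changes which dates time_series_filter returns.
def _example_time_series_usage(tmaps: Dict[str, TensorMap]) -> Dict[str, TensorMap]:
    """Register variants that keep only the two newest / two oldest dates per patient."""
    tmaps = update_tmaps_time_series("ecg_age_newest", tmaps, time_series_limit=2)
    tmaps = update_tmaps_time_series("ecg_age_oldest", tmaps, time_series_limit=2)
    return tmaps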
def _get_dataset_metadata(dataset_name: str) -> Tuple[str, str]:
if dataset_name == "sts":
prefix = STS_PREFIX
datetime_column = STS_SURGERY_DATE_COLUMN
elif dataset_name == "echo":
prefix = ECHO_PREFIX
datetime_column = ECHO_DATETIME_COLUMN
elif dataset_name == "ecg":
prefix = ECG_PREFIX
datetime_column = None
elif dataset_name == "ici":
prefix = ICI_PREFIX
datetime_column = ICI_DATE_COLUMN
else:
raise ValueError("{data_descriptor} is not a valid data descriptor")
return prefix, datetime_column
CROSS_REFERENCE_SOURCES = [ECG_PREFIX, STS_PREFIX, ECHO_PREFIX, ICI_PREFIX]
def _days_between_tensor_from_file(tm: TensorMap, data: PatientData) -> np.ndarray:
# Time series filter will be updated to return days for this tensor from file
days = tm.time_series_filter(data)
return days.to_numpy()[:, None]
def update_tmaps_window(
tmap_name: str,
tmaps: Dict[str, TensorMap],
) -> Dict[str, TensorMap]:
"""
Make new tensor map from base tensor map, making conditional on a date from
another source of data. This requires a precise format for tensor map name:
[base_tmap_name]_[N]_days_[pre/post]_[other_data_source]
e.g.
ecg_2500_365_days_pre_echo
ecg_2500_365_days_pre_sts_newest
av_peak_gradient_30_days_post_echo
Additionally, a special tensor map can be created to get the days between
cross referenced events by the following format:
[source_name]_[N]_days_[pre/post]_[other_data_source]_days_between_matched_events
e.g.
ecg_180_days_pre_echo_days_between_matched_events
"""
pattern = (
fr"(.*)_(\d+)_days_(pre|post)_({'|'.join(CROSS_REFERENCE_SOURCES)})"
fr"(_days_between_matched_events)?"
)
match = re.match(pattern, tmap_name)
if match is None:
return tmaps
# fmt: off
# ecg_2500_std_180_days_pre_echo
source_name = match[1] # ecg_2500_std
offset_days = int(match[2]) # 180
pre_or_post = match[3] # pre
reference_name = match[4] # echo
days_between = match[5] or "" # (empty string)
# fmt: on
new_name = (
f"{source_name}_{offset_days}_days_{pre_or_post}_{reference_name}{days_between}"
)
# If the tmap should return the number of days between matched events,
# source_name is the name of a source dataset
if days_between:
if source_name not in CROSS_REFERENCE_SOURCES:
raise ValueError(
f"Source dataset {source_name} not in known cross reference sources; "
f"cannot create {new_name}",
)
source_prefix, source_dt_col = _get_dataset_metadata(dataset_name=source_name)
# Setup time series filter, using the default time series filter if the source
# datetime column is None
if source_dt_col is not None:
time_series_filter = lambda data: data[source_prefix][source_dt_col]
else:
time_series_filter = make_default_time_series_filter(source_prefix)
# Create a fake base tmap which will be modified with a time series filter
# function which returns the number of days between events
base_tmap = TensorMap(
name=source_name,
shape=(1,),
interpretation=Interpretation.CONTINUOUS,
path_prefix=source_prefix,
tensor_from_file=_days_between_tensor_from_file,
time_series_limit=0,
time_series_filter=time_series_filter,
)
# If not getting days between events, source_name is the name of an underlying tmap
# to filter and must exist
elif source_name not in tmaps:
raise ValueError(
f"Base tmap {source_name} not in existing tmaps; cannot create {new_name}",
)
# If all checks pass, get base_tmap in the case that it is an existing tmap
else:
base_tmap = tmaps[source_name]
# Copy the base_tmap to modify, either a real tmap or the fake one setup to get
# the days between events
new_tmap = copy.deepcopy(base_tmap)
reference_prefix, reference_dt_col = _get_dataset_metadata(
dataset_name=reference_name,
)
# One-to-one matching algorithm maximizes the number of matches by pairing events
# nearest in time, starting from the most recent event.
# 1. Sort source dates from newest -> oldest
# 2. Sort reference dates from newest -> oldest
# 3. For each reference date, starting from the newest reference date
# a. Compute relative time window
# b. Take the newest source date in range
def get_cross_referenced_dates(data: PatientData) -> Dates:
source_dates = base_tmap.time_series_filter(data)
# Get dates from reference data
reference_data = data[reference_prefix]
if isinstance(reference_data, pd.DataFrame):
reference_dates = reference_data[reference_dt_col] # Reference data is CSV
else:
reference_dates = list(reference_data) # Reference data is HD5
# Convert everything to pd.Series of pd.Timestamp
source_is_list = isinstance(source_dates, list)
source_dates = pd.Series(source_dates).sort_values(ascending=False)
source_dates_dt = pd.to_datetime(source_dates)
reference_dates = pd.Series(reference_dates).sort_values(ascending=False)
reference_dates = pd.to_datetime(reference_dates)
# Set start and end dates relative to an event
if pre_or_post == "pre":
start_dates = reference_dates + datetime.timedelta(days=offset_days * -1)
end_dates = reference_dates
else:
start_dates = reference_dates
end_dates = reference_dates + datetime.timedelta(days=offset_days)
dates = pd.Series(dtype=object)
day_differences = pd.Series(dtype=object)
for start_date, end_date, reference_date in zip(
start_dates,
end_dates,
reference_dates,
):
# Get newest source date in range of start and end dates
matched_date = source_dates_dt[
source_dates_dt.between(start_date, end_date, inclusive=False)
][:1]
# If computing the days between events, calculate the day difference between
# the reference date and the matched date
if days_between:
difference = reference_date - matched_date
difference = difference.dt.total_seconds() / SECONDS_IN_DAY
day_differences = day_differences.append(difference)
# If not computing the days between events, return the actual dates
else:
# Computation is done on pd.Timestamp objects but returned list should
# use the original strings/format in source_dates
dates = dates.append(source_dates[matched_date.index])
# Remove the matched date from further matching
source_dates_dt = source_dates_dt.drop(matched_date.index)
if len(dates) == 0 and len(day_differences) == 0:
raise ValueError("No cross referenced dates")
if days_between:
return day_differences
elif source_is_list:
return list(dates)
else:
return dates
new_tmap.time_series_filter = get_cross_referenced_dates
new_tmap.name = new_name
tmaps[new_name] = new_tmap
return tmaps
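# Illustrative usage sketch: "ecg_2500" is a hypothetical base tmap; the second call
# uses the days-between form from the docstring, where the source ("ecg") must be one
# of the known cross-reference sources.
def _example_window_usage(tmaps: Dict[str, TensorMap]) -> Dict[str, TensorMap]:
    """Restrict an ECG tmap to the 365 days before an echo and register the matched
    day-difference tensor for a 180-day pre-echo window."""
    tmaps = update_tmaps_window("ecg_2500_365_days_pre_echo", tmaps)
    tmaps = update_tmaps_window(
        "ecg_180_days_pre_echo_days_between_matched_events", tmaps
    )
    return tmaps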
def update_sts_ecg_cross_reference(
tmap_name: str,
tmaps: Dict[str, TensorMap],
) -> Dict[str, TensorMap]:
pattern = fr"(.*)_(\d+)_days_(before_sts|after_ecg)"
match = re.match(pattern, tmap_name)
if match is None:
return tmaps
    # ecg_2500_zscore_pop_180_days_before_sts
base_name = match[1] # ecg_2500_zscore_pop
offset_days = int(match[2]) # 180
    relative_event = match[3]  # before_sts
new_name = f"{base_name}_{offset_days}_days_{relative_event}"
if base_name not in tmaps:
raise ValueError(
f"Base tmap {base_name} not in existing tmaps; cannot create {new_name}",
)
base_tmap = tmaps[base_name]
new_tmap = copy.deepcopy(base_tmap)
left_edge = pd.to_datetime(pd.Series(["1900-01-01"]))
left_edge.index = [-1]
def get_cross_referenced_dates(data: PatientData) -> Dates:
ecg_dates_str = pd.Series(list(data[ECG_PREFIX]))
surgery_dates = data[STS_PREFIX][STS_SURGERY_DATE_COLUMN]
ecg_dates = | pd.to_datetime(ecg_dates_str) | pandas.to_datetime |
import pickle
from ds import *
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn import ensemble
import numpy as np
import xgboost as xgb
from sklearn.impute import SimpleImputer
import csv
data_as_list = []
with open('data/out.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for idx,row in enumerate(readCSV):
if idx==0: continue
try:
title_len = len(row[0])
story_len = len(row[1])
goal = int(row[4])
category = row[5]
amt_raised = int(row[3])
data_as_list.append([title_len, story_len, goal, category, amt_raised])
except Exception:
continue
df = pd.DataFrame(data_as_list, columns = ["title_len", "story_len", "goal", "category", "amt_raised"])
df['category'] = | pd.Categorical(df['category']) | pandas.Categorical |
#!/usr/bin/env python
# encoding:utf-8
"""
Author : <NAME>
Date : 2021/8/4
Time: 20:06
File: precision_table_plot.py
HomePage : http://github.com/yuanqingmei
Email : <EMAIL>
compute the avg std max min values and draw the box plot of precision and recall.
"""
import time
def precision_table_plot(working_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\",
plot_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\plots\\"):
import os
import csv
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
| pd.set_option('display.width', 5000) | pandas.set_option |
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFFitMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python model fitting script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
for line in sys.stdin.read().splitlines():
line = line.split(delimiter)
inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = pd.to_numeric(df['tot_children'])
df['female_ind'] = pd.to_numeric(df['female_ind'])
df['single_ind'] = pd.to_numeric(df['single_ind'])
df['married_ind'] = | pd.to_numeric(df['married_ind']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import types
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def process_allo(param, permit_use):
"""
Function to process the consented allocation from the input tables from Accela and others.
More descriptions in the code below.
Parameters
----------
param : dict
Input parameters
permit_use : DataFrame
DataFrame from the output of the process_use_types function
Returns
-------
DataFrame
"""
run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for t in param['misc']['AllocationProcessing']['tables']:
p = param['source data'][t]
print(p['table'])
if p['schema'] != 'public':
stmt = 'select {cols} from "{schema}"."{table}"'.format(schema=p['schema'], table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
else:
stmt = 'select {cols} from "{table}"'.format(table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
setattr(db, t, sf.read_table(p['username'], p['password'], p['account'], p['database'], p['schema'], stmt))
##################################################
### Sites
print('--Process Waps')
## takes
wap_allo1 = db.wap_allo.copy()
wap1 = wap_allo1['Wap'].unique()
waps = wap1[~pd.isnull(wap1)].copy()
## Check that all Waps exist in the USM sites table
    usm_waps1 = db.waps[db.waps.Wap.isin(waps)].copy()
# usm_waps1[['NzTmX', 'NzTmY']] = usm_waps1[['NzTmX', 'NzTmY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.Wap))
print('Missing {} Waps in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.Wap.isin(miss_waps)].copy()
##################################################
### Permit table
print('--Process Permits')
'''
WILCO:
Selection FromDate and toDate was a bit of a pain in the ass i remember for the Rakaia as well. I don't think there is any filtering done here below yet, but maybe it is
good to consider that:
1) Some consents may have never been active between the FromDate and ToDate. The 'Given Effect To' field can help with that. If the given effect to is larger than the
toDate, then that consent was never exercised and (at least for modelling purposes) should be dropped from the list of consents.
2) If the Given Effect To date is larger than the fromDate, then set FromDate equal to Given Effect To.
3) For parent and child consents (orginal and renewals) it is good to check the FromDate and ToDate. In the Ecan database the FromDate of the renewal is most of the time
equal to the ToDate of the parent (original record), which would lead to double accounting for that day. For the Rakaia I fixed this by making sure that sure that
the toDate is always 1 day before the frommDate of the child consent.
Below I have inserted some (commented) code that I used in my Rakaia work, so not sure whether you want to use this yes/no.
'''
# #-Select consents that were active between sdate and edate
# print 'Filter consents that were active between %s and %s...' %(sdate.strftime('%d-%m-%Y'), edate.strftime('%d-%m-%Y'))
# df1 = df.loc[(df['toDate']>pd.Timestamp(sdate)) & (df['fmDate']<=pd.Timestamp(edate))]
# #-If 'Given Effect To' date is later than 'toDate', then consent was never active in between the fmDate-toDate period, and is therefore removed from the dataframe
# df1.loc[(df1['Given Effect To'] > df1['toDate']),:]=np.nan
# df2 = df1.dropna(how='all')
# #-If 'Given Effect To' date is later than 'fmDate', then the 'fmDate' field is set to 'Given Effect To'
# df2.loc[(df2['fmDate'] < df2['Given Effect To']),['fmDate']]= df2['Given Effect To']
#
# #-Unique consent numbers of 'OriginalRecord'
# ori_records = pd.unique(df2['OriginalRecord'])
# df2_columns = list(df2.columns)
# fmDate_index = df2_columns.index('fmDate')
# toDate_index = df2_columns.index('toDate')
# #-Make sure toDate is always 1 day before the fmDate of the child consent. Required to make sure that a consent isn't active twice on one day
# for c in ori_records:
# #-select the consents that belong to the same group (have same parent so to speak)
# df_short = df2.loc[df2['OriginalRecord']==c]
# for i in range(0,len(df_short)-1):
# toDate = df_short.iloc[i,toDate_index] #-toDate of current record
# fmDate = df_short.iloc[i+1,fmDate_index] #-fromDate of child record
# if toDate == fmDate: #-cannot be equal. If so, then decrease the todate of the current record with one day
# df_short.iloc[i, toDate_index] = toDate - dt.timedelta(days=1)
# df2.loc[df2['OriginalRecord']==c] = df_short
# #-get rid of old dataframes
# df = df2.copy()
# df1 = None; df2 = None; del df1, df2
#
# #-For consents that are active for one day, the toDate may now (because of extracting one day from toDate) be smaller than fmDate. Those records are removed
# df = df.loc[df['toDate']>=df['fmDate']]
## Clean data
permits2 = db.permit.copy()
permits2['FromDate'] = pd.to_datetime(permits2['FromDate'], infer_datetime_format=True, errors='coerce')
permits2['ToDate'] = pd.to_datetime(permits2['ToDate'], infer_datetime_format=True, errors='coerce')
## Filter data
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NzTmX.notnull() & permits2.NzTmY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = pd.Timestamp('1900-01-01')
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = pd.Timestamp('1900-01-01')
##################################################
### Parent-Child
print('--Process Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys --> what are foreign keys?
crc1 = permits2.RecordNumber.unique()
pc0 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
#################################################
### AllocatedRatesVolumes
print('--Process Allocation data')
## Rates
# Clean data
wa1 = wap_allo1.copy()
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Find the missing Waps per consent
crc_wap_mis1 = wa4.loc[wa4.Wap.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'Wap']].drop_duplicates()
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'Wap'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'Wap'] = wap1[0]
wa4 = wa4[wa4.Wap.notnull()].copy()
## Distribute the months
    # Since the tables in Accela have no explicit primary/composite keys, it is possible that the
    # eventual composite key 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap' does not fully
    # capture the Accela data set: the rates may also change by month. This occurs in fewer than
    # 100 consents ever, so the simplification seems justified. The code below splits the consents
    # out by each month that the consent is allowed to be active, with the appropriate rates and
    # volumes listed in the Accela table. The mean is then taken over all months to ensure that
    # there is only one value per 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'.
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)
# Mean of all months
grp1 = wa5.groupby(['RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'])
mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume150Day']].mean().round(2)
include1 = grp1['IncludeInSwAllocation'].first()
mon_min = grp1['Month'].min()
mon_min.name = 'FromMonth'
mon_max = grp1['Month'].max()
mon_max.name = 'ToMonth'
wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()
# wa6['HydroGroup'] = 'Surface Water'
## Rename allocation blocks !!!!!! Need to be changed later!!!!
# av1.rename(columns={'GwAllocationBlock': 'AllocationBlock'}, inplace=True)
# wa6.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)
# wa6.replace({'SwAllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
## Combine volumes with rates !!! Needs to be changed later!!!
# wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'TakeType'])
## Add in stream depletion
waps = db.waps.copy()
wa7 = pd.merge(wa6, waps, on='Wap').drop(['SD1_30Day'], axis=1)
# wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)
# wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)
## Add in the lowflow bool
wa8 = pd.merge(wa7, db.consented_takes, on=['RecordNumber', 'TakeType'], how='left')
wa8.loc[wa8.LowflowCondition.isnull(), 'LowflowCondition'] = False
## Distribute the rates according to the stream depletion requirements
## According to the LWRP Schedule 9!
allo_rates1 = wa8.drop_duplicates(['RecordNumber', 'SwAllocationBlock', 'Wap']).set_index(['RecordNumber', 'SwAllocationBlock', 'Wap']).copy()
# Convert daily, 7-day, and 150-day volumes to rates in l/s
allo_rates1['RateDaily'] = (allo_rates1['VolumeDaily'] / 24 / 60 / 60) * 1000
allo_rates1['RateWeekly'] = (allo_rates1['VolumeWeekly'] / 7 / 24 / 60 / 60) * 1000
allo_rates1['Rate150Day'] = (allo_rates1['Volume150Day'] / 150 / 24 / 60 / 60) * 1000
# SD categories - According to the LWRP! Schedule 9.
rate_bool = (allo_rates1['Rate150Day'] * (allo_rates1['SD1_150Day'] * 0.01)) > 5
allo_rates1['sd_cat'] = 'low'
allo_rates1.loc[(rate_bool | (allo_rates1['SD1_150Day'] >= 40)), 'sd_cat'] = 'moderate'
allo_rates1.loc[(allo_rates1['SD1_150Day'] >= 60), 'sd_cat'] = 'high'
allo_rates1.loc[(allo_rates1['SD1_7Day'] >= 90), 'sd_cat'] = 'direct'
allo_rates1.loc[(allo_rates1['TakeType'] == 'Take Surface Water'), 'sd_cat'] = 'direct'
# Assign volume ratios
allo_rates1['sw_vol_ratio'] = 1
allo_rates1.loc[allo_rates1.sd_cat == 'low', 'sw_vol_ratio'] = 0
allo_rates1.loc[allo_rates1.sd_cat == 'moderate', 'sw_vol_ratio'] = 0.5
allo_rates1.loc[allo_rates1.sd_cat == 'high', 'sw_vol_ratio'] = 0.75
allo_rates1.loc[allo_rates1.sd_cat == 'direct', 'sw_vol_ratio'] = 1
allo_rates1 = allo_rates1[allo_rates1['IncludeInSwAllocation'] | (allo_rates1['TakeType'] == 'Take Groundwater')].copy()
## Assign Rates
rates1 = allo_rates1.copy()
gw_bool = rates1['TakeType'] == 'Take Groundwater'
sw_bool = rates1['TakeType'] == 'Take Surface Water'
low_bool = rates1.sd_cat == 'low'
mod_bool = rates1.sd_cat == 'moderate'
high_bool = rates1.sd_cat == 'high'
direct_bool = rates1.sd_cat == 'direct'
lf_cond_bool = rates1.LowflowCondition
rates1['Surface Water'] = 0
rates1['Groundwater'] = 0
rates1.loc[gw_bool, 'Groundwater'] = rates1.loc[gw_bool, 'Rate150Day']
rates1.loc[mod_bool | high_bool, 'Surface Water'] = rates1.loc[mod_bool | high_bool, 'Rate150Day'] * (rates1.loc[mod_bool | high_bool, 'SD1_150Day'] * 0.01)
# The below boolean query is directly related to Schedule 9 and the consented allocation document by <NAME> and Don
alt_bool = gw_bool & (((rates1.Storativity | lf_cond_bool) & (mod_bool | high_bool)) | rates1.Combined)
rates1.loc[alt_bool, 'Groundwater'] = rates1.loc[alt_bool, 'Rate150Day'] - rates1.loc[alt_bool, 'Surface Water']
rates1.loc[direct_bool & gw_bool, 'Surface Water'] = rates1.loc[direct_bool & gw_bool, 'RateDaily']
rates1.loc[(direct_bool & gw_bool) & (rates1.Storativity | lf_cond_bool), 'Groundwater'] = 0
rates1.loc[sw_bool, 'Surface Water'] = rates1.loc[sw_bool, 'AllocatedRate']
rates2 = rates1[['Groundwater', 'Surface Water']].stack().reset_index()
rates2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedRate'}, inplace=True)
rates2 = pd.merge(rates2, rates1.reset_index()[['RecordNumber', 'SwAllocationBlock', 'Wap', 'FromMonth', 'ToMonth']], on=['RecordNumber', 'SwAllocationBlock', 'Wap'])
# rates2.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)
# rates3 = rates2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap']).set_index(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])
rates3 = rates2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])
## Allocated Volume
av1 = db.allocated_volume.copy()
# av1.replace({'GwAllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
# Add in the Wap info
ar1 = allo_rates1.reset_index()[['RecordNumber', 'SwAllocationBlock', 'TakeType', 'Wap', 'Rate150Day', 'Storativity', 'Combined', 'sd_cat', 'sw_vol_ratio', 'LowflowCondition']].copy()
ar2_grp = ar1.groupby(['RecordNumber', 'TakeType', 'Wap'])
ar2_rates = ar2_grp[['Rate150Day']].sum()
ar2_others = ar2_grp[['Storativity', 'Combined', 'sd_cat', 'sw_vol_ratio', 'LowflowCondition']].first()
ar3 = pd.concat([ar2_rates, ar2_others], axis=1).reset_index()
# ar3['WapCount'] = ar3.groupby(['RecordNumber', 'TakeType'])['Wap'].transform('count')
vols1 = pd.merge(av1, ar3, on=['RecordNumber', 'TakeType'])
# vols1.groupby(['RecordNumber', 'TakeType', 'Wap'])['GwAllocationBlock'].count()
grp3 = vols1.groupby(['RecordNumber', 'TakeType', 'GwAllocationBlock'])
vols1['Rate150DayAgg'] = grp3['Rate150Day'].transform('sum')
vols1['ratio'] = vols1['Rate150Day'] / vols1['Rate150DayAgg']
vols1.loc[vols1['ratio'].isnull(), 'ratio'] = 0
vols1['FullAnnualVolume'] = (vols1['FullAnnualVolume'] * vols1['ratio'])
vols1.drop(['Rate150DayAgg', 'ratio'], axis=1, inplace=True)
# vols1['FullAnnualVolume'] = (vols1['FullAnnualVolume'] * vols1['ratio'] / vols1['WapCount']).round()
# vols1.drop(['WapRateAgg', 'ratio', 'WapCount'], axis=1, inplace=True)
# Assign volumes with discount exception
# vols1 = allo_rates1.copy()
vols1['Surface Water'] = vols1['FullAnnualVolume'] * vols1['sw_vol_ratio']
vols1['Groundwater'] = vols1['FullAnnualVolume']
vols1.loc[vols1.TakeType == 'Take Surface Water', 'Groundwater'] = 0
# vols1.loc[(vols1.TakeType == 'Take Surface Water') & (vols1['Surface Water'] == 0), 'Surface Water'] = np.nan
# discount_bool = ((vols1.sd_cat == 'moderate') & (vols1.Storativity)) | ((vols1.sd_cat == 'moderate') & vols1.Combined) | (vols1.sd_cat == 'high') | (vols1.sd_cat == 'direct')
discount_bool = ((vols1.Storativity | vols1.LowflowCondition) & ((vols1.sd_cat == 'moderate') | (vols1.sd_cat == 'high') | (vols1.sd_cat == 'direct'))) | vols1.Combined
vols1.loc[discount_bool, 'Groundwater'] = vols1.loc[discount_bool, 'FullAnnualVolume'] - vols1.loc[discount_bool, 'Surface Water']
# Split the take types by SW and GW to assign the appropraite allocation block type - Put more info about why this has to happen!
sw_vols1 = vols1[vols1.TakeType == 'Take Surface Water'].copy()
gw_vols1 = vols1[vols1.TakeType == 'Take Groundwater'].copy()
sw_vols1.rename(columns={'GwAllocationBlock': 'SwAllocationBlock'}, inplace=True)
gw_vols2 = gw_vols1.set_index(['RecordNumber', 'GwAllocationBlock', 'Wap'])[['Groundwater', 'Surface Water']].stack().reset_index()
gw_vols2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedAnnualVolume'}, inplace=True)
gw_vols3 = gw_vols2.drop_duplicates(['RecordNumber', 'HydroGroup', 'GwAllocationBlock', 'Wap'])
sw_vols2 = sw_vols1.set_index(['RecordNumber', 'SwAllocationBlock', 'Wap'])[['Groundwater', 'Surface Water']].stack().reset_index()
sw_vols2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedAnnualVolume'}, inplace=True)
sw_vols3 = sw_vols2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])
## Join SW rates to SW volumes
rv0 = pd.merge(rates3, sw_vols3, on=['RecordNumber', 'SwAllocationBlock', 'HydroGroup', 'Wap'])
rv0.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)
## Join GW rates and GW volumes
rv1 = pd.merge(rates3, gw_vols3, on=['RecordNumber', 'HydroGroup', 'Wap'])
# Fix duplicates
rv1['Count'] = rv1.groupby(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])['AllocatedRate'].transform('count')
rv1['AllocatedRate'] = rv1['AllocatedRate'] / rv1['Count']
rv_grp = rv1.groupby(['RecordNumber', 'HydroGroup', 'GwAllocationBlock', 'Wap'])
rv1['Count'] = rv_grp['AllocatedRate'].transform('count')
rv1['AllocatedAnnualVolume'] = rv1['AllocatedAnnualVolume'] / rv1['Count']
# Distribute volumes according to rates
rv1['rate_ratio'] = rv1['AllocatedRate'] / rv_grp['AllocatedRate'].transform('sum')
rv1.loc[rv1['rate_ratio'].isnull(), 'rate_ratio'] = 0
rv1.loc[rv1['rate_ratio'] == np.inf, 'rate_ratio'] = 1
rv1['vol_sum'] = rv_grp['AllocatedAnnualVolume'].transform('sum')
rv1['AllocatedAnnualVolume'] = rv1['vol_sum'] * rv1['rate_ratio']
# Specify the Allocation blocks and aggregate
rv1['AllocationBlock'] = rv1['SwAllocationBlock']
rv1.loc[rv1.HydroGroup == 'Groundwater', 'AllocationBlock'] = rv1.loc[rv1.HydroGroup == 'Groundwater', 'GwAllocationBlock']
rv1.drop(['SwAllocationBlock', 'GwAllocationBlock', 'Count', 'rate_ratio', 'vol_sum'], axis=1, inplace=True)
rv1_grp = rv1.groupby(['RecordNumber', 'HydroGroup', 'AllocationBlock', 'Wap'])
rv1_sum = rv1_grp[['AllocatedRate', 'AllocatedAnnualVolume']].sum()
rv1_min = rv1_grp[['FromMonth']].min()
rv1_max = rv1_grp[['ToMonth']].max()
rv1a = pd.concat([rv1_sum, rv1_min, rv1_max], axis=1).reset_index()
## Combine the SW and GW data frames
rv2 = pd.concat([rv0, rv1a])
## Deal with the "Include in Allocation" fields
sw_allo_bool = allo_rates1.reset_index()[['RecordNumber', 'Wap', 'IncludeInSwAllocation']].drop_duplicates(['RecordNumber', 'Wap'])
gw_allo_bool = vols1[['RecordNumber', 'Wap', 'IncludeInGwAllocation']].drop_duplicates(['RecordNumber', 'Wap'])
rv2a = pd.merge(rv2, sw_allo_bool, on=['RecordNumber', 'Wap'])
rv2 = pd.merge(rv2a, gw_allo_bool, on=['RecordNumber', 'Wap'])
rv3 = rv2[(rv2.HydroGroup == 'Surface Water') | (rv2.IncludeInGwAllocation)]
rv4 = rv3[(rv3.HydroGroup == 'Groundwater') | (rv3.IncludeInSwAllocation)]
## Calculate missing volumes and rates
# ann_bool = rv4.AllocatedAnnualVolume.isnull()
# rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1)).round()
#
# rate_bool = rv4.AllocatedRate.isnull()
# rv4.loc[rate_bool, 'AllocatedRate'] = np.floor((rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000))
rv4 = rv4[(rv4['AllocatedAnnualVolume'] > 0) | (rv4['AllocatedRate'] > 0)].copy()
# rv4.loc[rv4['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0
# rv4.loc[rv4['AllocatedRate'].isnull(), 'AllocatedRate'] = 0
## Aggregate by crc, allo block, hydrogroup, and wap
# rv_grp = rv4.groupby(['RecordNumber', 'HydroGroup', 'AllocationBlock', 'Wap'])
# sum1 = rv_grp[['AllocatedRate', 'AllocatedAnnualVolume']].sum()
# other1 = rv_grp[['FromMonth', 'ToMonth']].first()
#
# rv4 = pd.concat([sum1, other1], axis=1).reset_index()
## Convert the rates and volumes to integers
rv4['AllocatedAnnualVolume'] = rv4['AllocatedAnnualVolume'].round().astype('int64')
rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype('int64')
## Combine with permit data
rv5 = pd.merge(rv4, permits2[['RecordNumber', 'ConsentStatus', 'ApplicationStatus', 'FromDate', 'ToDate']].drop_duplicates('RecordNumber', keep='last'), on='RecordNumber')
## Update the Waitaki use types
rv5a = pd.merge(rv5, permit_use[['RecordNumber', 'WaitakiTable5']], on='RecordNumber')
rv5a.loc[rv5a.AllocationBlock == 'In Waitaki', 'AllocationBlock'] = rv5a.loc[rv5a.AllocationBlock == 'In Waitaki', 'WaitakiTable5']
rv5b = rv5a.drop('WaitakiTable5', axis=1)
## Combine with other Wap data
waps1 = waps[['Wap', 'GwSpatialUnitId', 'SwSpatialUnitId', 'Combined']].copy()
rv6 = | pd.merge(rv5b, waps1, on='Wap') | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 09:52:31 2021
@author: HaoLI
"""
"""
Created on Wed Dec 8 11:48:41 2021
@author: HaoLI
"""
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data.sampler import WeightedRandomSampler
import torch.utils.data as data_utils
import pandas as pd
import numpy as np
import os #for working directory
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score # compute ROC and AUC
import time
import datetime
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import random
use_gpu = torch.cuda.is_available()
print("GPU",use_gpu)
list_rec = [] #记录参数
randomseed = 22
random.seed(randomseed)
layer1=196
layer2=196
oversample_ratio=0.5
training_epochs = 80
minibatch_size = 5000
learning_rate=2e-4
penalty=2 # p=1 for L1; p=0 for L2, weight_decay only for L2; p=2 by default. This is the exponent of the norm used in the penalty (defaults to the 2-norm); p=0 gives L2 regularisation, p=1 gives L1 regularisation
weight_decay=0.0125 # weight_decay is the L2 regularisation term
dropout=0.0
#os.getcwd()
os.chdir('/Users/HaoLI/Stata/credit/data')
df = | pd.read_csv('data1210rename_use.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from collections import namedtuple
import csv
import json
import os
import re
import sys
import pkg_resources
from zipfile import ZipFile
import requests
from tiingo.restclient import RestClient
from tiingo.exceptions import (
InstallPandasException,
APIColumnNameError,
InvalidFrequencyError,
MissingRequiredArgumentError)
try:
import pandas as pd
pandas_is_installed = True
except ImportError:
pandas_is_installed = False
VERSION = pkg_resources.get_distribution("tiingo").version
# These methods enable python 2 + 3 compatibility.
def get_zipfile_from_response(response):
if sys.version_info < (3, 0): # python 2
from StringIO import StringIO as Buffer
else: # python 3
from io import BytesIO as Buffer
buffered = Buffer(response.content)
return ZipFile(buffered)
def get_buffer_from_zipfile(zipfile, filename):
if sys.version_info < (3, 0): # python 2
from StringIO import StringIO
return StringIO(zipfile.read(filename))
else: # python 3
# Source:
# https://stackoverflow.com/questions/5627954/py3k-how-do-you-read-a-file-inside-a-zip-file-as-text-not-bytes
from io import (TextIOWrapper, BytesIO)
return TextIOWrapper(BytesIO(zipfile.read(filename)))
def dict_to_object(item, object_name):
"""Converts a python dict to a namedtuple, saving memory."""
fields = item.keys()
values = item.values()
return json.loads(json.dumps(item),
object_hook=lambda d:
namedtuple(object_name, fields)(*values))
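# Illustrative check: a plain dict becomes a namedtuple with attribute access
# ("XYZ" is a made-up ticker symbol).
def _example_dict_to_object():
    quote = dict_to_object({"ticker": "XYZ", "close": 1.0}, "Quote")
    return quote.ticker, quote.close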
class TiingoClient(RestClient):
"""Class for managing interactions with the Tiingo REST API
Supply API Key via Environment Variable TIINGO_API_KEY
or via the Config Object
"""
def __init__(self, *args, **kwargs):
super(TiingoClient, self).__init__(*args, **kwargs)
self._base_url = "https://api.tiingo.com"
try:
api_key = self._config['api_key']
except KeyError:
api_key = os.environ.get('TIINGO_API_KEY')
self._api_key = api_key
if not(api_key):
raise RuntimeError("Tiingo API Key not provided. Please provide"
" via environment variable or config argument.")
self._headers = {
'Authorization': "Token {}".format(api_key),
'Content-Type': 'application/json',
'User-Agent': 'tiingo-python-client {}'.format(VERSION)
}
self._frequency_pattern = re.compile('^[0-9]+(min|hour)$', re.IGNORECASE)
def __repr__(self):
return '<TiingoClient(url="{}")>'.format(self._base_url)
def _is_eod_frequency(self,frequency):
return frequency.lower() in ['daily', 'weekly', 'monthly', 'annually']
# TICKER PRICE ENDPOINTS
# https://api.tiingo.com/docs/tiingo/daily
def list_tickers(self, assetTypes=[]):
"""Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock is traded on.
Tickers for unrelated products are omitted.
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
"""
listing_file_url = "https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip"
response = requests.get(listing_file_url)
zipdata = get_zipfile_from_response(response)
raw_csv = get_buffer_from_zipfile(zipdata, 'supported_tickers.csv')
reader = csv.DictReader(raw_csv)
if not len(assetTypes):
return [row for row in reader]
assetTypesSet = set(assetTypes)
return [row for row in reader
if row.get('assetType') in assetTypesSet]
def list_stock_tickers(self):
return self.list_tickers(['Stock'])
def list_etf_tickers(self):
return self.list_tickers(['ETF'])
def list_fund_tickers(self):
return self.list_tickers(['Mutual Fund'])
def get_ticker_metadata(self, ticker, fmt='json'):
"""Return metadata for 1 ticker
Use TiingoClient.list_tickers() to get available options
Args:
ticker (str) : Unique identifier for stock
"""
url = "tiingo/daily/{}".format(ticker)
response = self._request('GET', url)
data = response.json()
if fmt == 'json':
return data
elif fmt == 'object':
return dict_to_object(data, "Ticker")
def _invalid_frequency(self, frequency):
"""
Check to see that frequency was specified correctly
:param frequency (string): frequency string
:return (boolean):
"""
is_valid = self._is_eod_frequency(frequency) or re.match(self._frequency_pattern, frequency)
return not is_valid
def _get_url(self, ticker, frequency):
"""
Return url based on frequency. Daily, weekly, or yearly use Tiingo
EOD api; anything less than daily uses the iex intraday api.
:param ticker (string): ticker to be embedded in the url
:param frequency (string): valid frequency per Tiingo api
:return (string): url
"""
if self._invalid_frequency(frequency):
etext = ("Error: {} is an invalid frequency. Check Tiingo API documentation "
"for valid EOD or intraday frequency format.")
raise InvalidFrequencyError(etext.format(frequency))
else:
if self._is_eod_frequency(frequency):
return "tiingo/daily/{}/prices".format(ticker)
else:
return "iex/{}/prices".format(ticker)
def get_ticker_price(self, ticker,
startDate=None, endDate=None,
fmt='json', frequency='daily'):
"""By default, return latest EOD Composite Price for a stock ticker.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
Args:
ticker (string): Unique identifier for stock ticker
startDate (string): Start of ticker range in YYYY-MM-DD format
endDate (string): End of ticker range in YYYY-MM-DD format
fmt (string): 'csv' or 'json'
frequency (string): Resample frequency
"""
url = self._get_url(ticker, frequency)
params = {
'format': fmt if fmt != "object" else 'json', # conversion local
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
# TODO: evaluate whether to stream CSV to cache on disk, or
# load as array in memory, or just pass plain text
response = self._request('GET', url, params=params)
if fmt == "json":
return response.json()
elif fmt == "object":
data = response.json()
return [dict_to_object(item, "TickerPrice") for item in data]
else:
return response.content.decode("utf-8")
def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
"""
valid_columns = {'open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
'adjClose', 'adjVolume', 'divCash', 'splitFactor'}
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError('Valid data items are: ' + str(valid_columns))
if metric_name is None and isinstance(tickers, list):
raise MissingRequiredArgumentError("""When tickers is provided as a list, metric_name is a required argument.
Please provide a metric_name, or call this method with one ticker at a time.""")
params = {
'format': 'json',
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
if pandas_is_installed:
if type(tickers) is str:
stock = tickers
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
if metric_name is not None:
prices = df[metric_name]
prices.index = df['date']
else:
prices = df
prices.index = df['date']
del (prices['date'])
else:
prices = pd.DataFrame()
for stock in tickers:
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
df.index = df['date']
df.rename(index=str, columns={metric_name: stock}, inplace=True)
prices = pd.concat([prices, df[stock]], axis=1, sort=True)
prices.index = | pd.to_datetime(prices.index) | pandas.to_datetime |
import ast
import os
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger("iocurves analysis")
def boltzman(x, xmid, tau):
"""
evaluate the boltzman function with midpoint xmid and time constant tau over x
"""
return 1./(1. + np.exp(-(x - xmid)/tau))
def sigmoid(x, x0, k):
"""
evaluate sigmoid function slope k and midpoint x0 over x
"""
y = 1/(1 + np.exp(-k*(x - x0)))
return y
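# Quick illustrative check: both curves evaluate to 0.5 at their midpoint and
# saturate towards 0 and 1 away from it (parameter values are arbitrary).
def _example_activation_midpoints():
    x = np.array([-5.0, 0.0, 5.0])
    return boltzman(x, xmid=0.0, tau=1.0), sigmoid(x, x0=0.0, k=1.0)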
# keep some results in memory for quick re-evaluation (ast can take a while)
memoised = dict()
def get_params(save_name):
"""Convert a name to python data base don a known structure.
:param save_name: str in format:
`"[file name[_KCC2]]_[synapse type]_[synapse numbers]_[syn input]_...[recording location]_[trials].txt"`
:type save_name: str
:return: file_name, synapse_type, synapse_numbers, syn_input, non_default_dict, location, trials
:rtype: (str, int, list of int, dict of str, dict, str, int)
"""
if save_name in memoised:
return memoised[save_name]
params = save_name.split('_')
if params[1] == 'KCC2':
params[0] = params[0] + '_' + params[1]
del params[1]
file_name, synapse_type, synapse_numbers, syn_input = params[:4]
location, trials = params[-2:]
non_default_dict = {"diam": None, "pa_kcc2": None}
if len(params) > 6:
extra_keys = list(non_default_dict.keys())
for i in range(4, len(params) - 2):
non_default_dict[extra_keys[i - 4]] = params[i]
# process some of the params
syn_input = syn_input.replace('=', "':").replace('(', "{'").replace(")", "}").replace(",",",'")
synapse_numbers = ast.literal_eval(synapse_numbers)
syn_input = ast.literal_eval(syn_input)
for key, value in non_default_dict.items():
try:
non_default_dict[key] = ast.literal_eval(non_default_dict[key].replace('=', "':").replace('(', "{'").replace(")", "}").replace(",",",'"))
except (ValueError, AttributeError):
# do nothing
pass
memoised[save_name] = (file_name, synapse_type, synapse_numbers, syn_input, non_default_dict, location, trials)
return file_name, synapse_type, synapse_numbers, syn_input, non_default_dict, location, trials
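# Illustrative usage sketch: the save_name below is a made-up instance of the
# documented format, not a real recording name.
def _example_get_params():
    name = "cell1_KCC2_GABA_[10,20]_(e=5,i=5)_soma_5"
    file_name, synapse_type, synapse_numbers, syn_input, extras, location, trials = get_params(name)
    # synapse_numbers -> [10, 20], syn_input -> {'e': 5, 'i': 5}
    return file_name, synapse_numbers, syn_input, location, trials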
def get_var(df, recorded_var, mean=True):
"""
Get variable from dataframe one level deep and optionally the mean.
:param df: Recorded data over time
:type df: pd.DataFrame
:param recorded_var: Variable to retrieve from df
:type recorded_var: str
:param mean: Include the mean in the return tuple
:type mean: bool
:return: Dataframe of variable and Series of the mean for the variable for each time step
:rtype: (pd.DataFrame, pd.Series or None)
"""
all_var_columns = df.xs(recorded_var, level=1, axis=1)
if mean:
# mean over the columns
mean_val = all_var_columns.mean(axis=1)
else:
mean_val = None
return all_var_columns, mean_val
def moving_average(spike_indices, time_bin=1., backward=True):
""" Calculate the moving average filter for the spike_indices according to time_bin (in seconds).
:param spike_indices: Array of spike/no spike boolean type over time.
:type spike_indices: np.ndarray of bool
:param time_bin: Size of sliding window computation (in seconds)
:type time_bin: float
:param backward: Return values are for t-time_bin (True) or t+time_bin (False)
:type backward: bool
:return: Instantaneous firing rate for each point in time (overlapping windows)
:rtype: np.ndarray or float
"""
from src.iocurves.sim import DT
time_bin_size = int(time_bin*1000/DT)
ifr = np.cumsum(spike_indices, dtype=float)
if backward:
ifr[:-time_bin_size] = ifr[time_bin_size:] - ifr[:-time_bin_size] # backward window
# amend end (if backward window) of IFR trace to be flat
ifr[-time_bin_size:] = ifr[-time_bin_size - 1]
else:
ifr[time_bin_size:] = ifr[time_bin_size:] - ifr[:-time_bin_size] # forward window
ifr = ifr[time_bin_size - 1:]
return ifr/time_bin
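# Minimal sketch: a 10 s boolean spike raster sampled every DT ms (one spike every
# 100 ms, i.e. ~10 Hz) smoothed with a 1 s backward window.
def _example_moving_average():
    from src.iocurves.sim import DT
    n_samples = int(10000 / DT)
    spikes = np.zeros(n_samples, dtype=bool)
    spikes[::int(100 / DT)] = True
    return moving_average(spikes, time_bin=1.0)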
def get_inst_firing_rate(spike_arr, time_bin=1., slide=True):
"""
    :param spike_arr: trace whose positive increments mark spike onsets (e.g. a cumulative spike count)
    :type spike_arr: np.ndarray of int or list of int
    :param time_bin: Size of sliding window computation (in seconds)
    :type time_bin: float
    :param slide: use an overlapping (sliding) window if True, otherwise non-overlapping bins
    :type slide: bool
    :return: instantaneous firing rate per sample
    :rtype: np.ndarray
"""
from src.iocurves.sim import DT
if type(spike_arr) != np.ndarray:
spike_arr = np.array(spike_arr)
spike_indices = np.diff(spike_arr) > 0
spike_indices = np.append(spike_indices, [False]) # add an element due to diff losing one
if slide:
# super fast
ifr = moving_average(spike_indices, time_bin)
else:
ifr = np.zeros(shape=len(spike_indices))
time_bin_size = int(time_bin*1000/DT)
# slower for moving average, but fast enough for non-overlapping intervals
for i in range(0, int(len(spike_indices) - time_bin_size), time_bin_size):
ifr[i:i + time_bin_size] = sum(spike_indices[i:i + time_bin_size])/time_bin
return ifr
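# Illustrative usage (editor's sketch; the trace is synthetic and DT comes from src.iocurves.sim):
#     trace = np.zeros(80000)
#     trace[::4000] = 1      # an upward step once per "second" is detected as a spike
#     ifr_sliding = get_inst_firing_rate(trace, time_bin=1., slide=True)
#     ifr_binned = get_inst_firing_rate(trace, time_bin=1., slide=False)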
def save_to_file(title, result):
"""Save results to either h5 file (if DataFrame), otherwise to npy file."""
from src.utils.file_io import create_dir
create_dir("results", timestamp=False)
path = os.path.join('results', title)
if type(result) is list:
np.save(path + '.npy', result)
elif type(result) is pd.DataFrame:
result.to_hdf(path, 'table')
else:
raise RuntimeError(f"saving failed for {result}")
logger.info("saved")
def load_from_file(title):
"""Load results from a file. Can be DataFrame or numpy arrays."""
path = os.path.join('results', title)
try:
return pd.read_hdf(path, 'table')
except IOError:
try:
return np.load(path + ".npy", allow_pickle=True)
except OSError:
return None
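# Illustrative round trip through save_to_file/load_from_file (editor's sketch; the title is arbitrary):
#     df = pd.DataFrame({"v": [0.1, 0.2]})
#     save_to_file("example_results", df)          # DataFrame -> results/example_results (HDF5)
#     df_back = load_from_file("example_results")
#     missing = load_from_file("no_such_title")    # returns None when neither file exists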
def get_data(cl_state_trials, ifr_windowsize, time_points, var='spikes'):
from src.iocurves.sim import DT
FRdf = | pd.DataFrame(index=time_points) | pandas.DataFrame |
import os
import math
import numpy as np
import collections
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from collections.abc import Iterable
import stethoscope.plotting_constants as plotting_constants
def _roundup(x):
return int(math.ceil(x / 100) * 100)
class UtilizationLineGraph:
FILENAME = 'ts_line_utilization.png'
@classmethod
def plot(cls : type,
utilization_per_service : dict,
simulation_start : pd.Timestamp,
simulation_end : pd.Timestamp,
resolution : pd.Timedelta = pd.Timedelta(1000, unit = 'ms'),
figures_dir = None):
"""
Line graph (x axis - time) of the service system resources utilization,
separately for each service-resource pair
"""
utilization_regionalized = collections.defaultdict(lambda: collections.defaultdict(dict))
for service_name, utilization_per_region in utilization_per_service.items():
for region_name, utilization_per_resource in utilization_per_region.items():
utilization_regionalized[region_name][service_name] = utilization_per_resource
for region_name, utilization_per_service in utilization_regionalized.items():
if len(utilization_per_service) > 0:
fig, axs = plt.subplots(nrows = plotting_constants.SYSTEM_RESOURCES_CNT,
ncols = len(utilization_per_service),
figsize = (len(utilization_per_service) * plotting_constants.SQUARE_PLOT_SIDE_INCH,
plotting_constants.SYSTEM_RESOURCES_CNT * plotting_constants.SQUARE_PLOT_SIDE_INCH))
font = {'color': 'black', 'weight': 'bold', 'size': 12}
if not isinstance(axs, Iterable):
axs = np.asarray([axs])
if len(utilization_per_service) == 1:
axs = np.asarray([axs]).T
i = 0
for service_name, utilization_per_resource in utilization_per_service.items():
j = 0
for resource_name, utilization_ts in utilization_per_resource.items():
index_full = pd.date_range(simulation_start, simulation_end, freq = resolution)
utilization_ts_full = pd.DataFrame({'value': [0.0] * len(index_full)}, index = index_full)
utilization_ts.index = | pd.to_datetime(utilization_ts.index) | pandas.to_datetime |
import json
import pandas as pd
from objects.folder import Folder
from objects.mapping import Mapping
from objects.source import Source
from objects.target import Target
from objects.target_field import TargetField
from objects.source_field import SourceField
from objects.transformation import Transformation
from objects.transformation_field import TransformationField
from objects.connector import Connector
class FileProcess(object):
def __init__(self, input_file: str = None, output_folder: str = None):
self.input_file = input_file
self.output_folder = output_folder
def _get_folders(self, data: dict):
        # Data folder information
powermart_object = data['POWERMART']
repository_object = powermart_object['REPOSITORY']
repository_version = powermart_object['REPOSITORY_VERSION']
repository_name = repository_object['NAME']
folder_object = repository_object['FOLDER']
database_type = repository_object['DATABASETYPE']
folders = []
for _folder in folder_object:
folder_name = _folder['NAME']
# sources
sources = self._get_sources(_folder)
# targets
targets = self._get_targets(_folder)
# mappings
mappings = self._get_mappings(_folder)
folder = Folder(repository_version, repository_name, folder_name, database_type, sources=sources,
mappings=mappings,
targets=targets)
folders.append(folder)
return folders
def _get_sources(self, folder: dict):
sources = []
if not folder.get('SOURCE'):
return sources
if isinstance(folder.get('SOURCE'), list):
for _source in folder['SOURCE']:
source_fields = self._get_source_fields(_source)
source = Source(_source['DATABASETYPE'], _source['DBDNAME'], _source['NAME'], _source['OWNERNAME'],
source_fields, folder['NAME'])
sources.append(source)
elif isinstance(folder.get('SOURCE'), dict):
source_fields = self._get_source_fields(folder['SOURCE'])
source = Source(folder['SOURCE']['DATABASETYPE'], folder['SOURCE']['DBDNAME'], folder['SOURCE']['NAME'],
folder['SOURCE']['OWNERNAME'], source_fields, folder['NAME'])
sources.append(source)
return sources
def _get_source_fields(self, source: dict):
source_fields = []
if not source.get('SOURCEFIELD'):
return source_fields
if isinstance(source.get('SOURCEFIELD'), list):
for _source_field in source['SOURCEFIELD']:
source_field = SourceField(_source_field['DATATYPE'], _source_field['NAME'],
_source_field['NULLABLE'], _source_field['KEYTYPE'], source['NAME'],
_source_field['PRECISION'])
source_fields.append(source_field)
elif isinstance(source.get('SOURCEFIELD'), dict):
source_field = SourceField(source['SOURCEFIELD']['DATATYPE'], source['SOURCEFIELD']['NAME'],
source['SOURCEFIELD']['NULLABLE'], source['SOURCEFIELD']['KEYTYPE'],
source['NAME'], source['SOURCEFIELD']['PRECISION'])
source_fields.append(source_field)
return source_fields
def _get_transformation_fields(self, transformation: dict):
transform_fields = []
if not transformation.get('TRANSFORMFIELD'):
return transform_fields
if isinstance(transformation.get('TRANSFORMFIELD'), list):
for _transform_field in transformation['TRANSFORMFIELD']:
transform_field = TransformationField(_transform_field['DATATYPE'],
_transform_field['NAME'],
_transform_field['PORTTYPE'],
_transform_field['DEFAULTVALUE'],
_transform_field['PRECISION'],
transformation['NAME'],
_transform_field.get('EXPRESSION'),
transformation['MAPPING_NAME'])
transform_fields.append(transform_field)
elif isinstance(transformation.get('TRANSFORMFIELD'), dict):
transform_field = TransformationField(transformation['TRANSFORMFIELD']['DATATYPE'],
transformation['TRANSFORMFIELD']['NAME'],
transformation['TRANSFORMFIELD']['PORTTYPE'],
transformation['TRANSFORMFIELD']['DEFAULTVALUE'],
transformation['TRANSFORMFIELD']['PRECISION'],
transformation['NAME'],
transformation['TRANSFORMFIELD'].get('EXPRESSION'),
transformation['MAPPING_NAME'])
transform_fields.append(transform_field)
return transform_fields
def _get_transformations(self, mapping: dict):
transformations = []
if not mapping.get('TRANSFORMATION'):
return transformations
if isinstance(mapping.get('TRANSFORMATION'), list):
for _transformation in mapping['TRANSFORMATION']:
_transformation['MAPPING_NAME'] = mapping['NAME']
transformation_fields = self._get_transformation_fields(_transformation)
transformation_sql = self._get_query(_transformation)
transformation = Transformation(_transformation['NAME'],
transformation_fields, transformation_sql,
mapping['NAME'])
transformations.append(transformation)
elif isinstance(mapping.get('TRANSFORMATION'), dict):
mapping['TRANSFORMATION']['MAPPING_NAME'] = mapping['NAME']
transformation_fields = self._get_transformation_fields(mapping['TRANSFORMATION'])
transformation_sql = self._get_query(mapping['TRANSFORMATION'])
transformation = Transformation(mapping['TRANSFORMATION']['NAME'],
transformation_fields, transformation_sql,
mapping['NAME'])
transformations.append(transformation)
return transformations
def _get_query(self, transformation):
if isinstance(transformation.get('TABLEATTRIBUTE'), list):
for _table_attribute in transformation['TABLEATTRIBUTE']:
if _table_attribute['NAME'] == "Sql Query":
return _table_attribute['VALUE']
return ""
def _get_targets(self, folder: dict):
targets = []
if not folder.get('TARGET'):
return targets
if isinstance(folder.get('TARGET'), list):
for _target in folder['TARGET']:
target_fields = self._get_target_fields(_target)
target = Target(_target['NAME'], _target['DATABASETYPE'], target_fields, folder['NAME'])
targets.append(target)
elif isinstance(folder.get('TARGET'), dict):
target_fields = self._get_target_fields(folder['TARGET'])
target = Target(folder['TARGET']['NAME'], folder['TARGET']['DATABASETYPE'], target_fields, folder['NAME'])
targets.append(target)
return targets
def _get_target_fields(self, target: dict):
target_fields = []
if not target.get('TARGETFIELD'):
return target_fields
if isinstance(target.get('TARGETFIELD'), list):
for _target_field in target['TARGETFIELD']:
target_field = TargetField(_target_field['DATATYPE'], _target_field['NAME'], _target_field['NULLABLE'],
_target_field['KEYTYPE'], target['NAME'], _target_field['PRECISION'])
target_fields.append(target_field)
elif isinstance(target.get('TARGETFIELD'), dict):
# data_type, name, nullable, key_type, precision
target_field = TargetField(target['TARGETFIELD']['DATATYPE'], target['TARGETFIELD']['NAME'],
target['TARGETFIELD']['NULLABLE'], target['TARGETFIELD']['KEYTYPE'],
target['NAME'], target['TARGETFIELD']['PRECISION'])
target_fields.append(target_field)
return target_fields
def _get_session_name(self, folder: dict, mapping_name: str):
if isinstance(folder.get('SESSION'), list):
for session in folder['SESSION']:
if session['MAPPINGNAME'] == mapping_name:
return session['NAME']
elif isinstance(folder.get('SESSION'), dict):
if folder['SESSION']['MAPPINGNAME'] == mapping_name:
return folder['SESSION']['NAME']
return None
def _get_task_name(self, task_instance: dict):
if not task_instance['NAME']:
return None
if 's_' in task_instance['TASKNAME'] and task_instance['TASKTYPE'] == 'Session':
return task_instance['TASKNAME']
return None
def _get_workflow_name(self, folder: dict, session_name: str):
if isinstance(folder.get('WORKFLOW'), list):
for workflow in folder['WORKFLOW']:
if isinstance(workflow['TASKINSTANCE'], list):
for task_instance in workflow['TASKINSTANCE']:
task_name = self._get_task_name(task_instance)
if task_name == session_name:
return workflow['NAME']
elif isinstance(workflow['TASKINSTANCE'], dict):
task_name = self._get_task_name(workflow['TASKINSTANCE'])
if task_name == session_name:
return workflow['NAME']
elif isinstance(folder.get('WORKFLOW'), dict):
if isinstance(folder['WORKFLOW']['TASKINSTANCE'], list):
for task_instance in folder['WORKFLOW']['TASKINSTANCE']:
task_name = self._get_task_name(task_instance)
if task_name:
return task_name
elif isinstance(folder['WORKFLOW']['TASKINSTANCE'], dict):
task_name = self._get_task_name(folder['WORKFLOW']['TASKINSTANCE'])
if task_name:
return task_name
return None
def _get_mappings(self, folder: dict):
mappings = []
if not folder.get('MAPPING'):
return mappings
if isinstance(folder.get('MAPPING'), list):
for _mapping in folder['MAPPING']:
session_name = self._get_session_name(folder, _mapping['NAME'])
workflow_name = self._get_workflow_name(folder, session_name)
connectors = self._get_connectors(_mapping)
transformations = self._get_transformations(_mapping)
mapping = Mapping(_mapping['NAME'], connectors, transformations, folder['NAME'], session_name,
workflow_name)
mappings.append(mapping)
elif isinstance(folder.get('MAPPING'), dict):
session_name = self._get_session_name(folder, folder['MAPPING']['NAME'])
workflow_name = self._get_workflow_name(folder, session_name)
connectors = self._get_connectors(folder['MAPPING'])
transformations = self._get_transformations(folder['MAPPING'])
mapping = Mapping(folder['MAPPING']['NAME'], connectors, transformations, folder['NAME'], session_name,
workflow_name)
mappings.append(mapping)
return mappings
def _get_connectors(self, mapping: dict):
connectors = []
if not mapping.get('CONNECTOR'):
return connectors
if isinstance(mapping.get('CONNECTOR'), list):
for _connector in mapping['CONNECTOR']:
connector = Connector(_connector['FROMFIELD'], _connector['FROMINSTANCE'],
_connector['FROMINSTANCETYPE'], _connector['TOFIELD'], _connector['TOINSTANCE'],
_connector['TOINSTANCETYPE'], mapping['NAME'])
connectors.append(connector)
        elif isinstance(mapping.get('CONNECTOR'), dict):
            connector = Connector(mapping['CONNECTOR']['FROMFIELD'], mapping['CONNECTOR']['FROMINSTANCE'],
                                  mapping['CONNECTOR']['FROMINSTANCETYPE'], mapping['CONNECTOR']['TOFIELD'],
                                  mapping['CONNECTOR']['TOINSTANCE'],
                                  mapping['CONNECTOR']['TOINSTANCETYPE'],
                                  mapping['NAME'])
connectors.append(connector)
return connectors
def _parser(self, input_file):
with open(input_file, 'r') as data_file:
data = json.loads(data_file.read())
return data
def _write_files(self, data_frames: list, names_sheets: list, writer):
        if len(data_frames) != len(names_sheets):
            raise ValueError("Dataframes and names sheets must be equal lengths")
for df, sheet_name in zip(data_frames, names_sheets):
# Write each dataframe to a different worksheet.
df.to_excel(writer, sheet_name=sheet_name)
def generate_documentation(self, folders: list):
for folder in folders:
dfs, sheets = [], []
# Sources
sources = [s.to_dict() for s in folder.sources]
source_fields = []
for s in sources:
source_fields.extend(s.pop('source_fields', None))
dfs.append(pd.DataFrame(sources))
sheets.append("Sources")
# source fields
source_fields = [sf.to_dict() for sf in source_fields]
dfs.append(pd.DataFrame(source_fields))
sheets.append("Source Fields")
# targets
targets = [t.to_dict() for t in folder.targets]
target_fields = []
for t in targets:
target_fields.extend(t.pop('target_fields', None))
dfs.append(pd.DataFrame(targets))
sheets.append("Targets")
# target fields
target_fields = [tf.to_dict() for tf in target_fields]
dfs.append(pd.DataFrame(target_fields))
sheets.append("Target Fields")
# mappings
mappings = [m.to_dict() for m in folder.mappings]
connectors = []
transformations = []
for m in mappings:
connectors.extend(m.pop('connectors', None))
transformations.extend(m.pop('transformations', None))
dfs.append(pd.DataFrame(mappings))
sheets.append("Mappings")
# connectors
connectors = [c.to_dict() for c in connectors]
dfs.append(pd.DataFrame(connectors))
sheets.append("Connectors")
# transformations
transformations = [t.to_dict() for t in transformations]
transformation_fields = []
for t in transformations:
transformation_fields.extend(t.pop('transformation_fields', None))
dfs.append( | pd.DataFrame(transformations) | pandas.DataFrame |
import sys
import random as rd
import matplotlib
#matplotlib.use('Agg')
matplotlib.use('TkAgg') # revert above
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import os
import numpy as np
import glob
from pathlib import Path
from scipy.interpolate import UnivariateSpline
from scipy.optimize import curve_fit
import pickle
import pandas as pd
from findiff import FinDiff
from scipy.stats import chisquare
from scipy.stats import spearmanr
def powlaw(x, a, b) :
return np.power(10,a) * np.power(x, b)
def linlaw(x, a, b) :
return a + x * b
def curve_fit_log(xdata, ydata, sigma):
"""Fit data to a power law with weights according to a log scale"""
# Weights according to a log scale
# Apply fscalex
xdata_log = np.log10(xdata)
# Apply fscaley
ydata_log = np.log10(ydata)
sigma_log = np.log10(sigma)
# Fit linear
popt_log, pcov_log = curve_fit(linlaw, xdata_log, ydata_log,
sigma=sigma_log)
#print(popt_log, pcov_log)
# Apply fscaley^-1 to fitted data
ydatafit_log = np.power(10, linlaw(xdata_log, *popt_log))
# There is no need to apply fscalex^-1 as original data is already available
return (popt_log, pcov_log, ydatafit_log)
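# Illustrative usage (editor's sketch; the synthetic power-law data are assumptions):
#     x = np.linspace(1, 100, 50)
#     y = 3.0 * x**1.5
#     popt_log, pcov_log, yfit = curve_fit_log(x, y, sigma=0.05 * y)
#     # popt_log[0] ~ log10(3.0) (intercept) and popt_log[1] ~ 1.5 (power-law index)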
def big_data_plotter(data_frame, x_name, y_name, index, ax, label, colour, style, lw, figsize):
# plts big_data.data
data_table = data_frame['dfs'][index]
# print(y_name, x_name)
return data_table.plot(ax=ax, kind='line', x=x_name, y=y_name, label=label,
c=colour, style=style, lw=lw, figsize=figsize)
def clipped_h_data_plotter(data_frame, index):
# plts big_data.data
h_data = data_frame['dfs'][index]['Height [Mm]'].dropna()
x = h_data.index.values
    k = 3  # cubic (3rd degree) spline
n = len(h_data)
s = 1#n - np.sqrt(2*n) # smoothing factor
spline_1 = UnivariateSpline(x, h_data, k=k, s=s).derivative(n=1)
sign_change_indx = np.where(np.diff(np.sign(spline_1(x))))[0]
if len(sign_change_indx)>1:
sign_change_indx = sign_change_indx[1]
else:
sign_change_indx = len(h_data)
return x[:sign_change_indx], h_data[:sign_change_indx]
def ballistic_flight(v0, g, t):
# assumes perfectly verticle launch and are matching units
# v0-initial velocity
# g-gravitational acceleration
# t-np time array
x = v0*t
y = v0*t-0.5*g*t**2
y = np.where(y<0,0,y)
t_apex = v0/g
x_apex = v0*t_apex
y_apex = v0*t_apex-0.5*g*(t_apex)**2
return x, y, t_apex, x_apex, y_apex
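# Illustrative usage (editor's sketch): a 50 km/s vertical launch with g ~ 0.275 km s^-2
# (approximately the solar surface gravity defined further below as sun_g); t in seconds.
#     t = np.linspace(0, 500, 1000)
#     x, y, t_apex, x_apex, y_apex = ballistic_flight(50., 0.275, t)
#     # t_apex = v0/g ~ 182 s and y_apex ~ 4.5e3 km for these values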
degree_sign= u'\N{DEGREE SIGN}'
i = 0
shuff = 0
SMALL_SIZE = 42
MEDIUM_SIZE = SMALL_SIZE + 2
BIGGER_SIZE = MEDIUM_SIZE + 2
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=26) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
#path_2_shared_drive = '/run/user/1001/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/j'
path_2_shared_drive = '/run/user/1000/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/j'
#dir_paths = glob.glob('data/*')
##data set for paper
#dir_paths = glob.glob('big_data/run1/*')
# run2 is standard jet runs
#dir_paths = glob.glob('big_data/run2/*')
# how to read pickels
#max_h_data_set = pd.read_pickle(dir_paths[1])
#big_data_set = pd.read_pickle(dir_paths[0])
#dir_paths_max_h = glob.glob('sharc_run/jet_B60_A60_T*/max_h_data*')
#dir_paths_big_data = glob.glob('sharc_run/jet_B60_A60_T*/big_data*')
#ne
dir_paths_max_h = glob.glob('sharc_run/new_pscan/*/max_h_data*')
dir_paths_big_data = glob.glob('sharc_run/new_pscan/*/big_data*')
#dir_paths_max_h = glob.glob('sharc_run/tilt_scan/*/max_h_data*')
#dir_paths_big_data = glob.glob('sharc_run/tilt_scan/*/big_data*')
#dir_paths_max_h = glob.glob('sharc_run/sj_tilt_Scan/*/max_h_data*')
#dir_paths_big_data = glob.glob('sharc_run/sj_tilt_Scan/*/big_data*')
dummy_max_h0 = []
max_h_data_set = []
dummy_bd0 = []
big_data_set = []
dummy_max_h0 = pd.read_pickle(dir_paths_max_h[0])
dummy_bd0 = pd.read_pickle(dir_paths_big_data[0])
# Silly fix for badly saved data.
# data files are being appended each run, need to fix this in other script
keep_indx = 1
if len(dummy_max_h0)>1:
# dummy_max_h0 = dummy_max_h0.drop([0])
for clip_idx in dummy_max_h0.index:
if clip_idx == keep_indx:
pass
else:
dummy_max_h0 = dummy_max_h0.drop([clip_idx])
if len(dummy_bd0)>1:
# dummy_bd0 = dummy_bd0.drop([0])
for clip_idx in dummy_bd0.index:
if clip_idx == keep_indx:
pass
else:
dummy_bd0 = dummy_bd0.drop([clip_idx])
first_append = True
for i in range(1,len(dir_paths_max_h)):
dummy_max_h = pd.read_pickle(dir_paths_max_h[i])
dummy_bd = pd.read_pickle(dir_paths_big_data[i])
if len(dummy_max_h)>1:
dummy_max_h = dummy_max_h.drop([0])
for clip_idx in dummy_max_h.index:
if clip_idx == keep_indx:
pass
else:
dummy_max_h = dummy_max_h.drop([clip_idx])
if len(dummy_bd)>1:
# dummy_bd = dummy_bd.drop([0])
for clip_idx in dummy_bd.index:
if clip_idx == keep_indx:
pass
else:
dummy_bd = dummy_bd.drop([clip_idx])
if first_append == True:
first_append = False
max_h_data_set = dummy_max_h0.append(dummy_max_h,ignore_index=True)
big_data_set = dummy_bd0.append(dummy_bd,ignore_index=True)
else:
max_h_data_set = max_h_data_set.append(dummy_max_h,ignore_index=True)
big_data_set = big_data_set.append(dummy_bd,ignore_index=True)
max_h_data_set = max_h_data_set.sort_values(by=['Tilt [deg]'])#.reset_index(drop=True)
order = max_h_data_set.index
max_h_data_set = max_h_data_set.reset_index(drop=True)
big_data_set = big_data_set.reindex(order).reset_index(drop=True)
# constants
unit_length = 1e9 # cm
DOMIAN = [5*unit_length, 3*unit_length]
unit_temperature = 1e6 # K
unit_numberdensity = 1e9 # cm^-3
g_cm3_to_kg_m3 = 1e3
dyne_cm2_to_Pa = 1e-1
cm_to_km = 1e-5
m_to_km = 1e-3
km_to_Mm = 1e-3
cm_to_Mm = 1e-8
s_to_min = 1/60
earth_g = 9.80665 #m s-2
sun_g = 28.02*earth_g*m_to_km # km s-2
unit_density = 2.3416704877999998E-015
unit_velocity = 11645084.295622544
unit_pressure = 0.31754922400000002
unit_magenticfield = 1.9976088799077159
unit_time = unit_length/unit_velocity
# I messed up time scaling on data collection
TIME_CORRECTION_FACTOR = 10/unit_time
unit_mass = unit_density*unit_length**3
unit_specific_energy = (unit_length/unit_time)**2
# options
# IMPORTANT TO CHANGE dt
dt = unit_time/20
#dt = unit_time/200 # high dt
plot_h_vs_t = True
all_data = True # plotss all data as suppose to small selection
plot_w_vs_t = False
plot_error_bars = False
plot_hmax_vs_B = False #
plot_hmax_vs_A = False #
plot_mean_w_vs_tilt = True
power_law_fit = False
plot_hmax_vs_dt = False
data_check = False
interp_check = False # Doesnt work well enough for my purposes
diff_check = False
sf = [0.55, 0.55, 0.5, 0.5]
plot_mean_w_vs_BAdt = False
test_balstic = False
Decelleration_analysis = False
c_data = True
jet_word_search = 'jet_P300_B60_A60_T*/*data.csv'
jl_jet_word_search = 'jet_P300_B60_A60_T*/*jl.csv'
apex_vs_tile = True
plot_cdata_LA = True
quad_plot = True
quad_plot_cdata = True
lw = 3# 2.5#
xliml11, xlimu11 = 15, 125
yliml11, ylimu11 = 1, 9
#xliml21, xlimu21 =
yliml21, ylimu21 = 1, 12
#xliml21, xlimu21 =
#yliml21, ylimu21 =
#xliml22, xlimu22 =
yliml22, ylimu22 = 100, 1800
# max_h_data_set.plot(x ='amplitude [km s-1]', y='max height [Mm]', kind = 'scatter')
# test = [ind for ind, i in enumerate(big_data_set['idx']) if sum(i-[50, 60, 20])==0]
#print(big_data_set['dfs'][test[0]])
#name = "tab20c"
#cmap = get_cmap(name) # type: matplotlib.colors.ListedColormap
#colors = cmap.colors # type: list
colors = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77', '#CC6677', '#882255', '#AA4499']
styles = ['-', '--', '-.', ':','-', '--', '-.', ':','-']
styles_alt = ['-', '--', '-.', ':']
#default_cycler = (cycler(color=colors) +
# cycler(linestyle=styles))
#plt.rc('axes', prop_cycle=default_cycler)
plt.rc('lines', linewidth=lw)
#list_of_indexs = [[300,20,80],[200,20,80],[50,20,80],[300,80,80],[300,100,80]]
#list_of_indexs = [[20],[40],[60],[80]]
list_of_indexs = [[300,40]]
fig_len_h_comp=plt.figure(figsize=(60,40))
gs=GridSpec(2,2) # 2 rows, 2 columns
lhc_ax1 = fig_len_h_comp.add_subplot(gs[:,0])
lhc_ax2 = fig_len_h_comp.add_subplot(gs[0,1])
lhc_ax3 = fig_len_h_comp.add_subplot(gs[1,1], sharex=lhc_ax2)
if diff_check == True:
id_no = 42
driver_time = big_data_set['idx'][id_no][0]
h_data = big_data_set['dfs'][id_no]['Height [Mm]'].dropna()
t_data = big_data_set['dfs'][id_no]['time [s]'].dropna()
time_stop_index = np.argmin(abs(t_data-driver_time))
x = h_data.index.values
dx = x[1] - x[0]
d_dx = FinDiff(0, dx)
d2_dx2 = FinDiff(0, dx, 2)
dh_dx = d_dx(h_data)
d2h_dx2 = d2_dx2(h_data)
mean = d2h_dx2[:time_stop_index].mean()
std = d2h_dx2[:time_stop_index].std()
sigma = 1
range_of_vales = [mean-sigma*std,mean+sigma*std]
test = d2h_dx2-mean
step = np.hstack((np.ones(len(test)), -1*np.ones(len(test))))
dary_step = np.convolve(test, step, mode='valid')
step_indx = np.argmax(dary_step)
clip_indx = np.argwhere((d2h_dx2>range_of_vales[0]) & (d2h_dx2<range_of_vales[1]))
clip_indx = clip_indx.reshape(len(clip_indx))
clip_data = h_data[clip_indx]
print(mean,std,range_of_vales)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t_data, h_data, 'bo', ms=2, label='data')
ax.plot(t_data, dh_dx, 'r', label='1st order derivative')
# ax.plot(t_data[:time_stop_index], d2h_dx2[:time_stop_index], 'b', label='2nd order derivative clip')
# ax.plot(t_data[clip_indx], d2h_dx2[clip_indx], 'g--', label='2nd order derivative')
# ax.plot(t_data[clip_indx], clip_data[clip_indx], 'orange', label='new curve')
ax.legend(loc='best')
plt.show()
if interp_check == True:
# id_no = 22
id_no = 0
h_data = big_data_set['dfs'][id_no]['Height [Mm]'].dropna()
x = h_data.index.values
    k = 3  # cubic (3rd degree) spline
n = len(h_data)
s = 1#n - np.sqrt(2*n) # smoothing factor
spline_0 = UnivariateSpline(x, h_data, k=k, s=s)
spline_1 = UnivariateSpline(x, h_data, k=k, s=s).derivative(n=1)
# spline_2 = UnivariateSpline(x, h_data, k=k, s=s).derivative(n=2)
sign_change_indx = np.where(np.diff(np.sign(spline_1(x))))[0]
if len(sign_change_indx)>1:
sign_change_indx = sign_change_indx[1]
else:
sign_change_indx = len(h_data)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(h_data, 'bo', ms=2, label='data')
ax.plot(x, spline_0(x), 'k', label='5th deg spline')
# ax.plot(x, spline_1(x), 'r', label='1st order derivative')
ax.plot(x[:sign_change_indx], h_data[:sign_change_indx])
# ax.plot(x, spline_2(x), 'g', label='2nd order derivative')
ax.legend(loc='best')
plt.show()
spear_list = []
decell_array = []
vmax_array = []
predicted_decell_array = []
if plot_cdata_LA:
fig, ax = plt.subplots(figsize=(20,12))
path2_c_data = glob.glob('sharc_run/c_data/'+jl_jet_word_search)
tilt_nb = [int(path2_c_data[i].split('_')[-3][1:]) for i in range(len(path2_c_data))]
tilt_order_index = np.argsort(tilt_nb)
path2_c_data = [path2_c_data[i] for i in tilt_order_index]
tilt_nb = [tilt_nb[i] for i in tilt_order_index]
jet_length_max = []
tilt_deg = []
i=0
for cdex, cdata_name in enumerate(path2_c_data):
i = cdex % len(colors)
j = cdex % len(styles_alt)
# if i ==0: styles_alt = [styles_alt[-1]]+styles_alt[:-1]
dumb_file = | pd.read_csv(cdata_name) | pandas.read_csv |
import time
import copy
import pandas as pd
import networkx as nx
from fup.core.manager import Manager
from fup.core.functions import get_module_blueprints, get_blueprint
import fup.profiles
import fup.modules
def overwrite_config(a, b):
for key in b:
if isinstance(a.get(key), dict) and isinstance(b.get(key), dict):
overwrite_config(a[key], b[key])
else:
a[key] = b[key]
def get_sorted_module_names(modules):
G = nx.DiGraph()
G.add_node("root")
for module_name, module in modules.items():
if module.run_end_of_year:
continue
G.add_node(module_name)
for dep_name in module.depends_on_modules:
G.add_edge(dep_name, module_name)
        # FIXME theoretically an end_of_year module can appear here, too.
        # FIXME instead add a pseudo end-of-year module and add its dependencies
for modify_name in module.modifies_modules:
G.add_edge(module_name, modify_name)
# check for loop
try:
cycle = nx.find_cycle(G, orientation="original")
raise Exception(f"Dependency loop found: {cycle}")
except nx.NetworkXNoCycle:
pass
# add root dependencies
for node in G.nodes():
if node == "root":
continue
if len(G.in_edges(node)) < 1:
G.add_edge("root", node)
# Traverse Graph
sorted_module_names = list(reversed(list(nx.dfs_postorder_nodes(G, source="root"))))[1:]
return sorted_module_names
# TODO put this method somewhere else, where??!
def get_sorted_module_blueprints(config):
# imports Manager -> no cyclic imports!
module_blueprints = get_module_blueprints(config=config, root_module=fup.modules)
profile_blueprint = get_blueprint(config=config["profile"], root_module=fup.profiles)
# dry run to get dependencies
manager = Manager(config=config,
module_blueprints=module_blueprints,
profile_blueprint=profile_blueprint,
current_account_name="CurrentAccount")
manager.dependency_check()
sorted_module_names = get_sorted_module_names(modules=manager.modules)
sorted_modules = []
for module_name in sorted_module_names:
sorted_modules += [m for m in module_blueprints if m.name == module_name]
# FIXME remove special treatment of end of year
for module_name, module in manager.modules.items():
if module.run_end_of_year:
sorted_modules += [m for m in module_blueprints if m.name == module_name]
return sorted_modules
def get_start_values(config):
config = copy.deepcopy(config)
config["simulation"]["random"] = False
config["modules"]["main.environment.Inflation"]["inflation_mean"] = 1
sorted_module_blueprints = get_sorted_module_blueprints(config=config)
profile_blueprint = get_blueprint(config=config["profile"], root_module=fup.profiles)
manager = fup.core.manager.Manager(config=config,
module_blueprints=sorted_module_blueprints,
profile_blueprint=profile_blueprint,
current_account_name="CurrentAccount")
manager.next_year()
rows = []
for module_name in manager.modules:
rows += [manager.modules[module_name].info]
return pd.DataFrame(rows)
def run_simulations(config, runs=100, debug=False):
time_start = time.time()
sorted_module_blueprints = get_sorted_module_blueprints(config)
profile_blueprint = get_blueprint(config=config["profile"], root_module=fup.profiles)
dfs = []
stats = []
for i in range(runs):
manager = fup.core.manager.Manager(config=config,
module_blueprints=sorted_module_blueprints,
profile_blueprint=profile_blueprint,
current_account_name="CurrentAccount")
rows = []
for i_year in range(config["simulation"]["end_year"] - config["simulation"]["start_year"]):
manager.next_year()
rows += [manager.df_row]
df = pd.DataFrame(rows)
df["run"] = i
# tax correction
df["expenses_net"] = df["expenses"] - df["tax"] - df["tax_offset"]
df["income_net"] = df["income"] - df["tax"] - df["tax_offset"]
# Inflation corrected
df["expenses_net_cor"] = df["expenses_net"] / df["total_inflation"]
df["income_net_cor"] = df["income_net"] / df["total_inflation"]
df["assets_cor"] = df["assets"] / df["total_inflation"]
dfs += [df]
# TODO implement me stats += [manager.get_stats()]
df = pd.concat(dfs)
df_stats = | pd.DataFrame(stats) | pandas.DataFrame |
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
options.beginning_epoch = 0
else:
if not os.path.exists(filename):
raise ValueError('The training.tsv file of the resumed experiment does not exist.')
truncated_tsv = pd.read_csv(filename, sep='\t')
truncated_tsv.set_index(['epoch', 'iteration'], inplace=True)
truncated_tsv.drop(options.beginning_epoch, level=0, inplace=True)
truncated_tsv.to_csv(filename, index=True, sep='\t')
# Create writers
writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
writer_valid = SummaryWriter(os.path.join(log_dir, 'validation'))
# Initialize variables
best_valid_accuracy = -1.0
best_valid_loss = np.inf
epoch = options.beginning_epoch
model.train() # set the model to training mode
train_loader.dataset.train()
early_stopping = EarlyStopping('min', min_delta=options.tolerance, patience=options.patience)
mean_loss_valid = None
t_beginning = time()
while epoch < options.epochs and not early_stopping.step(mean_loss_valid):
logger.info("Beginning epoch %i." % epoch)
model.zero_grad()
evaluation_flag = True
step_flag = True
tend = time()
total_time = 0
for i, data in enumerate(train_loader, 0):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, train_output = model(imgs)
kl_loss = kl_divergence(z, mu, std)
loss = criterion(train_output, labels) + kl_loss
else:
train_output = model(imgs)
loss = criterion(train_output, labels)
# Back propagation
loss.backward()
del imgs, labels
if (i + 1) % options.accumulation_steps == 0:
step_flag = False
optimizer.step()
optimizer.zero_grad()
del loss
# Evaluate the model only when no gradients are accumulated
if options.evaluation_steps != 0 and (i + 1) % options.evaluation_steps == 0:
evaluation_flag = False
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = i + epoch * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], i))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], i))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
tend = time()
logger.debug('Mean time per batch loading: %.10f s'
% (total_time / len(train_loader) * train_loader.batch_size))
# If no step has been performed, raise Exception
if step_flag:
raise Exception('The model has not been updated once in the epoch. The accumulation step may be too large.')
# If no evaluation has been performed, warn the user
elif evaluation_flag and options.evaluation_steps != 0:
            warnings.warn('Your evaluation steps are too big compared to the size of the dataset. '
                          'The model is evaluated only once at the end of the epoch.')
# Always test the results and save them once at the end of the epoch
model.zero_grad()
logger.debug('Last checkpoint at the end of the epoch %d' % epoch)
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = (epoch + 1) * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], len(train_loader)))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], len(train_loader)))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
accuracy_is_best = results_valid["balanced_accuracy"] > best_valid_accuracy
loss_is_best = mean_loss_valid < best_valid_loss
best_valid_accuracy = max(results_valid["balanced_accuracy"], best_valid_accuracy)
best_valid_loss = min(mean_loss_valid, best_valid_loss)
save_checkpoint({'model': model.state_dict(),
'epoch': epoch,
'valid_loss': mean_loss_valid,
'valid_acc': results_valid["balanced_accuracy"]},
accuracy_is_best, loss_is_best,
model_dir)
# Save optimizer state_dict to be able to reload
save_checkpoint({'optimizer': optimizer.state_dict(),
'epoch': epoch,
'name': options.optimizer,
},
False, False,
model_dir,
filename='optimizer.pth.tar')
epoch += 1
os.remove(os.path.join(model_dir, "optimizer.pth.tar"))
os.remove(os.path.join(model_dir, "checkpoint.pth.tar"))
def evaluate_prediction(y, y_pred):
"""
Evaluates different metrics based on the list of true labels and predicted labels.
Args:
y: (list) true labels
y_pred: (list) corresponding predictions
Returns:
(dict) ensemble of metrics
"""
true_positive = np.sum((y_pred == 1) & (y == 1))
true_negative = np.sum((y_pred == 0) & (y == 0))
false_positive = np.sum((y_pred == 1) & (y == 0))
false_negative = np.sum((y_pred == 0) & (y == 1))
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
}
return results
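# Illustrative usage (editor's sketch; the label arrays are made up):
#     y_true = np.array([1, 1, 0, 0, 1, 0])
#     y_hat = np.array([1, 0, 0, 0, 1, 1])
#     metrics = evaluate_prediction(y_true, y_hat)
#     # accuracy = 4/6, sensitivity = specificity = balanced_accuracy = 2/3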
def test(model, dataloader, use_cuda, criterion, mode="image", use_labels=True):
"""
Computes the predictions and evaluation metrics.
Args:
model: (Module) CNN to be tested.
dataloader: (DataLoader) wrapper of a dataset.
use_cuda: (bool) if True a gpu is used.
criterion: (loss) function to calculate the loss.
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
use_labels (bool): If True the true_label will be written in output DataFrame and metrics dict will be created.
Returns
(DataFrame) results of each input.
(dict) ensemble of metrics + total loss on mode level.
"""
model.eval()
dataloader.dataset.eval()
if mode == "image":
columns = ["participant_id", "session_id", "true_label", "predicted_label"]
elif mode in ["patch", "roi", "slice"]:
columns = ['participant_id', 'session_id', '%s_id' % mode, 'true_label', 'predicted_label', 'proba0', 'proba1']
else:
raise ValueError("The mode %s is invalid." % mode)
softmax = torch.nn.Softmax(dim=1)
results_df = pd.DataFrame(columns=columns)
total_loss = 0
total_kl_loss = 0
total_time = 0
tend = time()
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
t0 = time()
total_time = total_time + t0 - tend
if use_cuda:
inputs, labels = data['image'].cuda(), data['label'].cuda()
else:
inputs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, outputs = model(inputs)
kl_loss = kl_divergence(z, mu, std)
total_kl_loss += kl_loss.item()
else:
outputs = model(inputs)
if use_labels:
loss = criterion(outputs, labels)
total_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
if mode == "image":
row = [[sub, data['session_id'][idx], labels[idx].item(), predicted[idx].item()]]
else:
normalized_output = softmax(outputs)
row = [[sub, data['session_id'][idx], data['%s_id' % mode][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]]
row_df = pd.DataFrame(row, columns=columns)
results_df = pd.concat([results_df, row_df])
del inputs, outputs, labels
tend = time()
results_df.reset_index(inplace=True, drop=True)
if not use_labels:
results_df = results_df.drop("true_label", axis=1)
metrics_dict = None
else:
metrics_dict = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
metrics_dict['total_loss'] = total_loss
metrics_dict['total_kl_loss'] = total_kl_loss
torch.cuda.empty_cache()
return results_df, metrics_dict
def sort_predicted(model, data_df, input_dir, model_options, criterion, keep_true,
batch_size=1, num_workers=0, gpu=False):
from .data import return_dataset, get_transforms
from torch.utils.data import DataLoader
from copy import copy
if keep_true is None:
return data_df
_, all_transforms = get_transforms(model_options.mode, model_options.minmaxnormalization)
dataset = return_dataset(mode=model_options.mode, input_dir=input_dir,
data_df=data_df, preprocessing=model_options.preprocessing,
train_transformations=None, all_transformations=all_transforms,
params=model_options)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
test_options = copy(model_options)
test_options.gpu = gpu
results_df, _ = test(model, dataloader, gpu, criterion, model_options.mode, use_labels=True)
sorted_df = data_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
results_df = results_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
if keep_true:
return sorted_df[results_df.true_label == results_df.predicted_label].reset_index(drop=True)
else:
return sorted_df[results_df.true_label != results_df.predicted_label].reset_index(drop=True)
#################################
# Voting systems
#################################
def mode_level_to_tsvs(output_dir, results_df, metrics, fold, selection, mode, dataset='train', cnn_index=None):
"""
Writes the outputs of the test function in tsv files.
Args:
output_dir: (str) path to the output directory.
results_df: (DataFrame) the individual results per patch.
metrics: (dict or DataFrame) the performances obtained on a series of metrics.
fold: (int) the fold for which the performances were obtained.
selection: (str) the metrics on which the model was selected (best_acc, best_loss)
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
dataset: (str) the dataset on which the evaluation was performed.
cnn_index: (int) provide the cnn_index only for a multi-cnn framework.
"""
if cnn_index is None:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection)
else:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index,
selection)
os.makedirs(performance_dir, exist_ok=True)
results_df.to_csv(os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode)), index=False,
sep='\t')
if metrics is not None:
metrics["%s_id" % mode] = cnn_index
if isinstance(metrics, dict):
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
elif isinstance(metrics, pd.DataFrame):
metrics.to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
else:
raise ValueError("Bad type for metrics: %s. Must be dict or DataFrame." % type(metrics).__name__)
def concat_multi_cnn_results(output_dir, fold, selection, mode, dataset, num_cnn):
"""Concatenate the tsv files of a multi-CNN framework"""
prediction_df = pd.DataFrame()
metrics_df = pd.DataFrame()
for cnn_index in range(num_cnn):
cnn_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index)
performance_dir = os.path.join(cnn_dir, selection)
cnn_pred_path = os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode))
cnn_metrics_path = os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode))
cnn_pred_df = pd.read_csv(cnn_pred_path, sep='\t')
prediction_df = pd.concat([prediction_df, cnn_pred_df])
os.remove(cnn_pred_path)
if os.path.exists(cnn_metrics_path):
cnn_metrics_df = | pd.read_csv(cnn_metrics_path, sep='\t') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
pd.set_option('display.max_columns', 100)
pd.options.mode.chained_assignment = None
train_path = '../input/forest-cover-type-prediction/train.csv'
test_path = '../input/forest-cover-type-prediction/test.csv'
submit_path = '../input/forest-cover-type-prediction/sampleSubmission.csv'
dtrain = pd.read_csv(train_path, index_col=0)
dtest = pd.read_csv(test_path, index_col=0)
dtrain['Cover_Type'].value_counts()
dtrain.info()
# Now this includes values for all classes; better to group by the target variable and then get the description.
dtrain.describe()
print(dtrain.skew())
grouped_dataframe = dtrain.groupby(['Cover_Type'])
# Dictionary of the Cover_type and the label
label_dict = {1 : 'Spruce/Fir', 2 : 'Lodgepole Pine', 3 : 'Ponderosa Pine', 4 : 'Cottonwood/Willow', 5 :
'Aspen', 6 : 'Douglas-fir', 7 : 'Krummholz'}
from IPython.display import display
for cover in dtrain.Cover_Type.unique():
print(f'Forest Cover Type - {cover}')
display(grouped_dataframe.get_group(cover).describe())
# Only continuous columns
d_train_cont=dtrain.iloc[:,:10]
# To plot multiple distributions filtered by the target for each continuous variable.
import math
targets = dtrain.Cover_Type.unique()
fig = plt.figure()
height = 34
width = 18
fig.set_figheight(height)
fig.set_figwidth(width)
for i, col in enumerate(d_train_cont.columns):
ax = fig.add_subplot(math.ceil(len(d_train_cont.columns.to_list())/2), 2, i+1)
for cover_type in targets:
temp = d_train_cont.loc[dtrain.Cover_Type == cover_type]
sns.distplot(temp[col], label = label_dict[cover_type])
ax.legend()
ax.set_title(col)
#plt.savefig('Graph/Univariate_cont_dist.jpg')
plt.show()
d_train_cont['Cover_type'] = dtrain.Cover_Type
fig = plt.figure()
fig.set_figheight(34)
fig.set_figwidth(18)
for i, item in enumerate(d_train_cont.columns.to_list()):
fig.add_subplot(math.ceil(len(d_train_cont.columns.to_list())/2), 2, i+1)
sns.violinplot(y= item, x = 'Cover_type', data = d_train_cont)
#plt.savefig('Graph/Bivariate_feat_cover.jpg')
plt.show()
# Correlation heatmap would be too large, find largest correlations.
plt.figure(figsize=(9, 7))
sns.heatmap(d_train_cont.corr(),annot=True, cbar = True)
plt.show()
# 2nd method: get all correlations with their row and column names into a numpy array from the upper triangle
# of the matrix, then sort this array.
corr_list = []
for row_num, row in enumerate(d_train_cont.corr().index):
for col_num, col in enumerate(d_train_cont.corr().index):
# Ignoring comparison between the same columns
if col_num > row_num:
corr_list.append([row, col, np.abs(d_train_cont.corr().iloc[row_num, col_num])])
corr_array = np.array(corr_list)
corr_array = corr_array[corr_array[:,2].argsort()][::-1]
corr_array[:10]
# Iterate over corr_array and use the column names from the 1st and 2nd elements of each entry.
# Create a new figure and add subplots inside the loop.
fig = plt.figure()
fig.set_figheight(30)
fig.set_figwidth(22)
fig.set_dpi(120)
for i, item in enumerate(corr_array[:10]):
fig.add_subplot(math.ceil(len(corr_array[:10])/2), 2, i+1 )
sns.scatterplot(x = item[0], y = item[1], data = dtrain, hue = 'Cover_Type', legend = 'full', palette=sns.husl_palette(7))
#plt.savefig('Graph/data_interaction.jpg')
plt.show()
# Filter cover type and then barplot of wilderness area to see if any trees grow exclusively in a region.
#data.describe()
data = dtrain.groupby(['Cover_Type'])[['Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3', 'Wilderness_Area4']].sum()
# Transpose to get numbers by wilderness type.
data.T.plot(kind = 'bar', figsize = (12,8))
plt.show()
# Drop Soil type 15,7 - They have no variation.
dtrain.drop(['Soil_Type7', 'Soil_Type15'], axis = 1, inplace = True)
# filtering all columns that contain the str Soil
soil_columns = dtrain.columns[dtrain.columns.str.contains('Soil')].to_list()
data_soil = dtrain.groupby(['Cover_Type'])[soil_columns[:10]].sum()
data_soil.T.plot(kind = 'bar', figsize = (18,8))
plt.show()
data_soil = dtrain.groupby(['Cover_Type'])[soil_columns[10:20]].sum()
data_soil.T.plot(kind = 'bar', figsize = (18,8))
plt.show()
data_soil = dtrain.groupby(['Cover_Type'])[soil_columns[20:30]].sum()
data_soil.T.plot(kind = 'bar', figsize = (18,8))
plt.show()
data_soil = dtrain.groupby(['Cover_Type'])[soil_columns[30:]].sum()
data_soil.T.plot(kind = 'bar', figsize = (18,8))
plt.show()
label = dtrain['Cover_Type']
dtrain.drop(['Cover_Type'], axis = 1, inplace=True)
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, classification_report,confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
x_train, x_test, y_train, y_test = train_test_split(dtrain, label, test_size = .3)
dirty_clf = RandomForestClassifier()
dirty_clf.fit(x_train, y_train)
print(dirty_clf.score(x_test, y_test))
imp_feat = pd.DataFrame(index= dtrain.columns.to_list() , data= dirty_clf.feature_importances_)
imp_feat.rename(columns={0 : 'Importance'}, inplace=True)
imp_feat.sort_values(by='Importance', axis =0, ascending=False)[:15]
baseline_features = ['Elevation', 'Horizontal_Distance_To_Roadways']
features = ['Elevation', 'Horizontal_Distance_To_Roadways', 'Horizontal_Distance_To_Hydrology',
'Horizontal_Distance_To_Fire_Points', 'Aspect','Wilderness_Area1', 'Wilderness_Area4', 'Soil_Type3',
'Soil_Type4','Soil_Type10', 'Soil_Type29',
'Soil_Type38']
x_train, x_test, y_train, y_test = train_test_split(dtrain[features], label, test_size = .3)
clf = DecisionTreeClassifier(criterion='gini', max_depth=8, min_samples_split=2, class_weight= None, max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
presort='deprecated',
random_state=None,)
grid_params = {'criterion' : ["gini", "entropy"]}
grid = GridSearchCV(estimator=clf, param_grid=grid_params, n_jobs=-1, cv = 5)
grid.fit(x_train, y_train)
grid.score(x_test, y_test)
grid.best_estimator_
y_pred = grid.predict(x_test)
clf.fit(x_train, y_train)
print(f'No of Leaves : {clf.get_n_leaves()}')
clf.feature_importances_
# With the Selected Features.
print(classification_report(y_test, y_pred, labels= list(label_dict.keys()), target_names=list(label_dict.values())))
rnd_clf = RandomForestClassifier()
grid_params_1 = {'max_depth' : [18], 'n_estimators' : [127], 'criterion':['entropy']}
grid = GridSearchCV(estimator=rnd_clf, param_grid=grid_params_1, n_jobs=-1, cv = 5)
grid.fit(x_train, y_train)
print(grid.best_score_)
print(grid.score(x_test, y_test))
#grid.cv_results_
#grid.best_estimator_
final_clf = RandomForestClassifier(max_depth=18, n_estimators=127, criterion='entropy')
final_clf.fit(x_train, y_train)
print(final_clf.score(x_train, y_train))
print(final_clf.score(x_test, y_test))
y_hat = final_clf.predict(x_test)
print(classification_report(y_test, y_hat, target_names=label_dict.values()))
plt.figure(figsize=(8,8))
sns.heatmap(pd.DataFrame(confusion_matrix(y_test, y_hat),
                         index = label_dict.values(), columns= label_dict.values()), annot=True, cbar = False)
plt.show()
imp_feat = pd.DataFrame(index= features , data= final_clf.feature_importances_)
imp_feat.rename(columns={0 : 'Importance'}, inplace=True)
imp_feat.sort_values(by='Importance', axis =0, ascending=False)
xgb_clf = XGBClassifier(n_estimators=100, max_depth = 12)
#grid_params = {'max_depth' : [12,14,16]}
#grid_xgb = GridSearchCV(xgb_clf, grid_params, cv= 5)
#grid_xgb.fit(x_train, y_train)
#print(grid_xgb.best_score_)
#grid_xgb.cv_results_
#grid_xgb.score(x_test, y_test)
xgb_clf.fit(x_train, y_train)
xgb_clf.score(x_test, y_test)
y_pred = xgb_clf.predict(x_test)
print(classification_report(y_test, y_pred, target_names=label_dict.values()))
# Final Fit
xgb_clf.fit(dtrain[features], label)
y_test_hat = xgb_clf.predict(dtest[features])
dtest['Predicted_cover_type'] = y_test_hat
sns.countplot(x = 'Predicted_cover_type', data = dtest)
sns.distplot(dtest.Elevation)
test_targets = dtest.Predicted_cover_type.unique()
plt.figure(figsize=(10,6))
for target in test_targets:
temp = dtest.loc[dtest.Predicted_cover_type == target]
sns.distplot(temp.Elevation, label = label_dict[target])
plt.legend()
plt.title('Distribution of Elevation of Predicted Cover Type')
#plt.savefig('Graph/Predicted_classes.jpg')
plt.show()
df_submit = | pd.read_csv(submit_path, index_col=0) | pandas.read_csv |
# Library for parsing arbitrary valid ipac tbl files and writing them out.
# Written by: <NAME>
# at: UCLA 2012, July 18
# The main elements the user should concern themselves with are:
#
# TblCol: a class for storing an IPAC table column, including all data and
# functions needed to input/output that column.
# name - the name of the column
# type - the data type stored in the column
# units - the units of the quantity in the column (if any)
# null - the string to write in the column if the value is not valid
# or otherwise missing.
# data - list containing the column's data
# mask - boolean list containing True if the corresponding element in
# data is valid, False if not.
# Stringer - the function used to convert column values to strings.
# Parser - the function used to parse the values in an ASCII tbl.
# ResetStringer - function that insures that the stringer will produce
# columns of sufficient width to store the data.
#
# Tbl: a class that stores a complete IPAC table, including all the columns,
# comment lines, and functions needed to read/write Tbl files.
# hdr - a list containing the comment lines, one item per line.
# colnames - a list of the names of the columns. The primary importance of
#            this item is that it controls the order in which columns are
#            written to the output file, and even whether they are written
# out at all.
# cols - a dictionary containing the table columns as TblCols. It is
# indexed using the name of the column in question.
# Read - function for reading in an IPAC table. The only essential
# argument is fname, the name of the table to be read in.
# The optional items are:
# RowMask - a function that takes a row's zero indexed order and
# the row's raw string and returns True if the row is to
# be read in, False if it is to be ignored.
# startrow - a long integer specifying the first zero indexed data
# row/line to be read in.
# breakrow - a long integer specifying the last zero indexed data
# row/line to be read in.
# Row - a convenience function for grabbing all the data in the given
# zero indexed row from the columns.
# ResetStringers - convenience function that calls the ResetStringer
# method of every column, returning both the data and
# boolean mask for the row in question.
# Print - writes the table to stdout.
# header - boolean set to True if the comment strings and column
# headers are to be printed.
# Write - writes the table to the file named in fname. Will append to the
# file if append=True (WARNING: it is impossible for this library
#         to guarantee that this operation will produce a valid IPAC tbl
# file. Use at your own risk).
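#
# Minimal usage sketch (the file names below are hypothetical, not part of this
# library):
#
#     t = Tbl("input.tbl")          # read an existing IPAC table
#     print(t.colnames)             # column order used when writing
#     row = t.Row(0)                # first data row as a TblRow
#     t.ResetStringers()            # recompute column widths before writing
#     t.Write("output.tbl")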
import sys
import os
import gzip as gz
if sys.version_info[0] >= 3:
long = int
decode = lambda x: x.decode()
encode = lambda x: bytes(x, "ascii")
else:
decode = lambda x: x
encode = lambda x: x
class FormatError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def IPACExpandType( IPACtp, shrink=False ):
"""Takes the header from an IPAC table and parses it into the full ipac
name of the type if shrink==False. If shrink != False, return the 1
character IPAC table type."""
if len(IPACtp) == 0:
raise ValueError( "IPACExpandType requires a string with length at least 1." )
pfx = IPACtp[0]
if shrink == False:
if pfx == "i":
result = "int"
elif pfx == "l":
result = "long"
elif pfx == "f":
result = "float"
elif pfx == "d":
result = "double"
elif pfx == "r":
result = "real"
elif pfx == "c":
result = "char"
elif IPACtp == "t" or ( len(IPACtp) > 1 and IPACtp[:1] == "da" ):
result = "date"
else:
raise ValueError( "Invalid IPAC type supplied to IPACExpandType: " + IPACtp )
else:
if pfx in ( "i", "l", "f", "d", "r", "c" ):
result = pfx
elif IPACtp == "t" or ( len(IPACtp) > 1 and IPACtp[:1] == "da" ):
result = "t"
else:
raise ValueError( "Invalid IPAC type supplied to IPACExpandType: " + IPACtp )
return result
def IPACtoPythonType( IPACtp ):
if IPACtp in ( "c", "char", "t", "date" ):
return type("a")
elif IPACtp in ( "i", "int" ):
return type(int(1))
elif IPACtp in ( "l", "long" ):
return type(long(1))
elif IPACtp in ( "d", "double", "f", "float", "r", "real" ):
return type( float(1.0) )
else:
raise ValueError("Argument valtype to IPACtoPythonType must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def MakeStringer( valtype, width, null="null", precision=None ):
#Check argument validity
if type(valtype) != type("a"):
raise TypeError("MakeStringer's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeStringer's null argument must be a string.")
if type(width) != type(int(1)):
raise TypeError("MakeStringer's width argument must be an int.")
if width <= 0:
raise ValueError("the width passed to MakeStringer must be > 0.")
if precision != None:
if type(precision) != type(int(1)):
raise TypeError("MakeStringer's precision argument must" +
" be an int.")
if precision <= 0:
raise ValueError("the precision passed to MakeStringer" +
" must be > 0.")
#Format string stuff doesn't work so well.
# #Make the formatting string
# valfmtstring = "{0: ^" + str(width)
# if precision != None:
# valfmtstring += "." + str(precision)
# if valtype in ( "c", "char", "date" ):
# valfmtstring += "s"
# elif valtype in ( "i", "int", "l", "long" ):
# valfmtstring += "d"
# elif valtype in ( "d", "double", "f", "float", "r", "real" ):
# valfmtstring += "g"
# else:
# raise ValueError("Argument valtype to MakeStringer must be a " + \
# "valid IPAC table column type. " + \
# "Type given: " + valtype )
# valfmtstring += "}"
padstring = "{0: ^" + str(width) + "s}"
def result( val, mask ):
if mask == True:
r = padstring.format(str(val))
if len(r) > width:
raise FormatError( "Column width insufficient. Width " +
str(width) + ", " + str(val) )
else:
r = padstring.format(null)
if len(r) > width:
raise FormatError( "Column width insufficient. Width " +
str(width) + ", " + str(null) )
return r
result.width = width
return result
def MakeParser( valtype, null="null" ):
if type(valtype) != type("a"):
raise TypeError("MakeParser's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeParser's null argument must be a string.")
if valtype in ( "i", "int" ):
baseparse = int
default = 1
elif valtype in ( "l", "long" ):
baseparse = long
default = long(1)
elif valtype in ( "d", "double", "f", "float", "r", "real" ):
def baseparse( x ):
try:
return float(x)
except ValueError:
return float.fromhex(x)
default = 1.0
elif valtype in ( "c", "char", "t", "date" ):
baseparse = lambda x: x
default = ""
else:
raise ValueError("Argument valtype to MakeParser must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def parser( x ):
y = x.strip()
if y != null:
return ( baseparse( y ), True )
else:
return ( default, False )
return parser
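# Parsers return a (value, valid) pair so the mask can be built alongside the
# data; a quick illustration (values are hypothetical):
#     p = MakeParser("double", null="null")
#     p(" 3.5 ")   # -> (3.5, True)
#     p(" null ")  # -> (1.0, False), i.e. a placeholder value that is masked out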
def MakeNullParser( valtype, null="null" ):
if type(valtype) != type("a"):
raise TypeError("MakeParser's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeParser's null argument must be a string.")
if valtype in ( "i", "int" ):
default = 1
elif valtype in ( "l", "long" ):
default = long(1)
elif valtype in ( "d", "double", "f", "float", "r", "real" ):
default = 1.0
elif valtype in ( "c", "char", "t", "date" ):
default = ""
else:
raise ValueError("Argument valtype to MakeParser must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def parser( x ):
return ( default, False )
return parser
class TblCol:
def __init__(self):
self.name = ""
self.type = ""
self.units = ""
self.null = "null"
self.mask = []
self.data = []
self.Stringer = lambda x, y: "undefined"
#to be defined
self.Parser = None
def __len__(self):
if len(self.data) == len(self.mask):
return len(self.data)
else:
raise FormatError( "Length of mask, " + str(len(self.mask)) +
", inconsistent with data, " +
str(len(self.data)) + "." )
def ResetStringer( self ):
width = max( len(self.name), len(self.type), len(self.units),
len(self.null) )
for v, m in zip(self.data, self.mask):
if m == True:
width = max( width, len(str(v)) )
#width += 10 #Deal with python's crappy formatting funcs
self.Stringer = MakeStringer( self.type, width, null=self.null );
return None
def ResetParser( self ):
self.Parser = MakeParser( self.type, null=self.null );
return None
import sys
class TblRow:
def __init__(self, colnames=[]):
self.colnames = colnames
self.data = [ None for n in colnames ]
self.mask = [ False for n in colnames ]
return None
def __getitem__(self, k):
if type(k) in ( type(1), type(long(1)) ):
return ( self.data[k], self.mask[k] )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
return( self.data[i], self.mask[i])
else:
raise KeyError( "Column name given not understood by row." )
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def __setitem__( self, k, val ):
if type(k) in ( type(1), type(long(1)) ):
if k < len(self.data) and k >= -len(self.data):
self.data[k] = val
self.mask[k] = True
return( val, True )
else:
raise IndexError( "list index out of range" )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
self.data[i] = val
self.mask[i] = True
return( val, True )
else:
raise KeyError( "Column name given not understood by row.")
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def __delitem__( self, k ):
if type(k) in ( type(1), type(long(1)) ):
if k < len(self.data) and k >= -len(self.data):
self.mask[k] = False
else:
raise IndexError( "list index out of range" )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
self.mask[i] = False
else:
raise KeyError( "Column name given not understood by row.")
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def ReadTable( fname, RowMask = lambda x, y: True, startrow=long(0),
breakrow=None, gzip=False ):
"""Function for reading IPAC tables into a dictionary of Tblcolumns. Will
only read lines for which the function RowMask returns True when passed
the row number and row. Rows are zero indexed, just like Python lists.
The first row read will be startrow, and will not read past breakrow.
Does not support universal line endings for gzipped files."""
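    # For example, RowMask=lambda rownum, rawline: rownum % 2 == 0 would keep only
    # the even-numbered data rows (an illustrative filter, not a library default).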
if gzip != True:
f = open(fname, "rb")
else:
f = gz.open( fname, "r" )
#Read past header
hdrlines = []
cnames = []
while (True):
l = decode(f.readline())
if (l[0] == "\\"):
hdrlines.append( l.rstrip("\n\r") )
elif (l[0] == "|"):
break
else:
raise FormatError("The header of file " + fname +
" has an error in it.")
linelen = len(l)
rawcolnames = (l.strip("|\n\r")).split("|")
#The "-" in headers part of the spec can cause problems for
# negative numerical null values, but it seems to be a problem
# inherent in the spec.
colnames = [ x.strip(" -") for x in rawcolnames ]
cols = {}
rawcoltypes = ((decode(f.readline())).strip("|\n\r")).split("|")
coltypes = [ IPACExpandType( x.strip(" -") ) for x in rawcoltypes ]
for n, r, t in zip(colnames, rawcolnames, coltypes):
newcol = TblCol()
newcol.width = len(r)
newcol.type = t
newcol.name = n
cols[n] = newcol
pos = f.tell()
l = decode(f.readline())
if ( l[0] != "|" ):
#We've read past the header
f.seek(pos)
else:
units = (l.strip("|\n\r")).split( "|" )
if ( len(units) != len( cols ) ):
raise FormatError( "Header format broken." )
for n, u in zip( colnames, units ):
cols[n].units = u.strip( " -" )
pos = f.tell()
l = decode(f.readline())
if ( l[0] != "|" ):
#We've read past the header
f.seek(pos)
else:
nulls = (l.strip("|\n\r")).split( "|" )
if ( len(nulls) != len( cols ) ):
raise FormatError( "Header format broken." )
for n, nl in zip( colnames, nulls ):
cols[n].null = nl.strip( " -" )
#Define the stringer and parser functions
colwidths = [ len(r) for r in rawcolnames ]
for n, w in zip(colnames, colwidths):
tp = cols[n].type
nl = cols[n].null
cols[n].Stringer = MakeStringer( tp, w, null=nl )
cols[n].Parser = MakeParser( tp, null=nl )
#read past ignored rows
if startrow > long(0) and gzip == False:
f.seek( long(linelen) * long(startrow), os.SEEK_CUR )
elif startrow > long(0) and gzip == True: #Read past the hard way
for i in range( startrow ):
dummy = f.readline()
del(dummy)
colstarts = [ 1 ]
colends = []
for w in colwidths:
colends.append( colstarts[-1] + w )
colstarts.append( colends[-1] + 1 )
del colstarts[-1]
# colstarts = [ 1 for w in colwidths ]
# colends = [ 1 + w for w in colwidths ]
# for i in range(1, len(colwidths)):
# colstarts[i] = colstarts[i - 1] + colwidths[i-1] + 1
# colends[i] = colends[i - 1] + colwidths[i] + 1
parsers = [ MakeParser( cols[nm].type, null=cols[nm].null )
for nm in colnames ]
alldata = [ [] for n in colnames ]
allmask = [ [] for n in colnames ]
rownum = long(startrow)
for line in f:
line = decode(line)
if breakrow != None and rownum >= breakrow:
break;
if RowMask(rownum, line) != True:
continue
rownum += long(1)
parts = [ line[start:end] for start, end in zip( colstarts, colends ) ]
for p, par, i in zip( parts, parsers, range(len(colnames)) ):
r = par( p )
alldata[i].append( r[0] )
allmask[i].append( r[1] )
for n, d, m in zip( colnames, alldata, allmask ):
cols[n].data = d
cols[n].mask = m
f.close()
return [ hdrlines, colnames, cols ]
class Tbl:
def Read( self, fname, RowMask = lambda x, l: True, startrow=long(0),
breakrow=None, gzip=False ):
"""Function for reading IPAC tables into a the Tbl. Will
only read lines for which the function RowMask returns True for the
row number. Rows are zero indexed, just like Python lists."""
if type(fname) == type("asdf"):
self.hdr, self.colnames, self.cols = ReadTable( fname,
RowMask=RowMask,
startrow=startrow,
breakrow=breakrow,
gzip=gzip)
else:
raise TypeError(" tbl file name must be a string.")
return None
def __init__( self, fname = "", gzip=False ):
if fname == "":
self.hdr = []
self.colnames = []
self.cols = {}
else:
self.Read( fname, gzip=gzip )
return None
def __len__(self):
return len(self.cols.keys())
def Row(self, rownum):
result = TblRow()
#Prep the structures - this avoids dereferencing the column dict twice
result.colnames = [ x for x in self.colnames ]
result.data = [ None for k in self.colnames ]
result.mask = [ False for k in self.colnames ]
for i in range(len(self.colnames)):
col = self.cols[self.colnames[i]]
result.data[i] = col.data[rownum]
result.mask[i] = col.mask[rownum]
return result
def ResetStringers(self):
"""Updates the stringer functions to ensure that they have the
null and type specified by the columns and produce fields wide
enough to hold all the values in the columns."""
#sys.stderr.write(str(self.cols.keys()) + "\n")
for k in self.colnames:
self.cols[k].ResetStringer()
return None
def __out(self, ofile, header=True):
"""Prints the contents of the table. if "header" is set to false,
only the data is printed. The column widths are set by the
colwidths list, the order is set by colnames, special null values
are set by nulls, and units set by units. """
if header == True:
colwidths = [ len(self.cols[k].Stringer( None, False ) ) \
for k in self.colnames ]
def hdrstrn( strs, wids ):
r = [ ("{0: ^" + str(w) + "s}").format(s) \
for s, w in zip( strs, wids ) ]
for v, w in zip( r, wids ):
if len(v) > w:
raise FormatError( "column width insufficient.")
return "|" + "|".join(r) + "|\n"
for l in self.hdr:
ofile.write(encode(l + "\n"))
l = hdrstrn( self.colnames, colwidths )
ofile.write(encode( l ))
coltypes = [ self.cols[k].type for k in self.colnames ]
l = hdrstrn( coltypes, colwidths )
ofile.write(encode( l ))
units = [ self.cols[k].units for k in self.colnames ]
l = hdrstrn( units, colwidths )
ofile.write(encode( l ))
nulls = [ self.cols[k].Stringer( "asdf", False ) \
for k in self.colnames ]
l = hdrstrn( nulls, colwidths )
ofile.write(encode( l ))
for i in range(len(self.cols[self.colnames[0]])):
strcols = [ self.cols[n].Stringer(self.cols[n].data[i],
self.cols[n].mask[i])
for n in self.colnames ]
ofile.write(encode( " " + " ".join( strcols ) + " \n" ))
return None
def Print(self, header=True):
self.__out( sys.stdout, header=header )
return None
def Write(self, fname, append=False, gzip=-1):
if type(gzip) != type(int(1)):
raise TypeError( "Keyword argument gzip expects an int." )
elif gzip <= -1:
op = lambda x, y: open( x, y )
else:
op = lambda x, y: gz.open( x, y, min( max(gzip, 1), 9 ) )
if append == False:
f = op( fname, "wb" )
self.__out(f, header=True)
else:
f = op( fname, "a" )
self.__out(f, header=False)
f.close()
return(None)
linebuffersize = 5 * 1024**2 #5 megabytes
class BigTbl:
def OpenRead( self, fname, gzip=False ):
self.hdr = []
self.types = {}
self.nulls = {}
self.units = {}
self.parsers = []
self.stringers = []
self.__seekable = not gzip
self.__inputbuffer = []
if gzip != True:
self.infile = open( fname, "rb" )
else:
self.infile = gz.open( fname, "r" )
#First read past the comment header
while True:
l = decode(self.infile.readline())
if l[0] == "\\":
self.hdr.append( l.rstrip( "\n\r" ) )
elif l[0] == "|":
break
else:
raise FormatError("The header of file " + fname +
" has an error in it.")
self.__inlinelen = len(l.rstrip( "\n\r" ))
#We now have the data necessary to find the column widths
rawcolnames = ( l.strip("|\n\r") ).split("|")
self.colwidths = [ len(n) for n in rawcolnames ]
self.colstarts = [ 1 ]
self.colends = []
for w in self.colwidths:
self.colends.append( self.colstarts[-1] + w )
self.colstarts.append( self.colends[-1] + 1 )
del self.colstarts[-1]
self.colnames = [ n.strip(" -") for n in rawcolnames ]
l = (decode(self.infile.readline()).strip("|\n\r")).split("|")
coltypes = [ IPACExpandType( n.strip(" -") ) for n in l ]
self.__indatstart = self.infile.tell()
#Defaults
units = [ "" for n in self.colnames ]
nulls = [ "null" for n in self.colnames ]
l = decode(self.infile.readline())
if l[0] == "|":
            units = list(map( lambda x: x.strip(" -"),
                              (l.strip("|\n\r")).split( "|" )))
if len(units) != len(self.colnames):
raise FormatError( "Header format broken." )
self.__indatstart = self.infile.tell()
l = decode(self.infile.readline())
if l[0] == "|":
                nulls = list(map( lambda x: x.strip(" -"),
                                  (l.strip("|\n\r")).split( "|" )))
if len(nulls) != len(self.colnames):
raise FormatError( "Header format broken." )
self.__indatstart = self.infile.tell()
else:
self.infile.seek( self.__indatstart )
else:
self.infile.seek( self.__indatstart )
#Now parse the header info into the local variables
for n, t, nul, u, w in zip( self.colnames, coltypes, nulls, units,
self.colwidths ):
self.types[n] = t
self.nulls[n] = nul
self.units[n] = u
self.parsers.append( MakeParser( t, null=nul ) )
self.stringers.append( MakeStringer( t, w, null=nul ) )
return None
def CloseRead(self):
if self.infile != None:
self.infile.close()
self.infile = None
return None
def __init__(self, fname="", gzip=False ):
self.outfile = None
self.__currow = long(0)
if fname == "":
self.hdr = []
self.colnames = []
self.types = {}
self.nulls = {}
self.units = {}
self.parsers = []
self.stringers = []
self.colwidths = []
self.colstarts = []
self.colends = []
self.__seekable = False
self.outfile = None
self.infile = None
self.__inlinelen = long(0)
self.__indatstart = long(0)
self.__inputbuffer = []
else:
self.OpenRead( fname, gzip=gzip )
return None
def ReadRow( self, rownum=-long(1) ):
if self.__currow != rownum and rownum >= long(0):
if self.__seekable == True:
self.infile.seek( self.__indatstart +
self.__inlinelen * rownum )
else:
sys.stderr.write( "Warning: seeking in compressed tables " +
"is slower than uncompressed.\n" )
if rownum < self.__currow:
self.infile.seek( self.__indatstart )
seeknum = rownum
else:
seeknum = rownum - self.__currow
for i in range(seeknum):
dummy = self.infile.readline()
del(dummy, i)
self.__currow = rownum
self.__inputbuffer = []
if len( self.__inputbuffer ) == 0:
self.__inputbuffer = self.infile.readlines( linebuffersize )
#End of file reached, return None
if len( self.__inputbuffer ) == 0:
return None
line = ( self.__inputbuffer[0] ).rstrip( "\n\r" )
del self.__inputbuffer[0]
#Check formatting
if len(line) != self.__inlinelen:
raise FormatError( "Malformed line: " + line )
result = TblRow()
result.data = [ None for n in self.colnames ]
result.mask = [ False for n in self.colnames ]
result.colnames = [ n for n in self.colnames ] #Ensures independence
result.data, result.mask = zip(*[ p(line[s:e])
for p, s, e in zip( self.parsers,
self.colstarts,
self.colends )
])
result.data = list(result.data)
result.mask = list(result.mask)
# colstart = 1
# for i, n in zip(range(len(self.colnames)), self.colnames):
# colend = colstart + self.colwidths[i]
# result.data[i], result.mask[i] = self.parsers[i](
# line[colstart:colend] )
# colstart = colend + 1
self.__currow += 1
return result
def ReadLine(self):
if len( self.__inputbuffer ) == 0:
self.__inputbuffer = self.infile.readlines( linebuffersize )
#End of file reached, return None
if len( self.__inputbuffer ) == 0:
return None
line = ( self.__inputbuffer[0] ).rstrip( "\n\r" )
del self.__inputbuffer[0]
return line
    def RefreshParsers(self):
        self.parsers = [ MakeParser( self.types[n], null=self.nulls[n] )
                         for n in self.colnames ]
        return None
    def RefreshStringers(self):
        self.stringers = [ MakeStringer( self.types[n], w, null=self.nulls[n] )
                           for n, w in zip( self.colnames, self.colwidths ) ]
        return None
def WriteHeader( self ):
for l in self.hdr:
self.outfile.write(encode( l + "\n" ))
hdrstringers = [ MakeStringer( "char", w ) for w in self.colwidths ]
def hdrstrn( input ):
if type(input) == type([]):
strs = input
else:
strs = [ input[n] for n in self.colnames ]
strs = [ S( x, True ) for x, S in zip( strs, hdrstringers )]
return( "|" + "|".join( strs ) + "|\n" )
self.outfile.write(encode( hdrstrn( self.colnames ) ))
self.outfile.write(encode( hdrstrn( self.types ) ))
self.outfile.write(encode( hdrstrn( self.units ) ))
self.outfile.write(encode( hdrstrn( self.nulls ) ))
return None
def OpenWrite( self, fname, appendmode=False, gzip=-1 ):
if type(gzip) != type(int(1)):
raise TypeError( "Keyword argument gzip expects an int." )
elif gzip <= -1:
op = lambda x, y: open( x, y )
else:
op = lambda x, y: gz.open( x, y, min( max(gzip, 1), 9 ) )
if appendmode == True:
self.outfile = op( fname, "ab" )
else:
self.outfile = op( fname, "wb" )
self.WriteHeader()
return None
def CloseWrite( self ):
if self.outfile != None:
self.outfile.close()
self.outfile = None
return None
def WriteRow( self, row ):
outarr = [ row[n] for n in self.colnames ]
parts = [ S( r[0], r[1] )
for r, S in zip( outarr, self.stringers ) ]
self.outfile.write(encode( " " + " ".join( parts ) + " \n" ))
return None
def Close( self ):
self.CloseRead()
self.CloseWrite()
return None
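# Illustrative streaming copy with BigTbl (file names are hypothetical); rows are
# read and written one at a time, so the whole table never has to fit in memory:
#
#     big = BigTbl("input.tbl")
#     big.OpenWrite("copy.tbl")
#     while True:
#         row = big.ReadRow()
#         if row is None:
#             break
#         big.WriteRow(row)
#     big.Close()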
try:
import numpy as np
def arrayize_cols( tbl ):
type_dict = { "int": np.int32, \
"long": np.int64, \
"float": np.float32, \
"double": np.float64, \
"real": np.float64, \
"char": "S", \
"date": "S" }
for c in tbl.colnames:
longtype = IPACExpandType( tbl.cols[c].type )
dtype = type_dict[ longtype ]
tbl.cols[c].data = np.asarray(tbl.cols[c].data, dtype=dtype)
tbl.cols[c].mask = np.asarray(tbl.cols[c].mask)
return None
except ImportError:
    sys.stderr.write("Warning: numpy not found - ipac array features disabled.\n")
try:
import pandas as pd
def tbl_to_DFrame( tbl ):
typedict = { "int": "int32", \
"long": "int64", \
"float": "float32", \
"double": "float64", \
"real": "float64", \
"char": "str" }
df = pd.DataFrame()
for n in tbl.colnames:
col = tbl.cols[n]
dat = col.data
if col.type not in ( "t", "date" ):
longtype = IPACExpandType( col.type )
dtype = typedict[longtype]
dat = pd.array(col.data, dtype=dtype)
else:
                dat = pd.array([pd.to_datetime(v) for v in col.data])
            df[n] = dat
        return df
except ImportError:
    sys.stderr.write("Warning: pandas not found - ipac DataFrame features disabled.\n")
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 15:46:53 2020
@author: Barney
"""
import os
import pandas as pd
import scipy.stats as stats
import numpy as np
#Misc
MINUTES_IN_HOUR = 60 #minutes
DAYS_IN_WEEK = 7 #days
HOURS_IN_DAY = 24 #hours
DIARY_INTERVAL = 10 #minutes
DIARY_OFFSET = 4 #hours (i.e. diary starts at 4am)
#Addresses
data_root = os.path.join("D:\\", "Barney" , "data", "UKDA-8128-tab", "tab") # DOI : 10.5255/UKDA-SN-8128-1
output_root = os.path.join("C:\\", "Users", "Barney", "Documents", "GitHub", "cwsd_demand", "data", "processed")
timeuse_fid = os.path.join(data_root, "uktus15_diary_wide.tab")
indiv_fid = os.path.join(data_root, "uktus15_individual.tab")
week_fid = os.path.join(data_root, "uktus15_wksched.tab")
#Load data
indiv_df = pd.read_csv(indiv_fid, sep = '\t', low_memory = False)
timeuse_df = pd.read_csv(timeuse_fid, sep= '\t', low_memory = False)
week_df = pd.read_csv(week_fid, sep ='\t', low_memory = False)
#Extract London (should test with and without)
london_number = 7
indiv_df = indiv_df.loc[indiv_df.dgorpaf == london_number]
timeuse_df = timeuse_df.loc[timeuse_df.serial.isin(indiv_df.serial.unique())]
#Format timeuse_df
timeuse_df = timeuse_df.rename(columns = {'DiaryDate_Act' : 'date'})
timeuse_df.date = pd.to_datetime(timeuse_df.date, format = '%m/%d/%Y', errors = "coerce")
timeuse_df = timeuse_df.dropna()
timeuse_df = timeuse_df.set_index(['serial', 'pnum', 'date'])
def format_record(df, columns):
time_num = [int(x[1]) for x in columns.str.split('_')]
df_ = df[columns].copy()
df_.columns = time_num
return df_
location_df = format_record(timeuse_df, timeuse_df.columns[timeuse_df.columns.str.contains('wher')])
activity_df = format_record(timeuse_df, timeuse_df.columns[28:28+144])
#Generate action dataframe
df = pd.DataFrame(columns = activity_df.columns, index = activity_df.index)
int_map = {'sleep' : 0, 'work' : 1, 'home' : 2, 'away' : 3}
athome_ind = location_df == 11
df[activity_df.isin([110,111])] = int_map['sleep']
df[(activity_df >= 1000) & (activity_df < 2000) & ~athome_ind] = int_map['work']
df[df.isna() & athome_ind] = int_map['home']
df[df.isna() & ~athome_ind] = int_map['away']
#Format columns to timeoffsets
hour = (np.array([int((x-1) * DIARY_INTERVAL / MINUTES_IN_HOUR) for x in df.columns]) + DIARY_OFFSET) % HOURS_IN_DAY
mins = ((df.columns - 1) * DIARY_INTERVAL) % MINUTES_IN_HOUR
df.columns = [pd.DateOffset(minutes = x, hours = y) for x, y in zip(mins.tolist(),hour.tolist())]
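#For example, diary slot 1 maps to a 04:00 offset on the diary date and slot 7
#to 05:00: the 144 ten-minute slots start DIARY_OFFSET hours into the day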
#Extract state changes
time_results_df = []
for idx, row in df.iterrows():
changes = row[row.diff() != 0]
changes = changes.map({x:y for y ,x in int_map.items()})
changes = changes.reset_index()
changes.columns = ['time','measurement']
changes['serial'] = idx[0]
changes['pnum'] = idx[1]
changes['datetime'] = idx[2] + changes.time
changes['period'] = 'week'
changes.loc[changes.datetime.dt.weekday >= 5, 'period'] = 'weekend'
changes = changes.drop('time', axis=1)
changes['workstatus'] = 'nonwork'
if (changes.measurement == 'work').any():
changes['workstatus'] = 'work'
time_results_df.append(changes)
#Concatenate the per-diary records into a single dataframe
time_results_df = pd.concat(time_results_df)
# -*- coding: utf-8 -*-
"""
Project : PyCoA
Date : april 2020 - march 2021
Authors : <NAME>, <NAME>, <NAME>
Copyright ©pycoa.fr
License: See joint LICENSE file
Module : coa.display
About :
-------
An interface module to easily plot pycoa data with bokeh
"""
from coa.tools import kwargs_test, extract_dates, verb, get_db_list_dict
from coa.error import *
import math
import pandas as pd
import geopandas as gpd
import numpy as np
from collections import defaultdict
import itertools
import json
import io
from io import BytesIO
import base64
from IPython import display
import copy
import locale
from bokeh.models import ColumnDataSource, TableColumn, DataTable, ColorBar, LogTicker,\
HoverTool, CrosshairTool, BasicTicker, GeoJSONDataSource, LinearColorMapper, LogColorMapper,Label, \
PrintfTickFormatter, BasicTickFormatter, NumeralTickFormatter, CustomJS, CustomJSHover, Select, \
Range1d, DatetimeTickFormatter, Legend, LegendItem, Text
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import figure
from bokeh.layouts import row, column, gridplot
from bokeh.palettes import Category10, Category20, Viridis256
from bokeh.models import Title
from bokeh.io import export_png
from bokeh import events
from bokeh.models.widgets import DateSlider
from bokeh.models import LabelSet, WMTSTileSource
from bokeh.transform import transform, cumsum
import shapely.geometry as sg
import branca.colormap
from branca.colormap import LinearColormap
from branca.element import Element, Figure
import folium
from PIL import Image
import coa.geo as coge
import matplotlib.pyplot as plt
import datetime as dt
import bisect
from functools import wraps
from IPython.core.display import display, HTML
width_height_default = [500, 380]
MAXCOUNTRIESDISPLAYED = 27
class CocoDisplay:
def __init__(self, db=None, geo = None):
verb("Init of CocoDisplay() with db=" + str(db))
self.database_name = db
self.dbld = get_db_list_dict()
self.lcolors = Category20[20]
self.scolors = Category10[5]
self.ax_type = ['linear', 'log']
self.geom = []
self.geopan = gpd.GeoDataFrame()
self.location_geometry = None
self.boundary_metropole = None
self.listfigs = []
self.options_stats = ['when','input','input_field']
self.options_charts = [ 'bins']
self.options_front = ['where','option','which','what','visu']
self.available_tiles = ['openstreet','esri','stamen']
self.available_modes = ['mouse','vline','hline']
self.uptitle, self.subtitle = ' ',' '
self.dfigure_default = {'plot_height':width_height_default[1] ,'plot_width':width_height_default[0],'title':None,'textcopyright':'default'}
self.dvisu_default = {'mode':'mouse','tile':self.available_tiles[0],'orientation':'horizontal','cursor_date':None,'maplabel':None,'guideline':False}
self.when_beg = dt.date(1, 1, 1)
self.when_end = dt.date(1, 1, 1)
self.alloptions = self.options_stats + self.options_charts + self.options_front + list(self.dfigure_default.keys()) +\
list(self.dvisu_default.keys()) + ['resumetype']
self.iso3country = self.dbld[self.database_name][0]
self.granularity = self.dbld[self.database_name][1]
self.namecountry = self.dbld[self.database_name][2]
try:
if self.granularity != 'nation':
self.geo = coge.GeoCountry(self.iso3country)
if self.granularity == 'region':
self.location_geometry = self.geo.get_region_list()[['code_region', 'name_region', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_region': 'location'})
if self.iso3country == 'PRT':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='PT.99']
self.boundary_metropole =tmp['geometry'].total_bounds
if self.iso3country == 'FRA':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='999']
self.boundary_metropole =tmp['geometry'].total_bounds
elif self.granularity == 'subregion':
list_dep_metro = None
self.location_geometry = self.geo.get_subregion_list()[['code_subregion', 'name_subregion', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_subregion': 'location'})
#if country == 'FRA':
# list_dep_metro = geo.get_subregions_from_region(name='Métropole')
#elif country == 'ESP':
# list_dep_metro = geo.get_subregions_from_region(name='España peninsular')
#if list_dep_metro:
# self.boundary_metropole = self.location_geometry.loc[self.location_geometry.code_subregion.isin(list_dep_metro)]['geometry'].total_bounds
else:
self.geo=coge.GeoManager('name')
geopan = gpd.GeoDataFrame()#crs="EPSG:4326")
info = coge.GeoInfo()
allcountries = self.geo.get_GeoRegion().get_countries_from_region('world')
geopan['location'] = [self.geo.to_standard(c)[0] for c in allcountries]
geopan = info.add_field(field=['geometry'],input=geopan ,geofield='location')
geopan = gpd.GeoDataFrame(geopan, geometry=geopan.geometry, crs="EPSG:4326")
geopan = geopan[geopan.location != 'Antarctica']
geopan = geopan.dropna().reset_index(drop=True)
self.location_geometry = geopan
except:
raise CoaTypeError('What data base are you looking for ?')
''' FIGURE COMMUN FOR ALL '''
def standardfig(self, **kwargs):
"""
Create a standard Bokeh figure, with pycoa.fr copyright, used in all the bokeh charts
"""
plot_width = kwargs.get('plot_width', self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height', self.dfigure_default['plot_height'])
textcopyright = kwargs.get('textcopyright', self.dfigure_default['textcopyright'])
if textcopyright == 'default':
textcopyright = '©pycoa.fr (data from: {})'.format(self.database_name)
else:
textcopyright = '©pycoa.fr ' + textcopyright
citation = Label(x=0.65 * plot_width - len(textcopyright), y=0.01 * plot_height,
x_units='screen', y_units='screen',
text_font_size='1.5vh', background_fill_color='white', background_fill_alpha=.75,
text=textcopyright)
for i in list(self.dvisu_default.keys()) + self.options_front + self.options_charts + ['textcopyright'] + self.options_stats + ['date_slider']:
if i in kwargs.keys():
kwargs.pop(i)
kwargs.pop('title')
fig = figure(**kwargs, tools=['save', 'box_zoom,reset'], toolbar_location="right")
#fig.add_layout(citation)
fig.add_layout(Title(text=self.uptitle, text_font_size="10pt"), 'above')
fig.add_layout(Title(text=self.subtitle, text_font_size="8pt", text_font_style="italic"), 'below')
return fig
def get_listfigures(self):
return self.listfigs
def set_listfigures(self,fig):
if not isinstance(fig,list):
fig = [fig]
self.listfigs = fig
''' WRAPPER COMMUN FOR ALL'''
def decowrapper(func):
'''
Main decorator it mainly deals with arg testings
'''
@wraps(func)
def wrapper(self, input = None, input_field = None, **kwargs):
"""
Parse a standard input, return :
            - pandas: with location keyword (renaming a column named 'where' to 'location' if needed)
            - kwargs:
              * keys = [plot_width, plot_height, title, when, title_temporal, bins, what, which]
            Note that each method uses only the variables it needs; the others are ignored.
"""
if not isinstance(input, pd.DataFrame):
                raise CoaTypeError(str(input) + ' must be a pandas DataFrame with a pycoa structure!')
kwargs_test(kwargs, self.alloptions, 'Bad args used in the display function.')
when = kwargs.get('when', None)
which = kwargs.get('which', input.columns[2])
if input_field and 'cur_' in input_field:
what = which
else:
# cumul is the default
what = kwargs.get('what', which)
if input_field is None:
input_field = which
if isinstance(input_field,list):
test = input_field[0]
else:
test = input_field
if input[[test,'date']].isnull().values.all():
                raise CoaKeyError('All values for ' + which + ' are nan or empty')
option = kwargs.get('option', None)
bins = kwargs.get('bins', 10)
title = kwargs.get('title', None)
#textcopyright = kwargs.get('textcopyright', 'default')
kwargs['plot_width'] = kwargs.get('plot_width', self.dfigure_default['plot_width'])
kwargs['plot_height'] = kwargs.get('plot_height', self.dfigure_default['plot_height'])
if 'where' in input.columns:
input = input.rename(columns={'where': 'location'})
            if 'codelocation' not in input.columns and 'clustername' not in input.columns:
input['codelocation'] = input['location']
input['clustername'] = input['location']
input['rolloverdisplay'] = input['location']
input['permanentdisplay'] = input['location']
else:
if self.granularity == 'nation' :
#input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace('[', '').replace(']', '') if len(x)< 10 else x[0]+'...'+x[-1] )
input['permanentdisplay'] = input.apply(lambda x: x.clustername if self.geo.get_GeoRegion().is_region(x.clustername) else str(x.codelocation), axis = 1)
else:
if self.granularity == 'subregion' :
input = input.reset_index(drop=True)
if isinstance(input['codelocation'][0],list):
input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace("'", '')\
if len(x)<5 else '['+str(x[0]).replace("'", '')+',...,'+str(x[-1]).replace("'", '')+']')
trad={}
cluster = input.clustername.unique()
if isinstance(input.location[0],list):
cluster = [i for i in cluster]
for i in cluster:
if i == self.namecountry:
input['permanentdisplay'] = input.clustername #[self.dbld[self.database_name][2]]*len(input)
else:
if self.geo.is_region(i):
trad[i] = self.geo.is_region(i)
elif self.geo.is_subregion(i):
trad[i] = self.geo.is_subregion(i)#input.loc[input.clustername==i]['codelocation'].iloc[0]
else:
trad[i] = i
trad={k:(v[:3]+'...'+v[-3:] if len(v)>8 else v) for k,v in trad.items()}
if ',' in input.codelocation[0]:
input['permanentdisplay'] = input.clustername
else:
input['permanentdisplay'] = input.codelocation#input.clustername.map(trad)
elif self.granularity == 'region' :
if all(i == self.namecountry for i in input.clustername.unique()):
input['permanentdisplay'] = [self.namecountry]*len(input)
else:
input['permanentdisplay'] = input.codelocation
input['rolloverdisplay'] = input['location']
maplabel = kwargs.get('maplabel', None)
if maplabel and 'unsorted' in maplabel:
pass
else:
input = input.sort_values(by=input_field, ascending = False).reset_index(drop=True)
uniqloc = input.clustername.unique()
if len(uniqloc) < 5:
colors = self.scolors
else:
colors = self.lcolors
colors = itertools.cycle(colors)
dico_colors = {i: next(colors) for i in uniqloc}
input = input.copy()
if not 'colors' in input.columns:
input.loc[:,'colors'] = input['clustername'].map(dico_colors)#(pd.merge(input, country_col, on='location'))
if not isinstance(input_field, list):
input_field = [input_field]
else:
input_field = input_field
col2=which
when_beg = input[[col2,'date']].date.min()
when_end = input[[col2,'date']].date.max()
if when:
when_beg, when_end = extract_dates(when)
if when_end > input[[col2,'date']].date.max():
when_end = input[[col2,'date']].date.max()
if when_beg == dt.date(1, 1, 1):
when_beg = input[[col2,'date']].date.min()
if not isinstance(when_beg, dt.date):
raise CoaNoData("With your current cuts, there are no data to plot.")
if when_end <= when_beg:
print('Requested date below available one, take', when_beg)
when_end = when_beg
if when_beg > input[[col2,'date']].date.max() or when_end > input[[col2,'date']].date.max():
raise CoaNoData("No available data after "+str(input[[input_field[0],'date']].date.max()))
when_end_change = when_end
for i in input_field:
if input[i].isnull().all():
raise CoaTypeError("Sorry all data are NaN for " + i)
else:
when_end_change = min(when_end_change,CocoDisplay.changeto_nonull_date(input, when_end, i))
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if when_end_change != when_end:
when_end = when_end_change
self.when_beg = when_beg
self.when_end = when_end
input = input.loc[(input['date'] >= self.when_beg) & (input['date'] <= self.when_end)]
title_temporal = ' (' + 'between ' + when_beg.strftime('%d/%m/%Y') + ' and ' + when_end.strftime('%d/%m/%Y') + ')'
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
title_temporal = ' (' + when_end.strftime('%d/%m/%Y') + ')'
title_option=''
if option:
if 'sumallandsmooth7' in option:
option.remove('sumallandsmooth7')
option += ['sumall','smooth7']
title_option = ' (option: ' + str(option)+')'
input_field_tostring = str(input_field).replace('[', '').replace(']', '').replace('\'', '')
whichtitle = which
if 'pop' in input_field_tostring:
whichtitle = input_field_tostring.replace('weekly ','').replace('daily ','')
if 'daily' in input_field_tostring:
titlefig = whichtitle + ', ' + 'day to day difference' + title_option
elif 'weekly' in input_field_tostring:
titlefig = whichtitle + ', ' + 'week to week difference' + title_option
else:
if 'cur_' in which or 'idx_' in which:
#titlefig = which + ', ' + 'current ' + which.replace('cur_','').replace('idx_','')+ title_option
titlefig = whichtitle + ', current value' + title_option
else:
titlefig = whichtitle + ', cumulative'+ title_option
if title:
title = title
else:
title = titlefig
self.uptitle = title
textcopyright = kwargs.get('textcopyright', None)
if textcopyright:
textcopyright = '©pycoa.fr ' + textcopyright + title_temporal
kwargs.pop('textcopyright')
else:
textcopyright = '©pycoa.fr data from: {}'.format(self.database_name)+' '+title_temporal
self.subtitle = textcopyright
kwargs['title'] = title+title_temporal
return func(self, input, input_field, **kwargs)
return wrapper
@decowrapper
def pycoa_resume_data(self, input, input_field, **kwargs):
loc=list(input['clustername'].unique())
input['cases'] = input[input_field]
resumetype = kwargs.get('resumetype','spiral')
if resumetype == 'spiral':
dspiral={i:CocoDisplay.spiral(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(dspiral)
elif resumetype == 'spark':
spark={i:CocoDisplay.sparkline(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(spark)
else:
            raise CoaError("pycoa_resume_data expects resumetype to be 'spiral' or 'spark'.")
input = input.loc[input.date==input.date.max()].reset_index(drop=True)
def path_to_image_html(path):
return '<img src="'+ path + '" width="60" >'
input=input.drop(columns=['permanentdisplay','rolloverdisplay','colors','cases'])
input=input.apply(lambda x: x.round(2) if x.name in [input_field,'daily','weekly'] else x)
if isinstance(input['location'][0], list):
col=[i for i in list(input.columns) if i not in ['clustername','location','codelocation']]
col.insert(0,'clustername')
input = input[col]
input=input.set_index('clustername')
else:
input = input.drop(columns='clustername')
input=input.set_index('location')
return input.to_html(escape=False,formatters=dict(resume=path_to_image_html))
''' DECORATORS FOR PLOT: DATE, VERSUS, SCROLLINGMENU '''
def decoplot(func):
"""
        Decorator shared by the plot methods: orders locations by value, limits how many are drawn and checks the requested fields.
"""
@wraps(func)
def inner_plot(self, input = None, input_field = None, **kwargs):
mode = kwargs.get('mode', None)
if mode:
mode = mode
else:
mode = self.dvisu_default['mode']
if mode not in self.available_modes:
raise CoaTypeError('Don\'t know the mode wanted. So far:' + str(self.available_modes))
kwargs['mode'] = mode
if 'location' in input.columns:
location_ordered_byvalues = list(
input.loc[input.date == self.when_end].sort_values(by=input_field, ascending=False)['clustername'].unique())
input = input.copy() # needed to avoid warning
input.loc[:,'clustername'] = pd.Categorical(input.clustername,
categories=location_ordered_byvalues, ordered=True)
input = input.sort_values(by=['clustername', 'date']).reset_index(drop = True)
if func.__name__ != 'pycoa_scrollingmenu' :
if len(location_ordered_byvalues) >= MAXCOUNTRIESDISPLAYED:
input = input.loc[input.clustername.isin(location_ordered_byvalues[:MAXCOUNTRIESDISPLAYED])]
list_max = []
for i in input_field:
list_max.append(max(input.loc[input.clustername.isin(location_ordered_byvalues)][i]))
if len([x for x in list_max if not np.isnan(x)]) > 0:
amplitude = (np.nanmax(list_max) - np.nanmin(list_max))
if amplitude > 10 ** 4:
self.ax_type.reverse()
if func.__name__ == 'pycoa_scrollingmenu' :
if isinstance(input_field,list):
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if self.dbld[self.database_name][1] == 'nation' and self.dbld[self.database_name][0] != 'WW':
func.__name__ = 'pycoa_date_plot'
return func(self, input, input_field, **kwargs)
return inner_plot
''' PLOT VERSUS '''
@decowrapper
@decoplot
def pycoa_plot(self, input = None, input_field = None ,**kwargs):
'''
-----------------
Create a versus plot according to arguments.
See help(pycoa_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa struture is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element. It should be a list dim=2. Moreover the 2 variables must be present
in the DataFrame considered.
        - plot_height = width_height_default[1]
        - plot_width = width_height_default[0]
        - title = None
        - textcopyright = default
        - mode = mouse
        - cursor_date = None if True
        - orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
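        # Illustrative call (assumes a CocoDisplay instance `d` and a pycoa-format
        # DataFrame `df`; the two column names are hypothetical):
        #     tabs = d.pycoa_plot(input=df, input_field=['tot_deaths', 'tot_confirmed'])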
if len(input_field) != 2:
raise CoaTypeError('Two variables are needed to plot a versus chart ... ')
panels = []
cases_custom = CocoDisplay.rollerJS()
if self.get_listfigures():
self.set_listfigures([])
listfigs=[]
for axis_type in self.ax_type:
standardfig = self.standardfig( x_axis_label = input_field[0], y_axis_label = input_field[1],
y_axis_type = axis_type, **kwargs )
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), ('date', '@date{%F}'),
(input_field[0], '@{casesx}' + '{custom}'),
(input_field[1], '@{casesy}' + '{custom}')],
formatters={'location': 'printf', '@{casesx}': cases_custom, '@{casesy}': cases_custom,
'@date': 'datetime'}, mode = kwargs['mode'],
point_policy="snap_to_data")) # ,PanTool())
for loc in input.clustername.unique():
pandaloc = input.loc[input.clustername == loc].sort_values(by='date', ascending='True')
pandaloc.rename(columns={input_field[0]: 'casesx', input_field[1]: 'casesy'}, inplace=True)
standardfig.line(x='casesx', y='casesy',
source=ColumnDataSource(pandaloc), legend_label=pandaloc.clustername.iloc[0],
color=pandaloc.colors.iloc[0], line_width=3, hover_line_width=4)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title=axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
listfigs.append(standardfig)
CocoDisplay.bokeh_legend(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs=panels)
return tabs
''' DATE PLOT '''
@decowrapper
@decoplot
def pycoa_date_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot according to arguments. See help(pycoa_date_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa struture is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
        - plot_width = width_height_default[0]
        - title = None
        - textcopyright = default
        - mode = mouse
        - guideline = False
        - cursor_date = None if True
        - orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
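        # Illustrative call (assumes a CocoDisplay instance `d` and a pycoa-format
        # DataFrame `df`; the column name is hypothetical):
        #     tabs = d.pycoa_date_plot(input=df, input_field='daily', guideline=True)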
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime',**kwargs)
i = 0
r_list=[]
maxou=-1000
lcolors = iter(self.lcolors)
line_style = ['solid', 'dashed', 'dotted', 'dotdash','dashdot']
for val in input_field:
for loc in list(input.clustername.unique()):
input_filter = input.loc[input.clustername == loc].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = input_filter.clustername[0]
#leg = input_filter.permanentdisplay[0]
if len(input_field)>1:
leg = input_filter.permanentdisplay[0] + ', ' + val
if len(list(input.clustername.unique())) == 1:
color = next(lcolors)
else:
color = input_filter.colors[0]
r = standardfig.line(x = 'date', y = val, source = src,
color = color, line_width = 3,
legend_label = leg,
hover_line_width = 4, name = val, line_dash=line_style[i%4])
r_list.append(r)
maxou=max(maxou,np.nanmax(input_filter[val].values))
i += 1
for r in r_list:
label = r.name
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
standardfig.legend.label_text_font_size = '8pt'
if len(input_field) > 1 and len(input_field)*len(input.clustername.unique())>16:
standardfig.legend.visible=False
standardfig.xaxis.formatter = DatetimeTickFormatter(
days = ["%d/%m/%y"], months = ["%d/%m/%y"], years = ["%b %Y"])
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
''' SPIRAL PLOT '''
@decowrapper
@decoplot
def pycoa_spiral_plot(self, input = None, input_field = None, **kwargs):
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
borne=300
kwargs.pop('plot_width')
standardfig = self.standardfig(y_axis_type = None, x_axis_type = None,
width=kwargs['plot_height'], x_range=[-borne, borne], y_range=[-borne, borne], match_aspect=True,**kwargs)
if len(input.clustername.unique()) > 1 :
print('Can only display spiral for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
input['date']=pd.to_datetime(input["date"])
input["dayofyear"]=input.date.dt.dayofyear
input['year']=input.date.dt.year
input['cases'] = input[input_field]
K = 2*input[input_field].max()
#drop bissextile fine tuning in needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
input["dayofyear_angle"] = input["dayofyear"]*2 * np.pi/365
input["r_baseline"] = input.apply(lambda x : ((x["year"]-2020)*2 * np.pi + x["dayofyear_angle"])*K,axis=1)
size_factor = 16
input["r_cas_sup"] = input.apply(lambda x : x["r_baseline"] + 0.5*x[input_field]*size_factor,axis=1)
input["r_cas_inf"] = input.apply(lambda x : x["r_baseline"] - 0.5*x[input_field]*size_factor,axis=1)
radius = 200
def polar(theta,r,norm=radius/input["r_baseline"].max()):
x = norm*r*np.cos(theta)
y = norm*r*np.sin(theta)
return x,y
x_base,y_base=polar(input["dayofyear_angle"],input["r_baseline"])
x_cas_sup,y_cas_sup=polar(input["dayofyear_angle"],input["r_cas_sup"])
x_cas_inf,y_cas_inf=polar(input["dayofyear_angle"],input["r_cas_inf"])
xcol,ycol=[],[]
[ xcol.append([i,j]) for i,j in zip(x_cas_inf,x_cas_sup)]
[ ycol.append([i,j]) for i,j in zip(y_cas_inf,y_cas_sup)]
standardfig.patches(xcol,ycol,color='blue',fill_alpha = 0.5)
src = ColumnDataSource(data=dict(
x=x_base,
y=y_base,
date=input['date'],
cases=input['cases']
))
standardfig.line( x = 'x', y = 'y', source = src, legend_label = input.clustername[0],
line_width = 3, line_color = 'blue')
circle = standardfig.circle('x', 'y', size=2, source=src)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'},
renderers=[circle],
point_policy="snap_to_data")
standardfig.add_tools(hover_tool)
outer_radius=250
[standardfig.annular_wedge(
x=0, y=0, inner_radius=0, outer_radius=outer_radius, start_angle=i*np.pi/6,\
end_angle=(i+1)*np.pi/6,fill_color=None,line_color='black',line_dash='dotted')
for i in range(12)]
label = ['January','February','March','April','May','June','July','August','September','October','November','December']
xr,yr = polar(np.linspace(0, 2 * np.pi, 13),outer_radius,1)
standardfig.text(xr[:-1], yr[:-1], label,text_font_size="9pt", text_align="center", text_baseline="middle")
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
return standardfig
''' SCROLLINGMENU PLOT '''
@decowrapper
@decoplot
def pycoa_scrollingmenu(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot, with a scrolling menu location, according to arguments.
See help(pycoa_scrollingmenu).
Keyword arguments
-----------------
len(location) > 2
- input = None : if None take first element. A DataFrame with a Pycoa struture is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
        - plot_width = width_height_default[0]
        - title = None
        - textcopyright = default
        - mode = mouse
        - guideline = False
        - cursor_date = None if True
        - orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
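        # Illustrative call (needs at least two locations in the pycoa-format
        # DataFrame `df`; names are hypothetical):
        #     tabs = d.pycoa_scrollingmenu(input=df, input_field='cumul')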
mode = kwargs.get('mode',self.dvisu_default['mode'])
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
uniqloc = input.clustername.unique().to_list()
uniqloc.sort()
if 'location' in input.columns:
if len(uniqloc) < 2:
                raise CoaTypeError('What do you want me to do? You have selected only one location. '
                                   'There is no sense in using this method. See help.')
input = input[['date', 'clustername', input_field]]
input = input.sort_values(by='clustername', ascending = True).reset_index(drop=True)
mypivot = pd.pivot_table(input, index='date', columns='clustername', values=input_field)
column_order = uniqloc
mypivot = mypivot.reindex(column_order, axis=1)
source = ColumnDataSource(mypivot)
filter_data1 = mypivot[[uniqloc[0]]].rename(columns={uniqloc[0]: 'cases'})
src1 = ColumnDataSource(filter_data1)
filter_data2 = mypivot[[uniqloc[1]]].rename(columns={uniqloc[1]: 'cases'})
src2 = ColumnDataSource(filter_data2)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'}, mode = mode,
point_policy="snap_to_data") # ,PanTool())
panels = []
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime', **kwargs)
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
standardfig.add_tools(hover_tool)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
def add_line(src, options, init, color):
s = Select(options = options, value = init)
r = standardfig.line(x = 'date', y = 'cases', source = src, line_width = 3, line_color = color)
li = LegendItem(label = init, renderers = [r])
s.js_on_change('value', CustomJS(args=dict(s0=source, s1=src, li=li),
code="""
var c = cb_obj.value;
var y = s0.data[c];
s1.data['cases'] = y;
li.label = {value: cb_obj.value};
s1.change.emit();
"""))
return s, li
s1, li1 = add_line(src1, uniqloc, uniqloc[0], self.scolors[0])
s2, li2 = add_line(src2, uniqloc, uniqloc[1], self.scolors[1])
standardfig.add_layout(Legend(items = [li1, li2]))
standardfig.legend.location = 'top_left'
layout = row(column(row(s1, s2), row(standardfig)))
panel = Panel(child = layout, title = axis_type)
panels.append(panel)
tabs = Tabs(tabs = panels)
label = standardfig.title
return tabs
''' YEARLY PLOT '''
@decowrapper
@decoplot
def pycoa_yearly_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot according to arguments. See help(pycoa_date_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa struture is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
        - plot_width = width_height_default[0]
        - title = None
        - textcopyright = default
        - mode = mouse
        - guideline = False
        - cursor_date = None if True
        - orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
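        # Illustrative call (one location only; assumes a pycoa-format DataFrame
        # `df`, names are hypothetical):
        #     tabs = d.pycoa_yearly_plot(input=df, input_field='daily')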
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
if len(input.clustername.unique()) > 1 :
print('Can only display yearly plot for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
input['date']=pd.to_datetime(input["date"])
#drop bissextile fine tuning in needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
input = input.copy()
input.loc[:,'allyears']=input['date'].apply(lambda x : x.year)
input['allyears'] = input['allyears'].astype(int)
input.loc[:,'dayofyear']= input['date'].apply(lambda x : x.dayofyear)
allyears = list(input.allyears.unique())
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
if len(input_field)>1:
            raise CoaError('Only one variable can be displayed')
else:
input_field=input_field[0]
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type,**kwargs)
i = 0
r_list=[]
maxou=-1000
input['cases']=input[input_field]
line_style = ['solid', 'dashed', 'dotted', 'dotdash']
colors = itertools.cycle(self.lcolors)
for loc in list(input.clustername.unique()):
for year in allyears:
input_filter = input.loc[(input.clustername == loc) & (input['date'].dt.year.eq(year))].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = loc + ' ' + str(year)
r = standardfig.line(x = 'dayofyear', y = input_field, source = src,
color = next(colors), line_width = 3,
legend_label = leg,
hover_line_width = 4, name = input_field)
maxou=max(maxou,np.nanmax(input_filter[input_field].values))
label = input_field
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), ('Cases', '@cases{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode']) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
labelspd=input.loc[(input.allyears.eq(2021)) & (input.date.dt.day.eq(1))]
standardfig.xaxis.ticker = list(labelspd['dayofyear'].astype(int))
replacelabelspd = labelspd['date'].apply(lambda x: str(x.strftime("%b")))
#label_dict = dict(zip(input.loc[input.allyears.eq(2020)]['daymonth'],input.loc[input.allyears.eq(2020)]['date'].apply(lambda x: str(x.day)+'/'+str(x.month))))
standardfig.xaxis.major_label_overrides = dict(zip(list(labelspd['dayofyear'].astype(int)),list(replacelabelspd)))
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
''' DECORATORS FOR HISTO VERTICAL, HISTO HORIZONTAL, PIE & MAP'''
def decohistomap(func):
"""
Decorator function used for histogram and map
"""
@wraps(func)
def inner_hm(self, input = None, input_field = None, **kwargs):
tile = kwargs.get('tile', self.dvisu_default['tile'])
maplabel = kwargs.get('maplabel', None)
if not isinstance(maplabel,list):
maplabel=[maplabel]
#if maplabel:
# maplabel = maplabel
if 'map' in func.__name__:
kwargs['maplabel'] = maplabel
orientation = kwargs.get('orientation', self.dvisu_default['orientation'])
cursor_date = kwargs.get('cursor_date', None)
#if orientation:
# kwargs['orientation'] = orientation
#kwargs['cursor_date'] = kwargs.get('cursor_date', self.dvisu_default['cursor_date'])
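# When 'location' holds lists (aggregated clusters), explode to one row per location so each
# row can be joined to a geometry; the rollover text then shows the cluster name instead.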
if isinstance(input['location'].iloc[0],list):
input['rolloverdisplay'] = input['clustername']
input = input.explode('location')
else:
input['rolloverdisplay'] = input['location']
uniqloc = input.clustername.unique()
geopdwd = input
if maplabel and 'unsorted' in maplabel:
pass
else:
geopdwd = geopdwd.sort_values(by=input_field, ascending = False).reset_index(drop=True)
started = geopdwd.date.min()
ended = geopdwd.date.max()
if cursor_date:
date_slider = DateSlider(title = "Date: ", start = started, end = ended,
value = ended, step=24 * 60 * 60 * 1000, orientation = orientation)
#wanted_date = date_slider.value_as_datetime.date()
#if func.__name__ == 'pycoa_mapfolium' or func.__name__ == 'pycoa_map' or func.__name__ == 'innerdecomap' or func.__name__ == 'innerdecopycoageo':
if func.__name__ in ['pycoa_mapfolium','pycoa_map','pycoageo' ,'pycoa_pimpmap']:
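# Attach geometries for the map renderers: a per-location lookup when locations are lists,
# otherwise a plain merge with self.location_geometry on 'location'.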
if isinstance(input.location.to_list()[0],list):
geom = self.location_geometry
geodic={loc:geom.loc[geom.location==loc]['geometry'].values[0] for loc in geopdwd.location.unique()}
geopdwd['geometry'] = geopdwd['location'].map(geodic)
else:
geopdwd = pd.merge(geopdwd, self.location_geometry, on='location')
kwargs['tile'] = tile
if self.iso3country in ['USA']:#['FRA','USA']
geo = copy.deepcopy(self.geo)
d = geo._list_translation
if func.__name__ != 'pycoa_mapfolium':
if any(i in list(geopdwd.codelocation.unique()) for i in d.keys()) \
or any(True for i in d.keys() if ''.join(list(geopdwd.codelocation.unique())).find(i)!=-1):
geo.set_dense_geometry()
kwargs.pop('tile')
else:
geo.set_main_geometry()
d = {}
new_geo = geo.get_data()[['name_'+self.granularity,'geometry']]
new_geo = new_geo.rename(columns={'name_'+self.granularity:'location'})
new_geo = new_geo.set_index('location')['geometry'].to_dict()
geopdwd['geometry'] = geopdwd['location'].map(new_geo)
geopdwd = gpd.GeoDataFrame(geopdwd, geometry=geopdwd.geometry, crs="EPSG:4326")
if func.__name__ == 'pycoa_histo':
pos = {}
new = pd.DataFrame()
n = 0
for i in uniqloc:
perloc = geopdwd.loc[geopdwd.clustername == i]
if all(perloc != 0):
pos = perloc.index[0]
if new.empty:
new = perloc
else:
new = new.append(perloc)
n += 1
geopdwd = new.reset_index(drop=True)
if cursor_date:
date_slider = date_slider
else:
date_slider = None
kwargs['date_slider'] = date_slider
return func(self, geopdwd, input_field, **kwargs)
return inner_hm
''' VERTICAL HISTO '''
@decowrapper
@decohistomap
def pycoa_histo(self, geopdwd, input_field = None, **kwargs):
'''
-----------------
Create a 1D histogram by value according to arguments.
See help(pycoa_histo).
Keyword arguments
-----------------
- geopdwd : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
geopdwd_filter = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filter = geopdwd_filter.reset_index(drop = True)
input = geopdwd_filter.rename(columns = {'cases': input_field})
bins = kwargs.get('bins', None)
if 'location' in input.columns:
uniqloc = list(input.clustername.unique())
allval = input.loc[input.clustername.isin(uniqloc)][['clustername', input_field,'permanentdisplay']]
min_val = allval[input_field].min()
max_val = allval[input_field].max()
if bins:
bins = bins
else:
if len(uniqloc) == 1:
bins = 2
min_val = 0.
else:
bins = 11
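# Equal-width binning between min_val and max_val; each cluster is assigned to a bin with
# bisect and 'contributors' keeps the cluster names per bin for the hover tooltip.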
delta = (max_val - min_val ) / bins
interval = [ min_val + i*delta for i in range(bins+1)]
contributors = { i : [] for i in range(bins+1)}
for i in range(len(allval)):
rank = bisect.bisect_left(interval, allval.iloc[i][input_field])
if rank == bins+1:
rank = bins
contributors[rank].append(allval.iloc[i]['clustername'])
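# Illustrative sketch of the binning rule used above:
#   >>> import bisect
#   >>> bisect.bisect_left([0, 10, 20, 30], 15)
#   2    # a value of 15 lands at insertion point 2, i.e. the bin between edges 10 and 20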
colors = itertools.cycle(self.lcolors)
lcolors = [next(colors) for i in range(bins+1)]
contributors = dict(sorted(contributors.items()))
frame_histo = pd.DataFrame({
'left': [0]+interval[:-1],
'right':interval,
'middle_bin': [format((i+j)/2, ".1f") for i,j in zip([0]+interval[:-1],interval)],
'top': [len(i) for i in list(contributors.values())],
'contributors': [', '.join(i) for i in contributors.values()],
'colors': lcolors})
#tooltips = """
#<div style="width: 400px">
#<b>Middle value:</b> @middle_bin <br>
#<b>Contributors:</b> @contributors{safe} <br>
#</div>
#"""
tooltips = """
<b>Middle value:</b> @middle_bin <br>
<b>Contributors:</b> @contributors{safe} <br>
"""
hover_tool = HoverTool(tooltips = tooltips)
panels = []
bottom = 0
x_axis_type, y_axis_type, axis_type_title = 3 * ['linear']
for axis_type in ["linear", "linlog", "loglin", "loglog"]:
if axis_type == 'linlog':
y_axis_type, axis_type_title = 'log', 'logy'
if axis_type == 'loglin':
x_axis_type, y_axis_type, axis_type_title = 'log', 'linear', 'logx'
if axis_type == 'loglog':
x_axis_type, y_axis_type = 'log', 'log'
axis_type_title = 'loglog'
standardfig = self.standardfig(x_axis_type=x_axis_type, y_axis_type=y_axis_type, **kwargs)
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
standardfig.xaxis[0].formatter = PrintfTickFormatter(format="%4.2e")
standardfig.add_tools(hover_tool)
standardfig.x_range = Range1d(1.05 * interval[0], 1.05 * interval[-1])
standardfig.y_range = Range1d(0, 1.05 * frame_histo['top'].max())
if x_axis_type == "log":
left = 0.8
if frame_histo['left'][0] <= 0:
frame_histo.at[0, 'left'] = left
else:
left = frame_histo['left'][0]
standardfig.x_range = Range1d(left, 10 * interval[-1])
if y_axis_type == "log":
bottom = 0.0001
standardfig.y_range = Range1d(0.001, 10 * frame_histo['top'].max())
standardfig.quad(source=ColumnDataSource(frame_histo), top='top', bottom=bottom, left='left', \
right='right', fill_color='colors')
panel = Panel(child=standardfig, title=axis_type_title)
panels.append(panel)
tabs = Tabs(tabs=panels)
return tabs
''' DECORATORS FOR HISTO VERTICAL, HISTO HORIZONTAL, PIE '''
def decohistopie(func):
@wraps(func)
def inner_decohistopie(self, geopdwd, input_field, **kwargs):
"""
Decorator for
Horizontal histogram & Pie Chart
"""
geopdwd['cases'] = geopdwd[input_field]
maplabel = kwargs.get('maplabel',None)
plot_width = kwargs.get('plot_width',self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height',self.dfigure_default['plot_height'])
geopdwd_filter = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filter = geopdwd_filter.reset_index(drop = True)
geopdwd_filter['cases'] = geopdwd_filter[input_field]
cursor_date = kwargs.get('cursor_date',self.dvisu_default['cursor_date'])
date_slider = kwargs['date_slider']
my_date = geopdwd.date.unique()
dico_utc = {i: DateSlider(value=i).value for i in my_date}
geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
#geopdwd = geopdwd.drop_duplicates(["date", "codelocation","clustername"])#for sumall avoid duplicate
#geopdwd_filter = geopdwd_filter.drop_duplicates(["date", "codelocation","clustername"])
geopdwd = geopdwd.drop_duplicates(["date","clustername"])#for sumall avoid duplicate
geopdwd_filter = geopdwd_filter.drop_duplicates(["date","clustername"])
locunique = geopdwd_filter.clustername.unique()#geopdwd_filtered.location.unique()
geopdwd_filter = geopdwd_filter.copy()
nmaxdisplayed = MAXCOUNTRIESDISPLAYED
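# At most MAXCOUNTRIESDISPLAYED clusters are drawn; for pie charts the remaining clusters
# are summed into a single 'others' slice, for the other charts they are simply dropped.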
if len(locunique) >= nmaxdisplayed :#and func.__name__ != 'pycoa_pie' :
if func.__name__ != 'pycoa_pie' :
geopdwd_filter = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed])]
else:
geopdwd_filter_first = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed-1])]
geopdwd_filter_other = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[nmaxdisplayed-1:])]
geopdwd_filter_other = geopdwd_filter_other.groupby('date').sum()
geopdwd_filter_other['location'] = 'others'
geopdwd_filter_other['clustername'] = 'others'
geopdwd_filter_other['codelocation'] = 'others'
geopdwd_filter_other['permanentdisplay'] = 'others'
geopdwd_filter_other['rolloverdisplay'] = 'others'
geopdwd_filter_other['colors'] = '#FFFFFF'
geopdwd_filter = geopdwd_filter_first
geopdwd_filter = geopdwd_filter.append(geopdwd_filter_other)
if func.__name__ == 'pycoa_horizonhisto' :
#geopdwd_filter['bottom'] = geopdwd_filter.index
geopdwd_filter['left'] = geopdwd_filter['cases']
geopdwd_filter['right'] = geopdwd_filter['cases']
geopdwd_filter['left'] = geopdwd_filter['left'].apply(lambda x: 0 if x > 0 else x)
geopdwd_filter['right'] = geopdwd_filter['right'].apply(lambda x: 0 if x < 0 else x)
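# Horizontal-bar geometry: the lines above split each value into a left (negative) and right
# (positive) edge; below, each bar gets a horizontal band of height plot_height/n and the
# label coordinates are stored alongside.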
n = len(geopdwd_filter.index)
d = plot_height / n
ymax = plot_height
geopdwd_filter['top'] = [ymax*(n-i)/n + d/2 for i in range(n)]
geopdwd_filter['bottom'] = [ymax*(n-i)/n - d/2 for i in range(n)]
geopdwd_filter['horihistotexty'] = geopdwd_filter['bottom'] + d/2
geopdwd_filter['horihistotextx'] = geopdwd_filter['right']
if maplabel and 'label%' in maplabel:
geopdwd_filter['right'] = geopdwd_filter['right'].apply(lambda x: 100.*x)
geopdwd_filter['horihistotextx'] = geopdwd_filter['right']
geopdwd_filter['horihistotext'] = [str(round(i))+'%' for i in geopdwd_filter['right']]
else:
geopdwd_filter['horihistotext'] = [ '{:.3g}'.format(float(i)) if float(i)>1.e4 else round(float(i),2) for i in geopdwd_filter['right'] ]
geopdwd_filter['horihistotext'] = [str(i) for i in geopdwd_filter['horihistotext']]
if func.__name__ == 'pycoa_pie' :
geopdwd_filter = self.add_columns_for_pie_chart(geopdwd_filter,input_field)
geopdwd = self.add_columns_for_pie_chart(geopdwd,input_field)
if maplabel and 'label%' in maplabel:
geopdwd_filter['textdisplayed2'] = geopdwd_filter['percentage']
geopdwd['textdisplayed2'] = geopdwd['percentage']
source = ColumnDataSource(data = geopdwd)
input_filter = geopdwd_filter
srcfiltered = ColumnDataSource(data = input_filter)
max_value = max(input_filter['cases'])
min_value = min(input_filter['cases'])
min_value_gt0 = min(input_filter[input_filter['cases'] > 0]['cases'])
panels = []
for axis_type in self.ax_type:
plot_width = kwargs['plot_width']
plot_height = kwargs['plot_height']
standardfig = self.standardfig( x_axis_type = axis_type, x_range = (1.05*min_value, 1.05 * max_value),**kwargs)
if maplabel and 'label%' in maplabel:
standardfig.x_range = Range1d(0.01, 1.2 * max_value*100)
standardfig.xaxis.axis_label = 'percentage(%)'
standardfig.xaxis.formatter = BasicTickFormatter(use_scientific=False)
else:
standardfig.xaxis[0].formatter = PrintfTickFormatter(format="%4.2e")
standardfig.x_range = Range1d(0.01, 1.2 * max_value)
if not input_filter[input_filter[input_field] < 0.].empty:
standardfig.x_range = Range1d(1.2 * min_value, 1.2 * max_value)
if axis_type == "log":
if not input_filter[input_filter[input_field] < 0.].empty:
print('Some values are negative, can\'t display a log scale in this context')
else:
if func.__name__ == 'pycoa_horizonhisto' :
if maplabel and 'label%' in maplabel:
standardfig.x_range = Range1d(0.01, 50 * max_value*100)
else:
standardfig.x_range = Range1d(0.01, 50 * max_value)
srcfiltered.data['left'] = [0.01] * len(srcfiltered.data['right'])
if func.__name__ == 'pycoa_pie':
if not input_filter[input_filter[input_field] < 0.].empty:
raise CoaKeyError('Some values are negative, can\'t display a Pie chart, try histo by location')
standardfig.plot_width = plot_height
standardfig.plot_height = plot_height
if date_slider:
date_slider.width = int(0.8*plot_width)
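# The CustomJS callback below re-filters the full source on the selected date, re-sorts the
# values, rebuilds the pie/bar geometry client-side and updates the axis ranges and title
# without a Python round trip.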
callback = CustomJS(args = dict(source = source,
source_filter = srcfiltered,
date_slider = date_slider,
ylabel = standardfig.yaxis[0],
title = standardfig.title,
x_range = standardfig.x_range,
x_axis_type = axis_type,
figure = standardfig),
code = """
var date_slide = date_slider.value;
var dates = source.data['date_utc'];
var val = source.data['cases'];
var loc = source.data['clustername'];
//var loc = source.data['location'];
var subregion = source.data['name_subregion'];
var codeloc = source.data['codelocation'];
var colors = source.data['colors'];
var newval = [];
var newloc = [];
var newcolors = [];
var newcodeloc = [];
var newname_subregion = [];
var labeldic = {};
for (var i = 0; i < dates.length; i++){
if (dates[i] == date_slide){
newval.push(parseFloat(val[i]));
newloc.push(loc[i]);
newcodeloc.push(codeloc[i]);
newcolors.push(colors[i]);
if(typeof subregion !== 'undefined')
newname_subregion.push(subregion[i]);
}
}
var len = source_filter.data['clustername'].length;
var indices = new Array(len);
for (var i = 0; i < len; i++) indices[i] = i;
indices.sort(function (a, b) { return newval[a] > newval[b] ? -1 : newval[a] < newval[b] ? 1 : 0; });
var orderval = [];
var orderloc = [];
var ordercodeloc = [];
var ordername_subregion = [];
var ordercolors = [];
var textdisplayed = [];
for (var i = 0; i < len; i++)
{
orderval.push(newval[indices[i]]);
orderloc.push(newloc[indices[i]]);
ordercodeloc.push(newcodeloc[indices[i]]);
if(typeof subregion !== 'undefined')
ordername_subregion.push(newname_subregion[indices[i]]);
ordercolors.push(newcolors[indices[i]]);
//labeldic[len-indices[i]] = newcodeloc[indices[i]];
textdisplayed.push(newcodeloc[indices[i]].padStart(40,' '));
}
source_filter.data['cases'] = orderval;
const reducer = (accumulator, currentValue) => accumulator + currentValue;
var tot = orderval.reduce(reducer);
var top = [];
var bottom = [];
var starts = [];
var ends = [];
var middle = [];
var text_x = [];
var text_y = [];
var r = 0.7;
var bthick = 0.95;
var cumul = 0.;
var percentage = [];
var angle = [];
var text_size = [];
var left_quad = [];
var right_quad = [];
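// Single pass over the sorted values: accumulate wedge angles for the pie, place label
// positions on a circle of radius r, and split each value into left/right quad edges for
// the horizontal histogram.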
for(var i = 0; i < orderval.length; i++)
{
cumul += ((orderval[i] / tot) * 2 * Math.PI);
ends.push(cumul);
if(i==0)
starts.push(0);
else
starts.push(ends[i-1]);
middle.push((ends[i]+starts[i])/2);
text_x.push(r*Math.cos(middle[i]));
text_y.push(r*Math.sin(middle[i]));
percentage.push(String(100.*orderval[i] / tot).slice(0, 4));
angle.push((orderval[i] / tot) * 2 * Math.PI)
/*if ((ends[i]-starts[i]) > 0.08*(2 * Math.PI))
text_size.push('10pt');
else
text_size.push('6pt');*/
//top.push((orderval.length-i) + bthick/2);
//bottom.push((orderval.length-i) - bthick/2);
if (isNaN(orderval[i])) orderval[i] = 0.;
if(orderval[i]<=0.)
{
left_quad.push(orderval[i]);
right_quad.push(0.);
}
else
{
left_quad.push(0);
right_quad.push(orderval[i]);
}
}
source_filter.data['clustername'] = orderloc;
source_filter.data['codelocation'] = ordercodeloc;
//source_filter.data['colors'] = ordercolors;
if(typeof subregion !== 'undefined')
source_filter.data['rolloverdisplay'] = ordername_subregion;
else
source_filter.data['rolloverdisplay'] = orderloc;
source_filter.data['ends'] = ends;
source_filter.data['starts'] = starts;
source_filter.data['middle'] = middle;
source_filter.data['text_x'] = text_x;
source_filter.data['text_y'] = text_y;
//source_filter.data['text_size'] = text_size;
source_filter.data['percentage'] = percentage;
source_filter.data['angle'] = angle;
source_filter.data['left'] = left_quad;
source_filter.data['right'] = right_quad;
var mid =[];
var ht = [];
var textdisplayed2 = [];
var n = right_quad.length;
var d = figure.plot_height / n;
var ymax = figure.plot_height;
for(i=0; i<right_quad.length;i++){
top.push(parseInt(ymax*(n-i)/n+d/2));
bottom.push(parseInt(ymax*(n-i)/n-d/2));
mid.push(parseInt(ymax*(n-i)/n));
labeldic[parseInt(ymax*(n-i)/n)] = ordercodeloc[i];
ht.push(right_quad[i].toFixed(2).toString());
var a=new Intl.NumberFormat().format(right_quad[i])
textdisplayed2.push(a.toString().padStart(26,' '));
//textdisplayed2.push(right_quad[i].toFixed(2).toString().padStart(40,' '));
}
source_filter.data['top'] = top;
source_filter.data['bottom'] = bottom;
source_filter.data['horihistotextxy'] = mid;
source_filter.data['horihistotextx'] = right_quad;
source_filter.data['horihistotext'] = ht;
source_filter.data['permanentdisplay'] = ordercodeloc;
source_filter.data['textdisplayed'] = textdisplayed;
source_filter.data['textdisplayed2'] = textdisplayed2;
var maxx = Math.max.apply(Math, right_quad);
var minx = Math.min.apply(Math, left_quad);
ylabel.major_label_overrides = labeldic;
console.log(labeldic);
x_range.end = 1.2 * maxx;
x_range.start = 1.05 * minx;
if(minx >= 0){
x_range.start = 0.01;
source_filter.data['left'] = Array(left_quad.length).fill(0.01);
}
var tmp = title.text;
tmp = tmp.slice(0, -11);
var dateconverted = new Date(date_slide);
var dd = String(dateconverted.getDate()).padStart(2, '0');
var mm = String(dateconverted.getMonth() + 1).padStart(2, '0'); //January is 0!
var yyyy = dateconverted.getFullYear();
var dmy = dd + '/' + mm + '/' + yyyy;
title.text = tmp + dmy+")";
source_filter.change.emit();
""")
date_slider.js_on_change('value', callback)
cases_custom = CocoDisplay.rollerJS()
if func.__name__ == 'pycoa_pie' :
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), (input_field, '@cases{0,0.0}'), ('%','@percentage'), ],
formatters={'location': 'printf', '@{' + 'cases' + '}': cases_custom, '%':'printf'},
point_policy="snap_to_data")) # ,PanTool())
else:
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), (input_field, '@cases{0,0.0}'), ],
formatters={'location': 'printf', '@{' + 'cases' + '}': cases_custom, },
point_policy="snap_to_data")) # ,PanTool())
panel = Panel(child = standardfig, title = axis_type)
panels.append(panel)
return func(self, srcfiltered, panels, date_slider)
return inner_decohistopie
''' HORIZONTAL HISTO '''
@decowrapper
@decohistomap
@decohistopie
def pycoa_horizonhisto(self, srcfiltered, panels, date_slider):
'''
-----------------
Create a 1D histogram by location according to arguments.
See help(pycoa_horizonhisto).
Keyword arguments
-----------------
- srcfiltered : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
n = len(panels)
new_panels = []
for i in range(n):
fig = panels[i].child
fig.y_range = Range1d(min(srcfiltered.data['bottom']), max(srcfiltered.data['top']))
fig.yaxis[0].formatter = NumeralTickFormatter(format="0.0")
ytick_loc = [int(i) for i in srcfiltered.data['horihistotexty']]
fig.yaxis.ticker = ytick_loc
label_dict = dict(zip(ytick_loc,srcfiltered.data['permanentdisplay']))
fig.yaxis.major_label_overrides = label_dict
#print(fig.y_range ,fig.yaxis.major_label_overrides)
fig.quad(source = srcfiltered,
top='top', bottom = 'bottom', left = 'left', right = 'right', color = 'colors', line_color = 'black',
line_width = 1, hover_line_width = 2)
labels = LabelSet(
x = 'horihistotextx',
y = 'horihistotexty',
x_offset=5,
y_offset=-4,
text = 'horihistotext',
source = srcfiltered,text_font_size='10px',text_color='black')
fig.add_layout(labels)
panel = Panel(child = fig, title = panels[i].title)
new_panels.append(panel)
tabs = Tabs(tabs = new_panels)
if date_slider:
tabs = column(date_slider,tabs)
return tabs
''' PIE '''
def add_columns_for_pie_chart(self,df,column_name):
df = df.copy()
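# Pie geometry columns: cumulative shares give each wedge's start/end angle, 'middle' with
# cos/sin places the labels, and wedges thinner than pi/20 get their labels blanked out.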
column_sum = df[column_name].sum()
df['percentage'] = df[column_name]/column_sum
percentages = [0] + df['percentage'].cumsum().tolist()
df['angle'] = (df[column_name]/column_sum)*2 * np.pi
df['starts'] = [p * 2 * np.pi for p in percentages[:-1]]
df['ends'] = [p * 2 * np.pi for p in percentages[1:]]
df['diff'] = (df['ends'] - df['starts'])
df['middle'] = df['starts']+np.abs(df['ends']-df['starts'])/2.
df['cos'] = np.cos(df['middle']) * 0.9
df['sin'] = np.sin(df['middle']) * 0.9
df['text_size'] = '8pt'
df['textdisplayed'] = df['permanentdisplay'].str.pad(36, side = "left")
locale.setlocale(locale.LC_ALL, 'en_US')
df['textdisplayed2'] = [ locale.format("%d", i, grouping=True)\
for i in df[column_name]]
#df['textdisplayed2'] = df[column_name].astype(str) #[i.str for i in df[column_name]]
df['textdisplayed2'] = df['textdisplayed2'].str.pad(26, side = "left")
#df['textdisplayed2'] = df[column_name].str.pad(26, side = "left")
df.loc[df['diff'] <= np.pi/20,'textdisplayed']=''
df.loc[df['diff'] <= np.pi/20,'textdisplayed2']=''
df['percentage'] = 100.*df['percentage']
return df
@decowrapper
@decohistomap
@decohistopie
def pycoa_pie(self, srcfiltered, panels, date_slider):
'''
-----------------
Create a pie chart according to arguments.
See help(pycoa_pie).
Keyword arguments
-----------------
- srcfiltered : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
'''
standardfig = panels[0].child
standardfig.plot_height=400
standardfig.plot_width=400
standardfig.x_range = Range1d(-1.1, 1.1)
standardfig.y_range = Range1d(-1.1, 1.1)
standardfig.axis.visible = False
standardfig.xgrid.grid_line_color = None
standardfig.ygrid.grid_line_color = None
standardfig.wedge(x=0, y=0, radius=1.,line_color='#E8E8E8',
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
fill_color='colors', legend_label='clustername', source=srcfiltered)
standardfig.legend.visible = False
labels = LabelSet(x=0, y=0,text='textdisplayed',angle=cumsum('angle', include_zero=True),
text_font_size="10pt",source=srcfiltered,render_mode='canvas')
labels2 = LabelSet(x=0, y=0, text='textdisplayed2',
angle=cumsum('angle', include_zero=True),text_font_size="8pt",source=srcfiltered)
standardfig.add_layout(labels)
standardfig.add_layout(labels2)
if date_slider:
standardfig = column(date_slider,standardfig)
return standardfig
''' MAP FOLIUM '''
@decowrapper
@decohistomap
def pycoa_mapfolium(self, geopdwd, input_field, **kwargs):
'''
-----------------
Create a folium map according to arguments.
See help(pycoa_mapfolium).
Keyword arguments
-----------------
- geopdwd : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
title = kwargs.get('title', None)
tile = kwargs.get('tile', self.dvisu_default['tile'])
tile = CocoDisplay.convert_tile(tile, 'folium')
maplabel = kwargs.get('maplabel',self.dvisu_default['maplabel'])
plot_width = kwargs.get('plot_width',self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height',self.dfigure_default['plot_height'])
geopdwd['cases'] = geopdwd[input_field]
geopdwd_filtered = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filtered = geopdwd_filtered.reset_index(drop = True)
geopdwd_filtered['cases'] = geopdwd_filtered[input_field]
my_date = geopdwd.date.unique()
dico_utc = {i: DateSlider(value=i).value for i in my_date}
geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
#geopdwd = geopdwd.drop_duplicates(["date", "codelocation","clustername"])#for sumall avoid duplicate
#geopdwd_filtered = geopdwd_filtered.sort_values(by='cases', ascending = False).reset_index()
#locunique = geopdwd_filtered.clustername.unique()#geopdwd_filtered.location.unique()
if self.database_name == 'risklayer':
geopdwd_filtered = geopdwd_filtered.loc[geopdwd_filtered.geometry.notna()]
uniqloc = list(geopdwd_filtered.codelocation.unique())
geopdwd_filtered = geopdwd_filtered.drop(columns=['date', 'colors'])
msg = "(data from: {})".format(self.database_name)
minx, miny, maxx, maxy = geopdwd_filtered.total_bounds
mapa = folium.Map(tiles=tile, attr='<a href=\"http://pycoa.fr\"> ©pycoa.fr </a>' + msg)
#min_lat=minx, max_lat=maxx, min_lon=miny, max_lon=maxy)
#location=[geopdwd_filtered.centroid.y.mean(),geopdwd_filtered.centroid.x.mean()],)
if self.dbld[self.database_name][0] != 'WW':
mapa.fit_bounds([(miny, minx), (maxy, maxx)])
fig = Figure(width=plot_width, height=plot_height)
fig.add_child(mapa)
min_col, max_col = CocoDisplay.min_max_range(np.nanmin(geopdwd_filtered[input_field]),
np.nanmax(geopdwd_filtered[input_field]))
min_col_non0 = (np.nanmin(geopdwd_filtered.loc[geopdwd_filtered['cases']>0.]['cases']))
invViridis256 = Viridis256[::-1]
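# Colour scale uses a reversed Viridis palette; with the 'log' maplabel option non-positive
# values become NaN (drawn in grey) and the colour bar is built with logarithmic steps.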
if 'log' in maplabel:
geopdwd_filtered['cases'] = geopdwd_filtered.loc[geopdwd_filtered['cases']>0]['cases']
color_mapper = LinearColorMapper(palette=invViridis256, low=min_col_non0, high=max_col, nan_color='#d9d9d9')
colormap = branca.colormap.LinearColormap(color_mapper.palette).to_step(data=list(geopdwd_filtered['cases']),n=10,method='log')
else:
color_mapper = LinearColorMapper(palette=invViridis256, low=min_col, high=max_col, nan_color='#d9d9d9')
colormap = branca.colormap.LinearColormap(color_mapper.palette).scale(min_col, max_col)
colormap.caption = title
colormap.add_to(mapa)
map_id = colormap.get_name()
custom_label_colorbar_js = """
var div = document.getElementById('legend');
var ticks = document.getElementsByClassName('tick')
for(var i = 0; i < ticks.length; i++){
var values = ticks[i].textContent.replace(',','')
val = parseFloat(values).toExponential(1).toString().replace("+", "")
if(parseFloat(ticks[i].textContent) == 0) val = 0.
div.innerHTML = div.innerHTML.replace(ticks[i].textContent,val);
}
"""
e = Element(custom_label_colorbar_js)
html = colormap.get_root()
html.script.get_root().render()
html.script._children[e.get_name()] = e
geopdwd_filtered[input_field + 'scientific_format'] = \
(['{:.5g}'.format(i) for i in geopdwd_filtered['cases']])
# (['{:.3g}'.format(i) if i>100000 else i for i in geopdwd_filter[input_field]])
map_dict = geopdwd_filtered.set_index('location')[input_field].to_dict()
if np.nanmin(geopdwd_filtered[input_field]) == np.nanmax(geopdwd_filtered[input_field]):
map_dict['FakeCountry'] = 0.
if 'log' in maplabel:
color_scale = branca.colormap.LinearColormap(color_mapper.palette).to_step(data=list(geopdwd_filtered['cases']),n=10,method='log')
else:
color_scale = LinearColormap(color_mapper.palette, vmin=min(map_dict.values()), vmax=max(map_dict.values()))
def get_color(feature):
value = map_dict.get(feature['properties']['location'])
if value is None or np.isnan(value):
return '#8c8c8c' # MISSING -> gray
else:
return color_scale(value)
displayed = 'rolloverdisplay'
folium.GeoJson(
geopdwd_filtered,
style_function=lambda x:
{
'fillColor': get_color(x),
'fillOpacity': 0.8,
'color': None
},
highlight_function=lambda x: {'weight': 2, 'color': 'green'},
tooltip=folium.features.GeoJsonTooltip(fields=[displayed, input_field + 'scientific_format'],
aliases=['location' + ':', input_field + ":"],
style="""
background-color: #F0EFEF;
border: 2px solid black;
border-radius: 3px;
box-shadow: 3px;
opacity: 0.2;
"""),
# '<div style="barialckground-color: royalblue 0.2; color: black; padding: 2px; border: 1px solid black; border-radius: 2px;">'+input_field+'</div>'])
).add_to(mapa)
return mapa
''' DECORATOR FOR MAP BOKEH '''
def decopycoageo(func):
@wraps(func)
def innerdecopycoageo(self, geopdwd, input_field, **kwargs):
geopdwd['cases'] = geopdwd[input_field]
geopdwd_filtered = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filtered = geopdwd_filtered.reset_index(drop = True)
geopdwd_filtered = gpd.GeoDataFrame(geopdwd_filtered, geometry=geopdwd_filtered.geometry, crs="EPSG:4326")
geopdwd = geopdwd.sort_values(by=['clustername', 'date'], ascending = [True, False])
geopdwd_filtered = geopdwd_filtered.sort_values(by=['clustername', 'date'], ascending = [True, False]).drop(columns=['date', 'colors'])
new_poly = []
geolistmodified = dict()
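# Each polygon is re-expressed point by point from WGS84 lon/lat to Web Mercator so it lines
# up with the Bokeh tile providers; the conversion is delegated to wgs84_to_web_mercator
# (the standard spherical transform is x = R*lon_rad, y = R*ln(tan(pi/4 + lat_rad/2)),
# with R = 6378137 m).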
for index, row in geopdwd_filtered.iterrows():
split_poly = []
new_poly = []
if row['geometry']:
for pt in self.get_polycoords(row):
if type(pt) == tuple:
new_poly.append(CocoDisplay.wgs84_to_web_mercator(pt))
elif type(pt) == list:
shifted = []
for p in pt:
shifted.append(CocoDisplay.wgs84_to_web_mercator(p))
new_poly.append(sg.Polygon(shifted))
else:
raise CoaTypeError("Neither tuple or list don't know what to do with \
your geometry description")
if type(new_poly[0]) == tuple:
geolistmodified[row['location']] = sg.Polygon(new_poly)
else:
geolistmodified[row['location']] = sg.MultiPolygon(new_poly)
ng = pd.DataFrame(geolistmodified.items(), columns=['location', 'geometry'])
geolistmodified = gpd.GeoDataFrame({'location': ng['location'], 'geometry': gpd.GeoSeries(ng['geometry'])}, crs="epsg:3857")
geopdwd_filtered = geopdwd_filtered.drop(columns='geometry')
geopdwd_filtered = pd.merge(geolistmodified, geopdwd_filtered, on='location')
#if kwargs['wanted_dates']:
# kwargs.pop('wanted_dates')
return func(self, geopdwd, geopdwd_filtered, **kwargs)
return innerdecopycoageo
''' RETURN GEOMETRY, LOCATION + CASES '''
@decowrapper
@decohistomap
@decopycoageo
def pycoageo(self, geopdwd, geopdwd_filtered, **kwargs):
return geopdwd_filtered
def decomap(func):
@wraps(func)
def innerdecomap(self, geopdwd, geopdwd_filtered, **kwargs):
title = kwargs.get('title', None)
maplabel = kwargs.get('maplabel',self.dvisu_default['maplabel'])
tile = kwargs.get('tile', None)
if tile:
tile = CocoDisplay.convert_tile(tile, 'bokeh')
uniqloc = list(geopdwd_filtered.clustername.unique())
dfLabel = pd.DataFrame()
sourcemaplabel = ColumnDataSource(dfLabel)
if maplabel or func.__name__ == 'pycoa_pimpmap':
locsum = geopdwd_filtered.clustername.unique()
numberpercluster = geopdwd_filtered['clustername'].value_counts().to_dict()
sumgeo = geopdwd_filtered.copy()
sumgeo['geometry'] = sumgeo['geometry'].buffer(0.001) #needed with geopandas 0.10.2
sumgeo = sumgeo.dissolve(by='clustername', aggfunc='sum').reset_index()
sumgeo['nb'] = sumgeo['clustername'].map(numberpercluster)
#print(geopdwd_filtered.loc[geopdwd_filtered.clustername=='Île-de-France'].reset_index(drop=True).explode(index_parts=False))
centrosx = sumgeo['geometry'].centroid.x
centrosy = sumgeo['geometry'].centroid.y
cases = sumgeo['cases']/sumgeo['nb']
dfLabel= | pd.DataFrame({'clustername':sumgeo.clustername,'centroidx':centrosx,'centroidy':centrosy,'cases':cases,'geometry':sumgeo['geometry']}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
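# Candidate pairs: record i of A is compared with record i of B; every comparison feature
# below is evaluated on exactly these pairs.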
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
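# All rows hold the same value, so label-based lookup (align by index label) and
# position-based lookup (align by row number) must both yield all ones even though B is reversed.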
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
pdt.assert_series_equal(result['d_9'], expected)
# disagree values as nan
expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na')
pdt.assert_series_equal(result['d_na'], expected)
# disagree values as string
expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str')
pdt.assert_series_equal(result['d_str'], expected)
# tests/test_compare.py:TestCompareNumeric
class TestCompareNumeric(TestData):
"""Test the numeric comparison methods."""
def test_numeric(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 2, 3, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', 'step', offset=2)
comp.numeric('col', 'col', method='step', offset=2)
comp.numeric('col', 'col', 'step', 2)
result = comp.compute(ix, A, B)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1)
pdt.assert_series_equal(result[1], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
def test_numeric_with_missings(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', scale=2)
comp.numeric('col', 'col', scale=2, missing_value=0)
comp.numeric('col', 'col', scale=2, missing_value=123.45)
comp.numeric('col', 'col', scale=2, missing_value=nan)
comp.numeric('col', 'col', scale=2, missing_value='str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Missing values as 0
expected = Series(
[1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1)
pdt.assert_series_equal(result[1], expected)
# Missing values as 123.45
expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
# Missing values as nan
expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3)
pdt.assert_series_equal(result[3], expected)
# Missing values as string
expected = Series(
[1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4)
pdt.assert_series_equal(result[4], expected)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms(self, alg):
A = DataFrame({'col': [1, 1, 1, 1, 1]})
B = DataFrame({'col': [1, 2, 3, 4, 5]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='step', offset=1, label='step')
comp.numeric(
'col', 'col', method='linear', offset=1, scale=2, label='linear')
comp.numeric(
'col', 'col', method='squared', offset=1, scale=2, label='squared')
comp.numeric(
'col', 'col', method='exp', offset=1, scale=2, label='exp')
comp.numeric(
'col', 'col', method='gauss', offset=1, scale=2, label='gauss')
result_df = comp.compute(ix, A, B)
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
if alg != 'step':
print(alg)
print(result)
# sim(scale) = 0.5
expected_bool = Series(
[False, False, False, True, False], index=ix, name=alg)
pdt.assert_series_equal(result == 0.5, expected_bool)
# sim(offset) = 1
expected_bool = Series(
[True, True, False, False, False], index=ix, name=alg)
pdt.assert_series_equal(result == 1.0, expected_bool)
# sim(scale) larger than 0.5
expected_bool = Series(
[False, False, True, False, False], index=ix, name=alg)
pdt.assert_series_equal((result > 0.5) & (result < 1.0),
expected_bool)
# sim(scale) smaller than 0.5
expected_bool = Series(
[False, False, False, False, True], index=ix, name=alg)
pdt.assert_series_equal((result < 0.5) & (result >= 0.0),
expected_bool)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms_errors(self, alg):
# scale negative
if alg != "step":
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
# offset negative
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=-2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
def test_numeric_does_not_exist(self):
# raise when the algorithm does not exist
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
# tests/test_compare.py:TestCompareDates
class TestCompareDates(TestData):
"""Test the exact comparison method."""
def test_dates(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col')
result = comp.compute(ix, A, B)[0]
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0)
pdt.assert_series_equal(result, expected)
def test_date_incorrect_dtype(self):
A = DataFrame({
'col':
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']
})
B = DataFrame({
'col': [
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
A['col1'] = to_datetime(A['col'])
B['col1'] = to_datetime(B['col'])
comp = recordlinkage.Compare()
comp.date('col', 'col1')
pytest.raises(ValueError, comp.compute, ix, A, B)
comp = recordlinkage.Compare()
comp.date('col1', 'col')
pytest.raises(ValueError, comp.compute, ix, A, B)
def test_dates_with_missings(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='m_')
comp.date('col', 'col', missing_value=0, label='m_0')
comp.date('col', 'col', missing_value=123.45, label='m_float')
comp.date('col', 'col', missing_value=nan, label='m_na')
comp.date('col', 'col', missing_value='str', label='m_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_')
pdt.assert_series_equal(result['m_'], expected)
# Missing values as 0
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0')
pdt.assert_series_equal(result['m_0'], expected)
# Missing values as 123.45
expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float')
pdt.assert_series_equal(result['m_float'], expected)
# Missing values as nan
expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na')
pdt.assert_series_equal(result['m_na'], expected)
# Missing values as string
expected = Series(
[1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str')
pdt.assert_series_equal(result['m_str'], expected)
def test_dates_with_swap(self):
months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45),
(2, 1, 123.45)]
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='s_')
comp.date(
'col', 'col', swap_month_day=0, swap_months='default', label='s_1')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months='default',
label='s_2')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months=months_to_swap,
label='s_3')
comp.date(
'col',
'col',
swap_month_day=nan,
swap_months='default',
missing_value=nan,
label='s_4')
comp.date('col', 'col', swap_month_day='str', label='s_5')
result = comp.compute(ix, A, B)
# swap_month_day as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_')
pdt.assert_series_equal(result['s_'], expected)
# swap_month_day and swap_months as 0
expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1')
pdt.assert_series_equal(result['s_1'], expected)
# swap_month_day 123.45 (float)
expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2')
pdt.assert_series_equal(result['s_2'], expected)
# swap_month_day and swap_months 123.45 (float)
expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3')
pdt.assert_series_equal(result['s_3'], expected)
# swap_month_day and swap_months as nan
expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4')
pdt.assert_series_equal(result['s_4'], expected)
# swap_month_day as string
expected = Series(
[1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5')
pdt.assert_series_equal(result['s_5'], expected)
# tests/test_compare.py:TestCompareGeo
class TestCompareGeo(TestData):
"""Test the geo comparison method."""
def test_geo(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step',
offset=50) # 50 km range
result = comp.compute(ix, A, B)
# Missing values as default [36.639460, 54.765854, 44.092472]
expected = Series([1.0, 0.0, 1.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
def test_geo_batch(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='linear',
offset=1,
scale=2,
label='linear')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='squared',
offset=1,
scale=2,
label='squared')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='exp',
offset=1,
scale=2,
label='exp')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='gauss',
offset=1,
scale=2,
label='gauss')
result_df = comp.compute(ix, A, B)
print(result_df)
for alg in ['step', 'linear', 'squared', 'exp', 'gauss']:
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
def test_geo_does_not_exist(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo('lat', 'lng', 'lat', 'lng', method='unknown')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareStrings(TestData):
"""Test the exact comparison method."""
def test_defaults(self):
# the default algorithm is levenshtein
# test that the default values are identical to levenshtein
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', label='default')
comp.string('col', 'col', method='levenshtein', label='with_args')
result = comp.compute(ix, A, B)
pdt.assert_series_equal(
result['default'].rename(None),
result['with_args'].rename(None)
)
def test_fuzzy(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='jaro', missing_value=0)
comp.string('col', 'col', method='q_gram', missing_value=0)
comp.string('col', 'col', method='cosine', missing_value=0)
comp.string('col', 'col', method='jaro_winkler', missing_value=0)
comp.string('col', 'col', method='dameraulevenshtein', missing_value=0)
comp.string('col', 'col', method='levenshtein', missing_value=0)
result = comp.compute(ix, A, B)
print(result)
assert result.notnull().all(1).all(0)
assert (result[result.notnull()] >= 0).all(1).all(0)
assert (result[result.notnull()] <= 1).all(1).all(0)
def test_threshold(self):
A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]})
B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.5,
missing_value=2.0,
label="x_col1"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=1.0,
missing_value=0.5,
label="x_col2"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.0,
missing_value=nan,
label="x_col3"
)
result = comp.compute(ix, A, B)
expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1")
pdt.assert_series_equal(result["x_col1"], expected)
expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2")
pdt.assert_series_equal(result["x_col2"], expected)
expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3")
pdt.assert_series_equal(result["x_col3"], expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_incorrect_input(self, alg):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
with pytest.raises(Exception):
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
comp.compute(ix, A, B)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms_nan(self, alg):
A = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
B = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 0.0, 0.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=nan)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, nan, nan, nan, nan], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=9.0)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 9.0, 9.0, 9.0, 9.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms(self, alg):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=0)
result = comp.compute(ix, A, B)[0]
assert result.notnull().all()
assert (result >= 0).all()
assert (result <= 1).all()
assert (result > 0).any()
assert (result < 1).any()
def test_fuzzy_does_not_exist(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareFreq(object):
def test_freq(self):
# data
array_repeated = np.repeat(np.arange(10), 10)
array_tiled = np.tile(np.arange(20), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency, FrequencyA, FrequencyB
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col'))
comp.add(FrequencyA('col'))
result = comp.compute(ix, A, B)
expected = Series(np.ones((100, )) / 10, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
comp = recordlinkage.Compare()
comp.add(Frequency(right_on='col'))
comp.add(FrequencyB('col'))
result = comp.compute(ix, A, B)
expected = Series(np.ones((100, )) / 20, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
def test_freq_normalise(self):
# data
array_repeated = np.repeat(np.arange(10), 10)
array_tiled = np.tile(np.arange(20), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col', normalise=False))
result = comp.compute(ix, A, B)
expected = DataFrame(np.ones((100, )) * 10, index=ix)
pdt.assert_frame_equal(result, expected)
comp = recordlinkage.Compare()
comp.add(Frequency(right_on='col', normalise=False))
result = comp.compute(ix, A, B)
expected = DataFrame(np.ones((100, )) * 5, index=ix)
pdt.assert_frame_equal(result, expected)
@pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0])
def test_freq_nan(self, missing_value):
# data
array_repeated = np.repeat(np.arange(10, dtype=np.float64), 10)
array_repeated[90:] = np.nan
array_tiled = np.tile(np.arange(20, dtype=np.float64), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col', missing_value=missing_value))
result = comp.compute(ix, A, B)
expected_np = np.ones((100, )) / 10
expected_np[90:] = missing_value
expected = DataFrame(expected_np, index=ix)
pdt.assert_frame_equal(result, expected)
class TestCompareVariable(object):
def test_variable(self):
# data
arrayA = np.random.random((100,))
arrayB = np.random.random((100,))
# convert to pandas data
A = DataFrame({'col': arrayA})
B = DataFrame({'col': arrayB})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Variable, VariableA, VariableB
comp = recordlinkage.Compare()
comp.add(Variable(left_on='col'))
comp.add(VariableA('col'))
result = comp.compute(ix, A, B)
expected = Series(arrayA, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
comp = recordlinkage.Compare()
comp.add(Variable(right_on='col'))
comp.add(VariableB('col'))
result = comp.compute(ix, A, B)
expected = Series(arrayB, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
@pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0])
def test_variable_nan(self, missing_value):
# data
arrayA = np.random.random((100,))
arrayA[90:] = np.nan
arrayB = np.random.random((100,))
# convert to pandas data
A = | DataFrame({'col': arrayA}) | pandas.DataFrame |
import dash
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.graph_objs as go
df = pd.read_csv("./data/kiva_loans.csv", parse_dates=True)
def split_borrower_gender(l):
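# collapse the list of borrower genders for a loan into a single label: 'male', 'female', or 'both'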
m = 0
f = 0
if type(l) != list:
return np.nan
for i in l:
if i== 'male':
m += 1
else:
f += 1
if m == 0:
return 'female'
elif f == 0:
return 'male'
else:
return 'both'
df.borrower_genders = df.borrower_genders.str.split(', ').apply(split_borrower_gender)
df['disbursed_year'] = pd.to_datetime(df.disbursed_time).dt.year
top5 = df.groupby('activity').size().sort_values(ascending=False)[0:5] # lets look at top 5
top5_male = df.groupby('activity').size().sort_values(ascending=False)[0:5]
top5_female = df[df.borrower_genders == 'female'].groupby('activity').size().sort_values(ascending=False)[0:5]
# converting dates from string to DateTime objects gives nice tools
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
retail_data1 = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/10%25_original_randomstate%3D42/retail_data_from_1_until_3_reduce.csv')
retail_data2 = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/10%25_original_randomstate%3D42/retail_data_from_4_until_6_reduce.csv')
retail_data3 = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/10%25_original_randomstate%3D42/retail_data_from_7_until_9_reduce.csv')
retail_data4 = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/10%25_original_randomstate%3D42/retail_data_from_10_until_12_reduce.csv')
retail_table = pd.concat([retail_data1, retail_data2, retail_data3, retail_data4])
cek = retail_table.loc[(retail_table['item_price'] < 0) | (retail_table['total_price'] < 0)]
if cek.shape[0] != 0:
retail_table = retail_table.loc[(retail_table['item_price'] > 0) & (retail_table['total_price'] > 0)]
cek = retail_table.loc[retail_table['order_id'] == 'undefined']
if cek.shape[0] != 0:
retail_table = retail_table.loc[retail_table['order_id'] != 'undefined']
retail_table['order_id'] = retail_table['order_id'].astype('int64')
retail_table['order_date'] = | pd.to_datetime(retail_table['order_date']) | pandas.to_datetime |
# Energy and entropy map
# calculation for pdb files
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
from MD_Analysis import *
import easygui as eg
# Getting the phi/psi angles
# pdb = eg.fileopenbox(msg="Select the pdb to calculate angles from")
# AC = Angle_Calc(pdb)
# AC.get_phi_psi()
# AC.angles.to_csv("sADP5_wt_metad100ns_phi_psi.csv")
# get the cos/sin data from phi/psi using pedros
name = 'sADP5_WT_3ms'
data = pd.read_csv(name + '.csv')
# del data['Unnamed: 0']
sc_df = | pd.DataFrame() | pandas.DataFrame |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 13:34:51 2019
@author: jaime
#"""
import h5py as h5
from circle_fit import least_squares_circle
import pandas as pd
import re as re
from sys import platform
import numpy as np
import os
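# conversion factor: seconds per year times 100, used to express model velocities in cm/yr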
cmy = 365 * 24 * 60 * 60. * 100
class UserChoice(Exception):
def __init__(self, message):
self.message = message
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in per cent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
# Print New Line on Complete
if iteration == total:
print()
def get_model_name(model_dir):
if 'win' in platform:
if model_dir.endswith('\\'):
model_dir = model_dir[:-1]
return re.split(r'\\', model_dir)[-1]
else:
if model_dir.endswith('/'):
model_dir = model_dir[:-1]
return re.split('/', model_dir)[-1]
def velocity_rescale(df, scf):
df = df / scf * cmy
return df
def viscosity_rescale(df, scf):
df = np.log10(df * scf)
return df
def dim_eval(res):
# Not likely to be a 1D model.
if len(res) > 2:
return 3
else:
return 2
def get_res(model_dir):
# Make the file path
filename = model_dir + 'Mesh.linearMesh.00000.h5'
# Read everything
data = h5.File(filename, 'r')
res = data.attrs['mesh resolution']
# Get the dimensions:
ndims = dim_eval(res)
if ndims == 2:
return {'x': res[0] + 1, 'y': res[1] + 1}, ndims
else:
return {'x': res[0] + 1, 'y': res[1] + 1, 'z': res[2] + 1}, ndims
def ts_writer(ts_in):
# Making the timestep text:
return str(ts_in).zfill(5)
def get_time(mdir, ts):
data = h5.File(mdir + 'timeInfo.' + ts + '.h5', 'r')
time_out = data['currentTime'][0]
return time_out
def get_nproc(mdir):
data = h5.File(mdir + '/timeInfo.00000.h5', 'r')
return data['nproc'][0]
# %%
class UwLoader:
def __init__(self, model_dir, ts=0, scf=1e22, get_time_only=False):
if model_dir[-1] != '/':
self.model_dir = model_dir + '/'
else:
self.model_dir = model_dir
# Verify if the path is correct:
if not os.path.isdir(model_dir):
raise FileNotFoundError('No such model exists.')
self.res, self.dim = get_res(self.model_dir)
# Cores are not needed for now.
# Initiate a boundary coordinate
self.boundary = {}
# Set the default scaling:
self.scf = scf
# Save the model name
self.model_name = get_model_name(model_dir)
# Save an empty list/dict for any slicing that will be done
self.performed_slices = []
# Get the number of processors used
self.nproc = get_nproc(model_dir)
# set the initial timestep:
self.current_step = ts_writer(ts)
self.time_Ma = np.round(get_time(self.model_dir, self.current_step) * self.scf / (365 * 24 * 3600) / 1e6, 3)
if not get_time_only:
# Initiate an output dataframe
self.output = None
self._get_mesh()
# if get_all:
self.get_all()
self.starting_output = self.output # for slices
def set_current_ts(self, step):
"""
Function to reset the model output and replace the output object.
"""
# Re-instantiate the object with a new timestep:
self.__init__(model_dir=self.model_dir, ts=step, scf=self.scf)
##################################################
# RETRIEVING INFORMATION #
##################################################
def get_all(self):
"""
Function to get all existing variables from the current working directory.
"""
# print('Getting all variables...')
self.get_material()
self.get_velocity()
self.get_strain()
self.get_stress()
self.get_viscosity()
self.get_temperature()
# Get mesh information:
def _get_mesh(self):
# Set the file path:
filename = self.model_dir + 'Mesh.linearMesh.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mesh_info = data['vertices'][()]
# Write the info accordingly:
if self.dim == 2:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y'], dtype='float')
else:
# in 3D:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y', 'z'], dtype='float')
# Save the model dimensions:
axes = self.output.columns.values
max_dim = self.output.max().values
min_dim = self.output.min().values
for axis, min_val, max_val in zip(axes, min_dim, max_dim):
self.boundary[axis] = [min_val, max_val]
def get_velocity(self):
try:
self.scf
except AttributeError:
raise ValueError('No Scaling Factor detected!')
if type(self.output) == dict:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'VelocityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
vel_info = data['data'][()]
# Write the info accordingly:
if self.dim == 2:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy'])
else:
# in 3D:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy', 'vz'])
# Rescale
velocity = velocity_rescale(velocity, self.scf)
# Merge with the current output dataframe
self.output = self.output.merge(velocity, left_index=True, right_index=True)
def get_viscosity(self, convert_to_log=True):
try:
self.scf
except AttributeError:
raise ValueError('No Scaling Factor detected!')
if self.output is None:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'ViscosityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
viscosity = pd.DataFrame(data=mat_info,
columns=['eta'])
# Rescale
if convert_to_log:
viscosity = viscosity_rescale(viscosity, self.scf)
else:
viscosity *= self.scf
# Merge:
self.output = self.output.merge(viscosity, left_index=True, right_index=True)
def get_material(self):
# Set the file path:
filename = self.model_dir + 'MaterialIndexField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
material = | pd.DataFrame(data=mat_info, columns=['mat']) | pandas.DataFrame |
import sys
from sqlalchemy import create_engine
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from feature_extraction import StartingVerbExtractor, ResponseLengthExtractor, tokenize
from sklearn.ensemble import AdaBoostClassifier
import pickle
def load_data(database_filepath):
''' Loads the data from the passed path
and splits it into X and y
'''
# load data from database
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = | pd.read_sql_table('Response', engine) | pandas.read_sql_table |
from kfp.components import InputPath, OutputPath
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def get_full_tech_indi(
# tech_indi_dataset01_path: InputPath('DataFrame'),
# tech_indi_dataset02_path: InputPath('DataFrame'),
# tech_indi_dataset03_path: InputPath('DataFrame'),
# tech_indi_dataset04_path: InputPath('DataFrame'),
# tech_indi_dataset05_path: InputPath('DataFrame'),
# full_tech_indi_dataset_path: OutputPath('DataFrame')
tech_indi_dataset01: Input[Dataset],
tech_indi_dataset02: Input[Dataset],
tech_indi_dataset03: Input[Dataset],
tech_indi_dataset04: Input[Dataset],
tech_indi_dataset05: Input[Dataset],
tech_indi_dataset06: Input[Dataset],
tech_indi_dataset07: Input[Dataset],
tech_indi_dataset08: Input[Dataset],
tech_indi_dataset09: Input[Dataset],
tech_indi_dataset10: Input[Dataset],
tech_indi_dataset11: Input[Dataset],
full_tech_indi_dataset: Output[Dataset]
):
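# component body: load the per-period technical-indicator datasets so they can be combined into the single full dataset artifact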
import pandas as pd
df_01 = pd.read_pickle(tech_indi_dataset01.path)
df_02 = pd.read_pickle(tech_indi_dataset02.path)
df_03 = pd.read_pickle(tech_indi_dataset03.path)
df_04 = pd.read_pickle(tech_indi_dataset04.path)
df_05 = pd.read_pickle(tech_indi_dataset05.path)
df_06 = pd.read_pickle(tech_indi_dataset06.path)
df_07 = pd.read_pickle(tech_indi_dataset07.path)
df_08 = pd.read_pickle(tech_indi_dataset08.path)
df_09 = pd.read_pickle(tech_indi_dataset09.path)
df_10 = pd.read_pickle(tech_indi_dataset10.path)
df_11 = | pd.read_pickle(tech_indi_dataset11.path) | pandas.read_pickle |
from flask import Flask,request, render_template, session, redirect, url_for, session
import numpy as np
import pickle
import pandas as pd
import datetime as dt
import bz2
app = Flask(__name__)
# data = bz2.BZ2File('model.pkl', 'rb')
# model = pickle.load(data)
# REMEMBER TO LOAD THE MODEL AND THE SCALER!
def conver_date(date):
#WeekOfYear = dt.datetime.strptime(date, "%Y-%m-%d").weekofyear
Year = dt.datetime.strptime(date, "%Y-%m-%d").year
Month = dt.datetime.strptime(date, "%Y-%m-%d").month
DayOfMonth = dt.datetime.strptime(date, "%Y-%m-%d").day
return { "Year":Year, "Month": Month, "DayOfMonth":DayOfMonth}
def one_hot(data):
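# note: despite its name, this maps category letters to ordinal integers rather than one-hot vectors ('a' and 'b' both map to 2 as written)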
if data == "0":
return 0
elif data == 'a':
return 2
elif data == 'b':
return 2
elif data == 'c':
return 3
elif data == 'd':
return 4
def radio_to_binary(data):
if data == 'on':
return 1
else:
return 0
@app.route('/', methods=['GET', 'POST'])
def index():
# # Create instance of the form.
return render_template('home.html')
@app.route('/prediction',methods=['POST'])
def prediction():
feature_dict = {"SchoolHoliday": [int(request.form['school'])],
"StoreType": [one_hot(request.form['store_type'])],
"PromoOpen": [int(request.form['PromoOpen'])],
"DayOfMonth": [conver_date(request.form['date'])['DayOfMonth']],
"Year": [conver_date(request.form['date'])['Year']],
"Month": [conver_date(request.form['date'])['Month']],
"CompetionDistance": [int(request.form['competion_distance'])],
"Assortment": [one_hot(request.form['assortment'])],
"StateHoliday": [one_hot(request.form['state_holiday'])],
"Store": [int(request.form['Store'])],
"Promo": [radio_to_binary(request.form['promo'])],
"Promo2": [radio_to_binary(request.form['promo2'])],
"DayOfWeek": [int(request.form['day_of_week'])]
}
test_frame = | pd.DataFrame.from_dict(feature_dict) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster.bicluster import SpectralCoclustering
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from itertools import product
######## practice pt1
x = pd.Series([6, 3, 8, 6], index=["q", "w", "e", "r"])
# print(x.index)
# print(x)
x = x.reindex(sorted(x.index))
# print(x.index)
# print(x)
y = pd.Series([7, 3, 5, 2], index=["e", "q", "r", "t"])
# print(x + y)
######## practice pt2
data = {'name': ['Tim', 'Jim', 'Pam', 'Sam'],
'age': [29, 31, 27, 35],
'ZIP': ['02115', '02130', '67700', '00100']}
y = | pd.DataFrame(data, columns=["name", "age", "ZIP"]) | pandas.DataFrame |
import click
import pandas as pd
import os
import time
from binance.client import Client
@click.command()
@click.option('--pm', default=0.01, help='Profit Margin')
@click.option('--ci', default=60, help='Check interval in seconds')
def trade_coins(pm, ci):
if os.path.isdir('crypto-data'):
pass
else:
os.mkdir('crypto-data')
if os.path.exists('crypto-data/crypto-log.txt'):
os.remove('crypto-data/crypto-log.txt')
else:
pass
file = open('crypto-data/crypto-log.txt', 'w')
api_key = '<KEY>'
api_secret = '<KEY>'
client = Client(api_key, api_secret)
print('--- Client Instantiated ---')
file.write('--- Client Instantiated ---\n')
pd.options.mode.chained_assignment = None
coins = client.get_all_coins_info()
print('--- Coins retrieved ---')
file.write('--- Coins retrieved ---\n')
coins_dataframe = pd.DataFrame(columns=['coin','trading','isLegalMoney'])
for coin in coins:
coins_dataframe = coins_dataframe.append({'coin': coin['coin'], 'trading': coin['trading'],'isLegalMoney': coin['isLegalMoney']}, ignore_index=True)
coins_dataframe = coins_dataframe[coins_dataframe.trading > 0 ]
coins_dataframe = coins_dataframe[coins_dataframe.isLegalMoney == 0]
print('--- Retrieving Trade Fees ---')
file.write('--- Retrieving Trade Fees ---\n')
coins_dataframe['trade_fee'] = coins_dataframe.coin.apply(lambda x: client.get_trade_fee(symbol=x+'USDT'))
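# an empty fee list means the coin has no USDT pair; mark it as None so it can be filtered out below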
coins_dataframe.trade_fee = coins_dataframe.trade_fee.apply(lambda x: x if len(x)> 0 else None)
coins_dataframe = coins_dataframe[coins_dataframe.trade_fee.astype(str) != 'None']
coins_dataframe['trade_symbol'] = coins_dataframe.trade_fee.apply(lambda x: x[0]['symbol'])
print('--- Trade fees retrieved ---')
file.write('--- Trade fees retrieved ---\n')
coins_dataframe.reset_index(inplace=True,drop=True)
coins_dataframe['profit'] = 0
coins_dataframe['gained'] = 0
coins_dataframe['times_it_sold'] = 0
coins_dataframe['coin_status'] = 'initialized'
print('--- Statistics initialized ---')
file.write('--- Statistics initialized ---\n')
initial_buying_prices = os.path.exists('crypto-data/initial_buying_prices.csv')
coins_dataframe['initial_buy_price'] = None
coins_dataframe['initial_buy_cost'] = None
coins_dataframe.reset_index(drop=True,inplace=True)
if initial_buying_prices:
initial_buying_prices = pd.read_csv('crypto-data/initial_buying_prices.csv')
for coin in initial_buying_prices.coin.values:
if coin in coins_dataframe.coin.values:
index = coins_dataframe[coins_dataframe.coin == coin].index[0]
index_initial_buying_prices = initial_buying_prices[initial_buying_prices.coin == coin].index[0]
coins_dataframe.loc[index, 'initial_buy_price'] = initial_buying_prices.loc[index_initial_buying_prices,'initial_buy_price']
coins_dataframe.loc[index, 'initial_buy_cost'] = initial_buying_prices.loc[index_initial_buying_prices,'initial_buy_cost']
else:
prices = client.get_all_tickers()
for price in prices:
if price['symbol'] in coins_dataframe.trade_symbol.values:
index = coins_dataframe[coins_dataframe.trade_symbol == price['symbol']].index[0]
coins_dataframe.loc[index,'initial_buy_price'] = float(price['price'])
coins_dataframe.loc[index,'initial_buy_cost'] = float(coins_dataframe.loc[index,'trade_fee'][0]['makerCommission']) * float(price['price'])
coins_dataframe[['coin','initial_buy_price','initial_buy_cost']].to_csv('crypto-data/initial_buying_prices.csv',index=False)
print('--- Initial prices retrieved ---')
file.write('--- Initial prices retrieved ---\n')
print('--- Starting the updating of the prices loop ---')
file.write('--- Starting the updating of the prices loop ---\n')
coins_sold_history = os.path.exists('crypto-data/coins_sold_history.csv')
if coins_sold_history:
df_coins_sold = pd.read_csv('crypto-data/coins_sold_history.csv')
else:
df_coins_sold = pd.DataFrame(columns=['coin','initial_buy_price','initial_buy_cost','out_price','estimated_cost', 'profit'])
coins_rebought_history = os.path.exists('crypto-data/coins_rebought_history.csv')
if coins_rebought_history:
df_coins_rebought = | pd.read_csv('crypto-data/coins_rebought_history.csv') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
symbol = 'AMD'
market = 'SPY'
num_of_years = 1
start = dt.date.today() - dt.timedelta(days=365*num_of_years)
end = dt.date.today()
dataset = yf.download(symbol,start,end)
benchmark = yf.download(market,start,end)
dataset['Returns'] = dataset['Adj Close'].pct_change().dropna()
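# classic floor-trader pivot points: PP is the mean of high, low and close; the R and S series fan outward as resistance and support levels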
PP = pd.Series((dataset['High'] + dataset['Low'] + dataset['Close']) / 3)
R1 = pd.Series(2 * PP - dataset['Low'])
S1 = pd.Series(2 * PP - dataset['High'])
R2 = pd.Series(PP + dataset['High'] - dataset['Low'])
S2 = pd.Series(PP - dataset['High'] + dataset['Low'])
R3 = pd.Series(dataset['High'] + 2 * (PP - dataset['Low']))
S3 = pd.Series(dataset['Low'] - 2 * (dataset['High'] - PP))
R4 = pd.Series(dataset['High'] + 3 * (PP - dataset['Low']))
S4 = pd.Series(dataset['Low'] - 3 * (dataset['High'] - PP))
R5 = | pd.Series(dataset['High'] + 4 * (PP - dataset['Low'])) | pandas.Series |
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from tqdm import tqdm
from kndetect.utils import extract_mimic_alerts_region
def get_feature_names(npcs=3):
"""
Create the list of feature names depending on the number of principal components.
Parameters
----------
npcs : int
number of principal components to use
Returns
-------
list
name of the features.
"""
names_root = ["coeff" + str(i + 1) + "_" for i in range(npcs)] + [
"residuo_",
"maxflux_",
]
return [i + j for j in ["g", "r"] for i in names_root]
def calc_prediction(coeff, pcs_arr):
"""
given the coefficients and PCs, it calculates the prediction as a linear combination
Parameters
----------
coeff: np.array of shape [num_pcs]
coefficients of the linear combinations for the PCs
pcs_arr: np.array of shape [num_pcs, num_prediction_points]
The PCs that are being used as templates
Returns
-------
predicted_lc: np.array of shape [num_prediction_points]
prediction as a linear combination of PCs
"""
predicted_lc = np.zeros_like(pcs_arr.shape[0])
for a, b in zip(pcs_arr, coeff):
predicted_lc = np.add(predicted_lc, b * a)
return predicted_lc
def calc_loss(
coeff,
pcs_arr,
light_curve_flux,
light_curve_err,
map_dates_to_arr_index,
regularization_weight,
low_var_indices=[1, 2],
):
"""
function to calculate the loss to be optimized
Parameters
----------
coeff: np.array of shape [num_of_pcs]
current value of coefficients
pcs_arr: np.array of shape [num_pcs, num_prediction_points]
principal components to the used for the prediction
light_curve_flux: pandas column of shape [num_recorded_points]
segment of lightcurve that is to be fitted
light_curve_err: pandas column of shape [num_recorded_points]
segment with corresponding error bars in the segment that is to be fitted.
map_dates_to_arr_index: np.array of shape [num_recorded_points]
mapping that holds the index position corresponding to each point in the lightcurve
regularization_weight: float
weights given to the regularization term
low_var_indices: list
Indices along which variance is low.
Default value is set to [1, 2] which regularizes the 2nd and 3rd PCs
Returns
-------
loss: (float)
that is to be optimized
"""
# calculation of the reconstruction loss
y_pred = calc_prediction(coeff, pcs_arr)
real_flux = np.take(y_pred, map_dates_to_arr_index)
reconstruction_loss = np.sum(
np.divide(np.square(real_flux - light_curve_flux), np.square(light_curve_err))
)
# Calculate the regularization
# Regularize the second coefficient
regularization_term = 0
if low_var_indices is not None:
regularization_term = np.sum(np.square(coeff[low_var_indices[:]]))
# Penalise a negative first PC coefficient (coeff[0] should stay non-negative)
if coeff[0] < 0:
regularization_term = regularization_term + np.square(coeff[0])
loss = reconstruction_loss + regularization_term * regularization_weight
return loss
def calc_residual(
coeff, pcs_arr, light_curve_flux, light_curve_err, map_dates_to_arr_index
):
"""
function to calculate residual of the fit
Parameters
----------
coeff: np.array of shape [num_of_pcs]
current value of coefficients
pcs_arr: np.array of shape [num_pcs, num_prediction_points]
principal components to be used for the prediction
light_curve_flux: pandas column of shape [num_recorded_points]
segment of lightcurve that is to be fitted
light_curve_err: pandas column of shape [num_recorded_points]
segment with corresponding error bars in the segment that is to be fitted.
map_dates_to_arr_index: np.array of shape [num_recorded_points]
mapping that holds the index position corresponding to each point in the lightcurve
Returns
-------
residual: float
residual value
"""
y_pred = calc_prediction(coeff, pcs_arr)
real_flux = np.take(y_pred, map_dates_to_arr_index)
diff = real_flux - light_curve_flux
reconstruction_loss = np.mean(
np.divide(np.square(diff), np.square(light_curve_err))
)
residual = np.sqrt(reconstruction_loss)
return residual
def predict_band_features(
band_df, pcs, time_bin=0.25, flux_lim=200, low_var_indices=[1, 2]
):
"""
function to evaluate features for a band
Parameters
----------
band_df: pandas.DataFrame
dataframe with the data of only one band of a lightcurve
pcs: np.array of shape [num pc components, num prediction points/bins]
For example, pcs_arr[0] will correspond to the first principal component.
time_bin: float
Width of time gap between two elements in PCs.
flux_lim: float (optional)
Limit of minimum flux for prediction to be made in a band.
Note that all the points in the band are used for the fit, provided that the max flux in the band > flux_lim
low_var_indices: list
Indices along which variance is low.
Default value is set to [1, 2] which regularizes the 2nd and 3rd PCs
Returns
-------
features: list of features for the given band
The features are in the same order in which the classifier was trained:
coefficients of pcs, number of features, residual and maxflux.
"""
num_pcs = len(pcs)
num_prediction_points = len(pcs[0])
if len(band_df) == 0:
features = np.zeros(int(len(get_feature_names(num_pcs)) / 2)).tolist()
return features
max_loc = np.argmax(band_df["FLUXCAL"])
max_flux = band_df["FLUXCAL"].iloc[max_loc]
# extract the prediction region
mid_point_date = band_df["MJD"].iloc[max_loc]
prediction_duration = time_bin * (num_prediction_points - 1)
start_date = mid_point_date - prediction_duration / 2
end_date = mid_point_date + prediction_duration / 2
duration_index = (band_df["MJD"] > start_date) & (band_df["MJD"] < end_date)
band_df = band_df[duration_index]
if (max_flux > flux_lim) & (len(band_df) >= 2):
# update the location
max_loc = np.argmax(band_df["FLUXCAL"])
# create a mapping from JD to index in the prediction.
# For Example, midpoint is at index (num_prediction_points - 1) / 2. The middle of the prediction region.
map_dates_to_arr_index = np.around(
(band_df["MJD"].values - mid_point_date).astype(float) / time_bin
+ (num_prediction_points - 1) / 2
)
map_dates_to_arr_index = map_dates_to_arr_index.astype(int)
# Initil guess for coefficients.
initial_guess = np.zeros(num_pcs) + 0.5
# Calculating the regularization weight to make it comparable to reconstruction loss part.
err_bar_of_max_flux = band_df["FLUXCALERR"].iloc[max_loc]
regularization_weight = np.square(max_flux / err_bar_of_max_flux)
# normalize the flux and errorbars
normalized_flux = band_df["FLUXCAL"].values / max_flux
normalized_err_bars = band_df["FLUXCALERR"].values / max_flux
# bounds for the coefficient
bounds = []
for i in range(num_pcs):
bounds.append([-2, 2])
# minimize the cost function
result = minimize(
calc_loss,
initial_guess,
args=(
pcs,
normalized_flux,
normalized_err_bars,
map_dates_to_arr_index,
regularization_weight,
low_var_indices,
),
bounds=bounds,
)
# extract the coefficients
coeff = list(result.x)
# maximum flux in a band
max_band_flux = max_flux
# calculate residuals
residual = calc_residual(
result.x, pcs, normalized_flux, normalized_err_bars, map_dates_to_arr_index
)
else:
coeff = np.zeros(num_pcs).tolist()
residual = 0
max_band_flux = 0
# build the features list
features = coeff
features.append(residual)
features.append(max_band_flux)
return features
def extract_features_all_bands(pcs, filters, lc, flux_lim, time_bin):
"""
Extract features for all the bands of lightcurve
Parameters
----------
pcs: np.array of shape [num_pcs, num_prediction_points]
principal components to be used for the prediction
time_bin: float
Width of time gap between two elements in PCs.
filters: list
List of broad band filters.
lc: pd.DataFrame
Keys should be ['MJD', 'FLUXCAL', 'FLUXCALERR', 'FLT'].
flux_lim: float (optional)
Limit of minimum flux for prediction to be made in a band.
Note that all the points in the band are used for the fit, provided that the max flux in the band > flux_lim
low_var_indices: list
Indices along which variance is low.
Default value is set to [1, 2] which regularizes the 2nd, 3rd PCs
flux_lim: int/float
flux value above which no predictions are made for a band
time_bin:
duration of a time bin in days. For eg, .25 means 6 hours
Returns
-------
all_features: list
List of features for this object.
Order is all features from first filter, then all features from
second filters, etc.
"""
low_var_indices = [1, 2]
all_features = []
for band in filters:
band_df = lc[lc["FLT"] == band]
features = predict_band_features(
band_df=band_df,
pcs=pcs,
time_bin=time_bin,
flux_lim=flux_lim,
low_var_indices=low_var_indices,
)
all_features.extend(features)
return all_features
def extract_features_all_lightcurves(lc_df, key, pcs, filters, mimic_alerts=False):
"""
extracts features for all lightcurves in df
Parameters:
lc_df: pandas DataFrame
dataframe with data of different lightcurves.
Columns must include: "MJD", "FLT", "FLUXCAL", "FLUXCALERR" and a key
key: str
Column name to identify each lightcurve to be fitted.
pcs: np.array of shape [num_pcs, num_prediction_points]
principal components to the used for the prediction
filters: list
list of filters/bands present in the lightcurves
mimic_alerts: bool
boolean value to choose between extracting features for complete light curves or partial lightcurves.
"""
time_bin = 0.25 # 6 hours
flux_lim = 200
object_ids = np.unique(lc_df[key])
feature_names = get_feature_names()
features_df = {k: [] for k in feature_names}
features_df["key"] = []
current_dates = []
for object_id in tqdm(object_ids):
object_lc = lc_df[lc_df[key] == object_id]
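# NaN != NaN, so this comparison keeps only rows with a finite FLUXCAL value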
object_lc = object_lc[object_lc["FLUXCAL"] == object_lc["FLUXCAL"]]
if mimic_alerts:
object_lc, current_date = extract_mimic_alerts_region(object_lc, flux_lim)
current_dates.append(current_date)
features = extract_features_all_bands(
pcs=pcs, filters=filters, lc=object_lc, flux_lim=flux_lim, time_bin=time_bin
)
features_df["key"].append(object_id)
for i, feature_name in enumerate(feature_names):
features_df[feature_name].append(features[i])
if mimic_alerts:
features_df["current_dates"] = current_dates
return | pd.DataFrame.from_dict(features_df) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 20:25:08 2019
@author: alexandradarmon
"""
### RUN TIME SERIES
import pandas as pd
from punctuation.recognition.training_testing_split import (
get_nn_indexes
)
from punctuation.feature_operations.distances import d_KL
from punctuation.recognition.recognition_algorithms import (
launch_nearest_neighbour,
launch_neural_net
)
from punctuation.config import options
from punctuation.utils.utils import (
load_corpus,
int_or_nan
)
from punctuation.time_series.time_functions import (
get_temporal,
plot_histogram_years,
plot_freq_overtime,
plot_col_overtime
)
import pandas as pd
import numpy as np
import matplotlib.style
import matplotlib as mpl
mpl.style.use('seaborn-paper')
df = load_corpus()
df_temporal = get_temporal(df=df)
plot_histogram_years(df_temporal, show_middleyear=False,
to_show=True, print_legend=False)
plot_histogram_years(df_temporal,show_middleyear=True,
to_show=True, print_legend=False)
list_freq_pun_col = list(range(options.nb_signs))
freq_pun_col_1 = [1,4,5]
freq_pun_col_2 = [0,7]
freq_pun_col_3 = [2,3,6,8,9]
for f in [freq_pun_col_1,freq_pun_col_2,freq_pun_col_3]:
plot_freq_overtime(df_temporal, f,
col_date='author_middle_age',
min_date=1700, max_date=1950,
to_show=True, print_legend=True)
plot_freq_overtime(df_temporal, list_freq_pun_col,
col_date='author_middle_age',
min_date=1700, max_date=1950,
to_show=True, print_legend=False)
wells = pd.read_csv('data/Marya_Wells.csv').sort_values('Date')
wells = pd.merge(wells, df_temporal, how='inner', on='title')
wells['Date_bin'] = wells['Date']
plot_freq_overtime(wells, list_freq_pun_col,
col_date='Date',
min_date=min(wells['Date']),
max_date=1922,
print_legend=False, show_ci=True)
fleming = pd.read_csv('data/Alex_Fleming.csv').sort_values('Date')
fleming = pd.merge(fleming, df_temporal, how='left', on='title')
fleming['Date_bin'] = fleming['Date']
plot_freq_overtime(fleming, list_freq_pun_col,
col_date='Date',
min_date=min(fleming['Date']),
max_date=max(fleming['Date']),
print_legend=False, show_ci=True)
shakespeare = pd.read_csv('data/Alex_Shakespeare.csv').sort_values('Date')
shakespeare = pd.merge(shakespeare, df_temporal, how='inner', on='title')
shakespeare['Date_bin'] = shakespeare['Date']
plot_freq_overtime(shakespeare, list_freq_pun_col,
col_date='Date',
min_date=min(shakespeare['Date']),
max_date=max(shakespeare['Date']),
print_legend=False)
dickens = pd.read_csv('data/Alex_Dickens.csv').sort_values('Date')
dickens = | pd.merge(dickens, df_temporal, how='left', on='title') | pandas.merge |
import pandas as pd
import numpy as np
from pandas_datareader import data
import matplotlib.pyplot as plt
import yaml
import sys
import math
plt.style.use('ggplot')
def LoadConfig(
yamlpath: str)-> dict:
config = yaml.load(
open(yamlpath, 'r'),
Loader=yaml.FullLoader)
return config
def GetData(
ticker : str,
start_date : str,
end_date : str)-> pd.DataFrame:
"""Getting historic price data from yahoo finance.
Arguments:
ticker {str}
start_date {str}
end_date {str}
Returns:
pd.DataFrame --> the output price dataframe
"""
return data.DataReader(ticker,'yahoo', start_date, end_date)
def PlotOptimalSharpeRatio(Agent):
plt.plot(range(len(Agent.epoch_training)),Agent.epoch_training, color ="navy")
plt.title("Sharpe ratio optimization")
plt.xlabel("Number of Iterations")
plt.ylabel("Sharpe Ratio")
plt.grid(True)
plt.savefig("outputfiles/graphs/Sharpe ratio optimization {} SMA noFeatures.png".format(str(Agent.input_size)), dpi=300)
plt.close
def PlotTraining(Agent):
fig, ax = plt.subplots(nrows=3, figsize=(20, 10))
t = np.linspace(1, Agent.trading_periods, Agent.trading_periods)[::-1]
ax[0].plot(t, Agent.prices[:Agent.trading_periods], color="navy")
ax[0].set_xlabel("time")
ax[0].set_ylabel("SMA-S&P500")
ax[0].grid(True)
ax[1].plot(t, Agent.action_space[:Agent.trading_periods], color="navy")
ax[1].set_xlabel("time")
ax[1].set_ylabel("Trading Signal")
ax[1].grid(True)
ax[2].plot(t, Agent.sumR, color="navy", label="Optimised Policy")
ax[2].plot(range(len(Agent.returns[:Agent.trading_periods])), np.cumsum(Agent.returns[::-1][:Agent.trading_periods]), color="maroon", label="Benchmark")
ax[2].set_xlabel("time")
ax[2].set_ylabel("Cumulative Return")
ax[2].legend(loc="upper left")
ax[2].grid(True)
plt.savefig("outputfiles/graphs/rrl_train{}.png".format(str(Agent.input_size)), dpi=300)
fig.clear()
def PlotWeight(Agent):
plt.bar(range(len(Agent.w_opt)),Agent.w_opt, color ="navy")
plt.title("Optimal Weights")
plt.xlabel("Input Vector Order")
plt.ylabel("Weight")
plt.grid(True)
plt.savefig("outputfiles/graphs/weights{}.png".format(str(Agent.input_size)), dpi=300)
plt.close
def PlotSMA(Agent):
df = | pd.DataFrame(data=Agent.prices[::-1], index=None, columns=["a"]) | pandas.DataFrame |
import pandas as pd
import geopandas as gpd
import numpy as np
from .graph import Graph
from ..util import transform
import logging
from math import ceil, floor, sqrt
class BusSim:
def __init__(
self,
manager,
day,
start_time,
elapse_time,
avg_walking_speed=1.4,
max_walking_min=-1, # HACK
trip_delays=[]
):
"""The constructor of the BusSim class
Args:
day (str): the day in a week to perform simulation on
start_time (str): the starting time to perform simulation on
(HH:MM:SS)
elapse_time (str): the elapse time from starting time to perform
simulation on (HH:MM:SS)
avg_walking_speed (float): the assumed average walking speed
max_walking_min (float): the maximum allowed walking time in minutes
trip_delays (list[Tuple], optional): the list
of trip-delay pairs to add; each tuple should be in the format of
`(trip_id, delay in HH:MM:SS)`
"""
self._logger = logging.getLogger('app')
self._logger.info("Start initializing sim")
self.manager = manager
self.day = day
self.start_time = start_time
self.elapse_time = elapse_time
self.avg_walking_speed = avg_walking_speed
# HACK
if max_walking_min == -1:
max_walking_min = elapse_time
self.max_walking_min = max_walking_min
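# maximum walking radius in metres: minutes * 60 s * average walking speed (m/s)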
self.max_walking_distance = max_walking_min * 60.0 * avg_walking_speed
self.stopTimes_final_df = self._gen_final_df(trip_delays)
self.graph = Graph(self.stopTimes_final_df, start_time,
elapse_time, self.max_walking_distance, avg_walking_speed)
self._logger.info("Sim successfully initialized")
def get_access_grid(self, start_stop=None, start_point=None, grid_size_min=2, route_remove=[]):
max_x, min_x, max_y, min_y = self.manager.get_borders()
x_num, y_num, grid_size = self._get_grid_dimention(grid_size_min)
grid = np.zeros(x_num*y_num).reshape(y_num, -1)
self._logger.info("Start searching graph")
# first convert start_point into meters
if start_point is not None:
start_point = transform(start_point[0], start_point[1])
stops_radius_list = self.graph.search(
start_stop, start_point, route_remove)
if stops_radius_list is None or len(stops_radius_list) == 0:
return grid
self._logger.info("Start compressing")
for bubble in stops_radius_list:
min_x_idx = floor(
(bubble["stop_x"] - min_x - bubble["radius"]) / grid_size)
max_x_idx = floor(
(bubble["stop_x"] - min_x + bubble["radius"]) / grid_size)
min_y_idx = floor(
(bubble["stop_y"] - min_y - bubble["radius"]) / grid_size)
max_y_idx = floor(
(bubble["stop_y"] - min_y + bubble["radius"]) / grid_size)
# TODO: make this radius calculation more precise
radius_idx = (max_x_idx - min_x_idx + 1) / 2
mid_x_idx = (max_x_idx + min_x_idx) / 2
mid_y_idx = (max_y_idx + min_y_idx) / 2
start_y = max(0, min_y_idx)
start_x = max(0, min_x_idx)
end_y = min(max_y_idx+1, y_num)
end_x = min(max_x_idx+1, x_num)
for y in range(start_y, end_y):
for x in range(start_x, end_x):
# check 4 corners, if the distance for all of them is greater than radius, then this grid is not in the circle
if (y - mid_y_idx) ** 2 + (x - mid_x_idx) ** 2 < radius_idx ** 2 or \
(y - mid_y_idx) ** 2 + (x + 1 - mid_x_idx) ** 2 < radius_idx ** 2 or \
(y + 1 - mid_y_idx) ** 2 + (x - mid_x_idx) ** 2 < radius_idx ** 2 or \
(y + 1 - mid_y_idx) ** 2 + (x + 1 - mid_x_idx) ** 2 < radius_idx ** 2:
grid[y][x] = 1
self._logger.info("Finish generating grid")
return grid
def get_gdf(self, start_stop=None, start_point=None, route_remove=[]):
"""Given a starting point(lat, lon) or a starting stop_id, compute the region covered in geopandas.Geodataframe
Args:
start_stop (str, optional): the stop_id of the bus stop to start the search from
start_point (tuple, optional): the starting point given as (lat, lon) coordinates
Returns:
geopandas.GeoDataFrame: the GeoDataFrame of the region covered
"""
self._logger.info("Start searching graph")
# first convert start_point into meters
if start_point is not None:
start_point = transform(start_point[0], start_point[1])
stops_radius_list = self.graph.search(
start_stop, start_point, route_remove)
if stops_radius_list is None or len(stops_radius_list) == 0:
return
self._logger.debug("start generating gdf")
df = | pd.DataFrame(stops_radius_list) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
pd.set_option('display.max_columns', 500)
def clean_features(data, type):
df = pd.DataFrame(data)
# df = df.drop("PassengerId", axis=1)
df.set_index("PassengerId")
df = df.drop(columns=['Cabin', 'Name', 'Ticket'])
print((df.Fare == 0).sum())
if type == True:
analyse_data(df)
return df
def analyse_data(df):
print(df.Survived.value_counts(normalize=True))
fig, axes = plt.subplots(2, 4, figsize=(16, 10))
df["Faily_count"] = df.SibSp + df.Parch
df["Faily_count"] = pd.cut(df["Faily_count"], bins=[-1, 0, 3, 7, 16], labels=["Alone", "Small Family", "Medium Family", "Big Family"])
alive_family = df[df["Survived"] == 1]["Faily_count"].value_counts()
dead_family = df[df["Survived"] == 0]["Faily_count"].value_counts()
family_df = pd.DataFrame([alive_family, dead_family])
family_df.index = ['Alive', 'Dead']
family_df.plot(kind="bar", stacked=True, ax=axes[0][1])
df["Age_Range"] = pd.cut(df["Age"], bins=[1, 14, 30, 50, 80], labels=["Child", "Adult", "MidAge", "Old"])
alive_age = df[df["Survived"] == 1]["Age_Range"].value_counts()
dead_age = df[df["Survived"] == 0]["Age_Range"].value_counts()
age_df = | pd.DataFrame([alive_age, dead_age]) | pandas.DataFrame |
from pandas import DataFrame, read_csv
from PyQt5 import uic, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QTableView, QPushButton, QHeaderView
from Util import UI_DIR, resource_path
class TableModel(QtCore.QAbstractTableModel):
def __init__(self, data):
super(TableModel, self).__init__()
self._data = data
def data(self, index, role):
if role == Qt.DisplayRole:
value = self._data.iloc[index.row(), index.column()]
return str(value)
def rowCount(self, index):
return self._data.shape[0]
def columnCount(self, index):
return self._data.shape[1]
def headerData(self, section, orientation, role):
# section is the index of the column/row.
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return str(self._data.columns[section])
if orientation == Qt.Vertical:
return str(self._data.index[section])
class LogWindow(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
uic.loadUi(UI_DIR + "work_timer_log.ui", self)
self.table: QTableView = self.findChild(QTableView, 'tableView')
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.btnClear = self.findChild(QPushButton, 'btnClearLog')
self.btnClear.clicked.connect(self.clearLog)
self.btnDone = self.findChild(QPushButton, 'btnDone')
self.btnDone.clicked.connect(self.dismissLog)
self.loadLog()
self.show()
def loadLog(self):
try:
data = read_csv(resource_path("work_timer_log.csv"), header=None)
data.columns = ['Date', 'Started Work At', 'Took Break For', 'Ended Work At', 'Total Work Time', 'Notes']
except:
data = | DataFrame() | pandas.DataFrame |
# coding: utf8
from .tsv_utils import complementary_list, add_demographics, baseline_df, chi2
from ..deep_learning.iotools import return_logger
from scipy.stats import ttest_ind
import shutil
import pandas as pd
from os import path
import numpy as np
import os
import logging
sex_dict = {'M': 0, 'F': 1}
def create_split(diagnosis, diagnosis_df, merged_df, n_test, age_name="age",
pval_threshold_ttest=0.80, t_val_chi2_threshold=0.0642,
logger=None):
"""
Split data at the subject-level in training and test set with equivalent age and sex distributions
:param diagnosis: (str) diagnosis on which the split is done
:param diagnosis_df: DataFrame with columns including ['participant_id', 'session_id', 'diagnosis']
:param merged_df: DataFrame with columns including ['age', 'sex'] and containing the same sessions as diagnosis_df
:param n_test: (float)
If >= 1 number of subjects to put in the test set.
If < 1 proportion of subjects to put in the test set.
:param age_name: (str) label of the age column in the dataset.
:param pval_threshold_ttest: (float) threshold for the t-test on age
:param t_val_chi2_threshold: (float) threshold for the chi2 test on sex
:param logger: Logger object from logging library
:return:
train_df (DataFrame) subjects in the train set
test_df (DataFrame) subjects in the test set
"""
if logger is None:
logger = logging
logger.basicConfig(level=logging.DEBUG)
diagnosis_baseline_df = baseline_df(diagnosis_df, diagnosis)
baseline_demographics_df = add_demographics(diagnosis_baseline_df, merged_df, diagnosis)
if n_test >= 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(diagnosis_baseline_df))
sex = list(baseline_demographics_df.sex.values)
age = list(baseline_demographics_df[age_name].values)
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
        # Find a similar distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
        # Find a similar distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
if T_sex < t_val_chi2_threshold and p_age > pval_threshold_ttest:
flag_selection = False
test_df = baseline_demographics_df.loc[idx_test]
train_df = baseline_demographics_df.loc[idx_train]
n_try += 1
logger.info("Split for diagnosis %s was found after %i trials" % (diagnosis, n_try))
return train_df, test_df
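# Hedged usage sketch (added for illustration, not part of the original module; file names are assumptions):
#   merged_df = pd.read_csv("merged.tsv", sep="\t").set_index(["participant_id", "session_id"])
#   AD_df = pd.read_csv("AD.tsv", sep="\t")
#   train_df, test_df = create_split("AD", AD_df, merged_df, n_test=100)
# The while-loop above keeps redrawing a random test set until the chi2 statistic on sex is below
# t_val_chi2_threshold and the t-test p-value on age is above pval_threshold_ttest.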
def split_diagnoses(merged_tsv, formatted_data_path,
n_test=100, age_name="age", subset_name="test", MCI_sub_categories=True,
t_val_threshold=0.0642, p_val_threshold=0.80, verbosity=0):
"""
Performs a single split for each label independently on the subject level.
The train folder will contain two lists per diagnosis (baseline and longitudinal),
whereas the test folder will only include the list of baseline sessions.
    The differences in age and sex distributions between the two sets must be non-significant (according to T-test and chi-square).
Args:
merged_tsv (str): Path to the file obtained by the command clinica iotools merge-tsv.
formatted_data_path (str): Path to the folder containing data extracted by clinicadl tsvtool getlabels.
n_test (float):
If > 1, number of subjects to put in set with name 'subset_name'.
If < 1, proportion of subjects to put in set with name 'subset_name'.
If 0, no training set is created and the whole dataset is considered as one set with name 'subset_name'.
age_name (str): Label of the age column in the dataset.
subset_name (str): Name of the subset that is complementary to train.
MCI_sub_categories (bool): If True, manages MCI sub-categories to avoid data leakage.
t_val_threshold (float): The threshold used for the chi2 test on sex distributions.
p_val_threshold (float): The threshold used for the T-test on age distributions.
verbosity (int): level of verbosity.
Returns:
writes three files per <label>.tsv file present in formatted_data_path:
- formatted_data_path/train/<label>.tsv
- formatted_data_path/train/<label>_baseline.tsv
- formatted_data_path/<subset_name>/<label>_baseline.tsv
"""
logger = return_logger(verbosity, "split")
# Read files
merged_df = pd.read_csv(merged_tsv, sep='\t')
merged_df.set_index(['participant_id', 'session_id'], inplace=True)
results_path = formatted_data_path
train_path = path.join(results_path, 'train')
if path.exists(train_path):
shutil.rmtree(train_path)
if n_test > 0:
os.makedirs(train_path)
test_path = path.join(results_path, subset_name)
if path.exists(test_path):
shutil.rmtree(test_path)
os.makedirs(test_path)
diagnosis_df_paths = os.listdir(results_path)
diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]
diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]
MCI_special_treatment = False
if 'MCI.tsv' in diagnosis_df_paths and n_test > 0:
if MCI_sub_categories:
diagnosis_df_paths.remove('MCI.tsv')
MCI_special_treatment = True
elif 'sMCI.tsv' in diagnosis_df_paths or 'pMCI.tsv' in diagnosis_df_paths:
            logger.warning("MCI special treatment was deactivated though MCI subgroups were found. "
"Be aware that it may cause data leakage in transfer learning tasks.")
    # Baseline sessions must be selected first, otherwise all the sessions would be mixed together
for diagnosis_df_path in diagnosis_df_paths:
diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path),
sep='\t')
diagnosis = diagnosis_df_path.split('.')[0]
if n_test > 0:
train_df, test_df = create_split(diagnosis, diagnosis_df, merged_df, age_name=age_name,
n_test=n_test, t_val_chi2_threshold=t_val_threshold,
pval_threshold_ttest=p_val_threshold, logger=logger)
# Save baseline splits
train_df = train_df[['participant_id', 'session_id', 'diagnosis']]
train_df.to_csv(path.join(train_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
test_df = test_df[['participant_id', 'session_id', 'diagnosis']]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, str(diagnosis) + '.tsv'), sep='\t', index=False)
else:
diagnosis_baseline_df = baseline_df(diagnosis_df, diagnosis)
test_df = diagnosis_baseline_df[['participant_id', 'session_id', 'diagnosis']]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
if MCI_special_treatment:
# Extraction of MCI subjects without intersection with the sMCI / pMCI train
diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\t')
MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])
supplementary_diagnoses = []
logger.debug('Before subjects removal for MCI special treatment')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))
if 'sMCI.tsv' in diagnosis_df_paths:
sMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_df = pd.concat([sMCI_baseline_train_df, sMCI_baseline_test_df])
sMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in sMCI_baseline_df.index.values:
subject = sMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('sMCI')
logger.debug('Removed %i subjects based on sMCI label' % len(sMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if 'pMCI.tsv' in diagnosis_df_paths:
pMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_df = pd.concat([pMCI_baseline_train_df, pMCI_baseline_test_df])
pMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in pMCI_baseline_df.index.values:
subject = pMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('pMCI')
logger.debug('Removed %i subjects based on pMCI label' % len(pMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if len(supplementary_diagnoses) == 0:
            raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with '
'MCI subcategories.')
# Construction of supplementary train
supplementary_train_df = | pd.DataFrame() | pandas.DataFrame |
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
from numba import guvectorize
from findatapy.timeseries import Calendar
from finmarketpy.util.marketconstants import MarketConstants
from finmarketpy.curve.abstractpricer import AbstractPricer
market_constants = MarketConstants()
@guvectorize(['void(f8[:], f8[:], f8[:,:], f8[:,:], intp, f8[:])'],
'(n),(n),(n,m),(n,m),()->(n)', cache=True, target="cpu", nopython=True)
def _forwards_interpolate_numba(spot_arr, spot_delivery_days_arr, quoted_delivery_days_arr,
forwards_points_arr, no_of_tenors, out):
for i, delivery_day in enumerate(spot_delivery_days_arr):
for j in range(no_of_tenors):
# curr_forward_pts = forwards_points_arr[i, j]
# curr_delivery_days = quoted_delivery_days_arr[i, j]
# next_forward_pts = forwards_points_arr[i, j+1]
# next_delivery_days = quoted_delivery_days_arr[i, j+1]
# In other words, SP (Spot)
if delivery_day == 0:
out[i] = spot_arr[i]
break
# Say if we're in between SP and 1W
elif delivery_day <= quoted_delivery_days_arr[i, j]:
out[i] = spot_arr[i] + (forwards_points_arr[i, j] / quoted_delivery_days_arr[i, j]) * delivery_day
break
# Eg. if we're in between 1W and 2W
elif delivery_day >= quoted_delivery_days_arr[i, j] and delivery_day <= quoted_delivery_days_arr[i, j + 1]:
# Alternative interpolation
# interpolated_forwards_arr[i] = spot_arr[i] + \
# delivery_day * (forwards_points_arr[i, j+1] - forwards_points_arr[i, j]) \
# / (quoted_delivery_days_arr[i, j+1] - quoted_delivery_days_arr[i, j])
forward_points_per_day = (forwards_points_arr[i, j + 1] - forwards_points_arr[i, j]) \
/ (quoted_delivery_days_arr[i, j + 1] - quoted_delivery_days_arr[i, j])
out[i] = spot_arr[i] + forward_points_per_day * delivery_day \
+ forwards_points_arr[i, j] - (forward_points_per_day * quoted_delivery_days_arr[i, j])
break
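# Worked example of the interpolation above (illustrative numbers, not market data): with spot = 1.1000,
# 1W points = 0.0010 quoted at 7 delivery days and 2W points = 0.0022 at 14 days, a 10-day forward is
#   points_per_day = (0.0022 - 0.0010) / (14 - 7)
#   outright = 1.1000 + points_per_day * 10 + 0.0010 - points_per_day * 7
# i.e. forward points are interpolated linearly in delivery days between the two quoted tenors,
# exactly as in the last elif branch above.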
@guvectorize(['void(f8[:], f8[:,:], f8[:,:], f8[:,:], f8, f8, intp, f8[:,:])'],
'(n),(n,m),(n,m),(n,m),(),(),()->(n,m)', cache=True, target="cpu", nopython=True)
def _infer_base_currency_depo_numba(spot_arr, outright_forwards_arr, depo_arr, quoted_delivery_days_arr,
base_conv, terms_conv, no_of_tenors, out):
for i in range(len(out)):
for j in range(no_of_tenors):
out[i, j] = (((1 + depo_arr[i, j] * (quoted_delivery_days_arr[i, j] / terms_conv))
/ (outright_forwards_arr[i, j] / spot_arr[i])) - 1) \
/ (quoted_delivery_days_arr[i, j] / base_conv)
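# Note added for clarity: the expression above is covered interest parity rearranged for the base-currency
# deposit rate. With F the outright forward, S spot, r_terms the terms-currency depo and d the delivery days,
#   (1 + r_base * d / base_conv) = (1 + r_terms * d / terms_conv) * S / F
# so r_base = ((1 + r_terms * d / terms_conv) / (F / S) - 1) / (d / base_conv).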
@guvectorize(['void(f8[:], f8[:,:], f8[:,:], f8[:,:], f8, f8, intp, f8[:,:])'],
'(n),(n,m),(n,m),(n,m),(),(),()->(n,m)', cache=True, target="cpu", nopython=True)
def _infer_terms_currency_depo_numba(spot_arr, outright_forwards_arr, depo_arr, quoted_delivery_days_arr,
base_conv, terms_conv, no_of_tenors, out):
for i in range(len(out)):
for j in range(no_of_tenors):
out[i,j] = ((outright_forwards_arr[i,j] / spot_arr[i]) *
(1 + depo_arr[i,j] * (quoted_delivery_days_arr[i,j] / base_conv)) - 1) \
/ (quoted_delivery_days_arr[i,j] / terms_conv)
def _forwards_interpolate(spot_arr, spot_delivery_days_arr, quoted_delivery_days_arr,
forwards_points_arr, no_of_tenors):
out = np.zeros(len(quoted_delivery_days_arr)) * np.nan
for i, delivery_day in enumerate(spot_delivery_days_arr):
for j in range(no_of_tenors):
# curr_forward_pts = forwards_points_arr[i, j]
# curr_delivery_days = quoted_delivery_days_arr[i, j]
# next_forward_pts = forwards_points_arr[i, j+1]
# next_delivery_days = quoted_delivery_days_arr[i, j+1]
# In other words, SP (Spot)
if delivery_day == 0:
out[i] = spot_arr[i]
break
# Say if we're in between SP and 1W
elif delivery_day <= quoted_delivery_days_arr[i, j]:
out[i] = spot_arr[i] + (forwards_points_arr[i, j] / quoted_delivery_days_arr[i, j]) * delivery_day
break
# Eg. if we're in between 1W and 2W
elif delivery_day >= quoted_delivery_days_arr[i, j] and delivery_day <= quoted_delivery_days_arr[i, j + 1]:
# Alternative interpolation
# interpolated_forwards_arr[i] = spot_arr[i] + \
# delivery_day * (forwards_points_arr[i, j+1] - forwards_points_arr[i, j]) \
# / (quoted_delivery_days_arr[i, j+1] - quoted_delivery_days_arr[i, j])
forward_points_per_day = (forwards_points_arr[i, j + 1] - forwards_points_arr[i, j]) \
/ (quoted_delivery_days_arr[i, j + 1] - quoted_delivery_days_arr[i, j])
out[i] = spot_arr[i] + forward_points_per_day * delivery_day \
+ forwards_points_arr[i, j] - (forward_points_per_day * quoted_delivery_days_arr[i, j])
break
return out
class FXForwardsPricer(AbstractPricer):
"""Prices forwards for odd dates which are not quoted using linear interpolation,
eg. if we have forward points for 1W and 1M, and spot date but we want to price a 3W forward, or any arbitrary horizon
date that lies in that interval
Also calculates the implied deposit rate from FX spot, FX forward points and deposit rate.
"""
def __init__(self, market_df=None, quoted_delivery_df=None):
self._calendar = Calendar()
self._market_df = market_df
self._quoted_delivery_df = quoted_delivery_df
def price_instrument(self, cross, horizon_date, delivery_date, option_expiry_date=None, market_df=None, quoted_delivery_df=None,
fx_forwards_tenor_for_interpolation=market_constants.fx_forwards_tenor_for_interpolation,
return_as_df=True):
"""Creates an interpolated outright FX forward (and the associated points), for horizon dates/delivery dates
given by the user from FX spot rates and FX forward points. This can be useful when we have an odd/broken date
which isn't quoted.
Uses linear interpolation between quoted dates to calculate the appropriate interpolated forward. Eg. if we
ask for a delivery date in between 1W and 1M, we will interpolate between those.
Parameters
----------
cross : str
Currency pair
horizon_date : DateTimeIndex
Horizon dates for forward contracts
delivery_date : DateTimeIndex
Delivery dates for forward contracts
market_df : DataFrame
Contains FX spot and FX forward points data
quoted_delivery_df : DataFrame (DateTimeIndex)
Delivery dates for every quoted forward point
fx_forwards_tenor_for_interpolation : str(list)
Which forwards to use for interpolation
Returns
-------
DataFrame
"""
if market_df is None: market_df = self._market_df
if quoted_delivery_df is None: quoted_delivery_df = self._quoted_delivery_df
if quoted_delivery_df is None:
quoted_delivery_df = self.generate_quoted_delivery(cross, market_df, quoted_delivery_df,
fx_forwards_tenor_for_interpolation, cross)
# Make horizon date and delivery date pandas DatetimeIndex
if isinstance(horizon_date, pd.Timestamp):
horizon_date = pd.DatetimeIndex([horizon_date])
delivery_date = | pd.DatetimeIndex([delivery_date]) | pandas.DatetimeIndex |
import zipfile
import io
import requests
import json
import pandas as pd
pd.options.mode.chained_assignment = None
import os, sys, yaml
try: modulepath = os.path.dirname(os.path.realpath(__file__)).replace('\\', '/') + '/'
except NameError: modulepath = 'facilitymatcher/'
output_dir = modulepath + 'output/'
data_dir = modulepath + 'data/'
stewi_inventories = ["NEI","TRI","eGRID","RCRAInfo"]
inventory_to_FRS_pgm_acronymn = {"NEI":"EIS","TRI":"TRIS","eGRID":"EGRID","GHGRP":"E-GGRT","RCRAInfo":"RCRAINFO","DMR":"NPDES"}
def config():
configfile = None
with open(modulepath + 'config.yaml', mode='r') as f:
configfile = yaml.load(f,Loader=yaml.FullLoader)
return configfile
def download_extract_FRS_combined_national(FRSpath):
_config = config()['databases']['FRS']
url = _config['url']
request = requests.get(url).content
zip_file = zipfile.ZipFile(io.BytesIO(request))
zip_file.extractall(FRSpath)
#Can only be applied before renaming the programs to inventories
def filter_by_program_list(df,program_list):
df = df[df['PGM_SYS_ACRNM'].isin(program_list)]
return df
#Can only be applied after renaming the programs to inventories
def filter_by_inventory_list(df,inventory_list):
df = df[df['Source'].isin(inventory_list)]
return df
#Can only be applied after renaming the programs to inventories
def filter_by_inventory_id_list(df,inventories_of_interest,base_inventory,id_list):
#Find FRS_IDs first
FRS_ID_list = list(df.loc[(df['Source'] == base_inventory) & (df['FacilityID'].isin(id_list)),"FRS_ID"])
#Now use that FRS_ID list and list of inventories of interest to get decired matches
df = df.loc[(df['Source'].isin(inventories_of_interest)) & (df['FRS_ID'].isin(FRS_ID_list))]
return df
def filter_by_facility_list(df,facility_list):
df = df[df['FRS_ID'].isin(facility_list)]
return df
def list_facilities_not_in_bridge(bridges, facility_list):
facilities = bridges[bridges['REGISTRY_ID'].isin(facility_list)]
    return facilities
#Returns list of FRS program acronyms that correspond to the given list of inventories
def get_programs_for_inventory_list(list_of_inventories):
program_list = []
for l in list_of_inventories:
pgm_acronym = inventory_to_FRS_pgm_acronymn[l]
program_list.append(pgm_acronym)
return program_list
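# e.g. get_programs_for_inventory_list(["TRI", "eGRID"]) returns ["TRIS", "EGRID"]
# (based on the inventory_to_FRS_pgm_acronymn mapping above).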
def invert_inventory_to_FRS():
FRS_to_inventory_pgm_acronymn = {v: k for k, v in inventory_to_FRS_pgm_acronymn.items()}
return FRS_to_inventory_pgm_acronymn
#Function to return facility info from FRS web service
#Limitation - the web service only matches one facility at a time
##example
#id='2'
#program_acronym='EGRID'
def callFRSforProgramAcronymandIDfromAPI(program_acronym, id):
# base url
base = 'http://ofmpub.epa.gov/enviro/frs_rest_services'
facilityquery = base + '.get_facilities?'
pgm_sys_id = 'pgm_sys_id='
pgm_sys_acrnm = 'pgm_sys_acrnm='
output = 'output=JSON'
url = facilityquery + pgm_sys_acrnm + program_acronym + '&' + pgm_sys_id + id + '&' + output
facilityresponse = requests.get(url)
facilityjson = json.loads(facilityresponse.text)['Results']
facilityinfo = facilityjson['FRSFacility']
return facilityinfo
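# Example of the request built above (illustrative, using the sample values from the comment block
# before the function):
#   http://ofmpub.epa.gov/enviro/frs_rest_services.get_facilities?pgm_sys_acrnm=EGRID&pgm_sys_id=2&output=JSON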
def getFRSIDfromAPIfaciltyinfo(facilityinfo):
FRSID = facilityinfo[0]['RegistryId']
return FRSID
def add_manual_matches(df_matches):
#Read in manual matches
manual_matches = | pd.read_csv(data_dir+'facilitymatches_manual.csv',header=0,dtype={'FacilityID':'str','FRS_ID':'str'}) | pandas.read_csv |
from . import filtertools
from .sound import Sound
from .library.voice_activity_detection import extract_voiced_segments
from .dataset import NoiseDataSet
import bisect
import librosa.core
import nltk
import numpy as np
import os
import pandas as pd
import scipy.fft
import scipy.signal
import time
class Test(object):
def __init__(self, folder='', store_wavs=False, sentences_per_condition=100):
self.folder = folder
self.id = int(time.time())
self._download_nltk_dependencies()
self.results = dict()
self.results_counter = dict()
self.store_wavs = store_wavs
self.to_store = []
self.other_data = []
self.sentences_per_condition = sentences_per_condition
self.parameters = dict()
def run(self, asr_system, dataset):
raise NotImplementedError()
def _extract_keywords(self, sentence):
def is_keyword(word, pos):
is_key_word = pos[:2] in ['NN', 'VB', 'JJ', 'CD',
'MD', 'RP', 'PR', 'RB',
'WR', 'IN', 'DT', 'EX',
'WP']
#is_posessive = '$' in pos
banned = word in ['be', 'am', 'are', 'is',
'was', 'been', 'for',
'a', 'the', 'so', 'will',
'from', 'can', 'any', "n't"]
include = word in ['us', 'we', 'he']
long_word = len(word) > 2
return (is_key_word and not banned and long_word) or include
tokenized = nltk.word_tokenize(sentence)
keywords = [word for (word, pos) in nltk.pos_tag(tokenized) if is_keyword(word, pos)]
return keywords
def _keyword_accuracy(self, prediction, transcription):
"""
TODO(lweerts):
* Ensure every keyword can only count towards the accuracy as often as it occurs in the sentence.
* Find the maximum number of keywords that occur in the correct order
"""
keywords_prediction = self._extract_keywords(prediction.lower())
keywords_transcription = self._extract_keywords(transcription.lower())
# Measure how many of the keywords in the transcription were in the prediction
correct_keywords = [1 if k in keywords_prediction else 0 for k in keywords_transcription]
return np.sum(correct_keywords)/len(correct_keywords), correct_keywords
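    # Hedged example (added for illustration; exact tagging depends on the nltk model version):
    # for transcription "the boy ran home" and prediction "a boy ran", the keyword sets are typically
    # {"boy", "ran", "home"} and {"boy", "ran"} ("the"/"a" are banned and short words are dropped),
    # giving a keyword accuracy of 2/3 with correct_keywords == [1, 1, 0].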
def _add_noise(self, speech_signal, noise_signal, snr, a_weighting=False, speech_volt_meter=False, speech_reference_level=None, noise_reference_level=None):
if noise_signal is None:
if speech_reference_level:
speech_signal.set_advanced_level(speech_reference_level, a_weighting, speech_volt_meter)
return speech_signal
samplerate = min(speech_signal.samplerate_Hz, noise_signal.samplerate_Hz)
if speech_signal.samplerate_Hz != noise_signal.samplerate_Hz:
if speech_signal.samplerate_Hz > samplerate:
speech_signal = Sound.resample(speech_signal, samplerate)
else:
noise_signal = Sound.resample(noise_signal, samplerate)
        # Crop a random window of the noise signal to match the length of the speech signal.
noise_aligned, noise_start, noise_end = self._align(speech_signal, noise_signal)
noise_signal = Sound(noise_aligned, noise_signal.samplerate_Hz)
if speech_reference_level is not None:
speech_signal.set_advanced_level(speech_reference_level, a_weighting, speech_volt_meter)
noise_signal.set_advanced_level(speech_reference_level - snr, a_weighting, False)
elif noise_reference_level is not None:
speech_signal.set_advanced_level(noise_reference_level + snr, a_weighting, speech_volt_meter)
noise_signal.set_advanced_level(noise_reference_level, a_weighting, False)
else:
speech_level = speech_signal.get_advanced_level(a_weighting, speech_volt_meter)
noise_level = noise_signal.get_advanced_level(a_weighting, False)
noise_signal *= 10**(((speech_level - snr) - noise_level)/20.)
return speech_signal + noise_signal
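    # Worked example of the default branch above (illustrative numbers): if the speech measures 65 dB and
    # the noise 70 dB, a requested SNR of +5 dB scales the noise by
    #   10 ** (((65 - 5) - 70) / 20.)  # ~0.316, i.e. a 10 dB attenuation
    # before the two signals are summed.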
def _align(self, speech_signal, noise):
if speech_signal.samplerate_Hz != noise.samplerate_Hz:
noise = Sound.resample(noise, speech_signal.samplerate_Hz)
goal_len = len(speech_signal)
if(goal_len == len(noise)):
return noise, 0, goal_len
elif goal_len <= len(noise):
start = np.random.randint(0, len(noise) - goal_len)
aligned_noise = Sound(noise[start:start + goal_len], noise.samplerate_Hz)
return aligned_noise, start, start + goal_len
else:
raise ValueError('Noise needs to be longer than or equal to speech signal in duration.')
def _generate_longterm_speech_shaped_noise(self, sound, dataset):
samples = len(sound)
freqs = np.abs(scipy.fft.fftfreq(samples, 1/sound.samplerate_Hz))
power = dataset.longterm_fft(freqs)
power = np.array(power, dtype='complex')
Np = (len(power) - 1) // 2
phases = np.random.rand(Np) * 2 * np.pi
phases = np.cos(phases) + 1j * np.sin(phases)
power[1:Np+1] *= phases
power[-1:-1-Np:-1] = np.conj(power[1:Np+1])
return Sound(scipy.fft.ifft(power).real, sound.samplerate_Hz)
def _generate_speech_shaped_noise(self, sound, modulated, modulation_rate_Hz=8):
spectrogram = np.abs(scipy.fft.fft(sound))*np.exp(2j*np.pi*np.random.rand(len(sound)))
noise = np.real(scipy.fft.ifft(spectrogram))
if modulated:
t = np.arange(len(sound))/sound.samplerate_Hz
modulation = 10**(30*(np.sin(2*np.pi*modulation_rate_Hz*t)-1)/40)
noise = noise*modulation
return Sound(noise, sound.samplerate_Hz)
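    # Note added for clarity: the generator above keeps the magnitude spectrum of the input sound and
    # randomises its phase, which preserves the spectral shape while removing intelligibility. The optional
    # modulation term applies a sinusoidal gain at modulation_rate_Hz that swings between 0 dB and roughly
    # -30 dB (20 * 30 * (sin - 1) / 40 ranges over [-30, 0]).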
def _generate_gaussian_white_noise(self, duration_frames, samplerate):
return Sound(np.random.randn(duration_frames), samplerate)
def _generate_noise(self, sound, noise_type, longterm_dataset=None, kwargs={}):
if noise_type is None:
return None
if isinstance(noise_type, NoiseDataSet):
return Sound(noise_type.random_sample(len(sound), sound.samplerate_Hz), sound.samplerate_Hz)
elif 'speech_shaped_longterm' in noise_type:
return self._generate_longterm_speech_shaped_noise(sound, longterm_dataset, **kwargs)
elif 'speech_shaped' in noise_type:
if 'modulated' in noise_type:
return self._generate_speech_shaped_noise(sound, True, **kwargs)
else:
return self._generate_speech_shaped_noise(sound, False, **kwargs)
elif 'white_noise' in noise_type:
return self._generate_gaussian_white_noise(len(sound), sound.samplerate_Hz, **kwargs)
def _download_nltk_dependencies(self):
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
try:
nltk.data.find('taggers/averaged_perceptron_tagger')
except LookupError:
nltk.download('averaged_perceptron_tagger')
return
def _allocate_result_dataframe(self, index_values, index_names, column_names, result_name='standard'):
midx = pd.MultiIndex.from_product(
index_values,
names=index_names
)
empty_results = np.full_like(np.zeros((len(midx), len(column_names))), np.nan)
self.results[result_name] = pd.DataFrame(empty_results, columns=column_names, index=midx)
self.results_counter[result_name] = 0
def _allocate_result_dataframe_from_dataframe(self, old_results, result_name):
self.results[result_name] = | pd.DataFrame(columns=old_results.columns, index=old_results.index) | pandas.DataFrame |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # All the points have been extracted; generate ten subspaces around 10 of them
        # Check that each subspace is a Space and lies within s; extract a point set using 32 and check the count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
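        # A minimal sketch of the closing_value arithmetic the expectations above assume
        # (each investment compounded at ir from its own date to the plan's last day);
        # for cp1 that is 20000 over two years at 10% plus 10000 invested on the last day:
        self.assertAlmostEqual(20000 * (1 + 0.1) ** 2 + 10000, 34200)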
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
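        # The assertions below suggest the operator semantics: adding two CashPlans merges
        # their investments, adding an int raises every amount, multiplying by a number
        # scales the amounts, while multiplying a number *by* a CashPlan (e.g. 2 * cp)
        # appears to repeat the whole plan over additional future periods.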
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
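        # The expectations below assume ResultPool keeps at most `capacity` (here 5) items:
        # cut() retains the items with the largest perf values by default, and the smallest
        # ones when called with keep_largest=False.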
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
        print('\ntest multiple radii:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
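        # the radius is clipped at the parent bounds: 7 +/- 3.9 on a (0., 10.) axis
        # yields (3.1, 10.0) rather than (3.1, 10.9)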
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
        print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock market is "主板", and list date on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the expected results below were calculated manually and are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a test series of 500 data points to test the evaluation process on series longer than 250 points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
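        # The expected values above are exactly the final 'value' entries of each test
        # series, consistent with fv() reporting the terminal value; the line below is a
        # sanity check on that reading, not a claim about the implementation:
        self.assertAlmostEqual(eval_fv(self.test_data1), self.test_data1['value'].iloc[-1])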
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
        print(f'test error handling')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
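        # The expected tuples below read as (max_drawdown_ratio, peak_index, trough_index,
        # recovery_index), where the recovery index is NaN when the series never climbs
        # back above the previous peak; the ratio appears to be the maximum over t of
        # (running_peak - value_t) / running_peak.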
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
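        # A worked sketch of that ratio for test_data2, whose peak (index 0) and trough
        # (index 10) are asserted above; this checks the assumed formula, not the exact
        # implementation in core.py:
        peak = self.test_data2['value'].iloc[0]
        trough = self.test_data2['value'].iloc[10]
        self.assertAlmostEqual((peak - trough) / peak, 0.334690849)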
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
        print(f'test error handling')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
        # test max drawdown when the series is shifted down so that it crosses zero:
        # TODO: investigate how a division by zero (or a negative trough) should be handled here
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
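        # Assumed definition (not verified against the implementation): the information
        # ratio is the mean of the active returns (portfolio return minus reference
        # return) divided by their standard deviation, i.e. the tracking error.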
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # test volatility calculation on long data
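        # The 250 leading NaNs and the nanmean comparison at the end of this test imply a
        # 250-point rolling window of annualized return volatility, with the scalar result
        # taken as the mean of the rolling values; this is inferred from the expectations,
        # not from the implementation itself.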
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # test sharpe ratio calculation on long data
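        # As with volatility, the 250 leading NaNs and the nanmean comparison suggest a
        # 250-point rolling sharpe ratio (mean excess return over the risk-free rate,
        # divided by volatility), averaged into a single scalar for long series.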
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on long data
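        # Beta is conventionally cov(portfolio returns, reference returns) divided by
        # var(reference returns); the 250 leading NaNs again point to a 250-point rolling
        # estimate whose nanmean is returned for long series, as asserted below.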
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on long data
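        # Alpha is conventionally the CAPM (Jensen) excess return, i.e. realized return
        # minus (risk_free + beta * (reference return - risk_free)); the leading NaNs
        # again suggest a 250-point rolling estimate averaged into the scalar result.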
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
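# The price matrix above covers 100 trading days (matching self.dates) for the seven test shares, one column per
# share in the order given by self.shares.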
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
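# Note on the signal convention used above (inferred from the expected results in self.res below): a positive
# value k buys that share with k * the available cash, a negative value -k sells k * the currently held amount,
# so -1 liquidates the position completely and 0 means no trade.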
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
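# self.rate is a frictionless cost model (all fees and slippage set to zero) so loop results can be checked
# against hand-computed values; self.rate2 adds minimum transaction fees (buy_min=10, sell_min=5) and is used in
# the moq (minimum order quantity) variant of test_loop below.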
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
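# Apparent column layout of the expected result matrix above, matching the (cash, amounts, fee, value) tuple
# returned by qt.core._loop_step in test_loop_step: columns 0-6 hold the amounts of the seven shares, column 7
# the remaining cash, column 8 the accumulated fee and column 9 the total portfolio value.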
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
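# The mask/signal pairs above encode the conversion rule checked in test_mask_to_signal below: an increase of the
# position mask becomes a buy signal of the same size, while a decrease becomes a sell signal expressed as the
# fraction of the previous position that is sold (a drop from 1.0 to 0.5 gives -0.5, a drop to 0 gives -1).
# Minimal sketch of the assumed behaviour for a single share:
# qt.mask_to_signal(np.array([[0.0], [0.5], [1.0], [0.5]])) -> [[0.0], [0.5], [0.5], [-0.5]]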
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
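# Summary of the blending modes exercised below, as reflected by the expected arrays above: 'avg' is the
# element-wise mean of the masks, 'combo' their element-wise sum, 'str-T' converts the summed masks into a
# +1/0/-1 position once the sum crosses +/-T, 'pos-N(-T)' opens a +1/-1 position only where at least N masks
# agree (beyond the optional threshold T), and 'avg_pos-N(-T)' keeps the averaged weights but zeroes positions
# that fail the N-vote requirement.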
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
print(f'Test B: multiple masks to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
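# The three test strategies above are combined into one qt.Operator in the TestOperator cases below, roughly as
# in this sketch (mirroring test_prepare_data / test_operator_generate):
# op = qt.Operator(timing_types=[TestLSStrategy()],
# selecting_types=[TestSelStrategy()],
# ricon_types=[TestSigStrategy()])
# op.prepare_data(hist_data=..., cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
# op_list = op.create_signal(hist_data=...)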
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values on some days
# for some of the shares in the share pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
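# The eps data above is mostly NaN with only occasional per-share values, so the sel_finance history panel built
# from it below exercises selection on sparsely reported fundamentals.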
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_types = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_types,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# calling prepare_data before all strategy parameters have been set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# after removing the code that de-duplicated signals, the signal count grows from the original 23 to 26 and now
# contains three duplicated signals; de-duplication could remove signals that should be kept, see the comment
# around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
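# The infix blender expression is parsed into a prefix (operator-first) token list.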
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
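# set_opt_par() should only feed new values to strategies whose opt_tag is 1 (timing and ricon here);
# the selecting strategy with opt_tag 0 keeps its original parameters.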
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unintended consequences
# TODO: the handling of NaN values still needs to be resolved (see the sketch at the end of this test)
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
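# A possible NaN-aware variant of the check above (a sketch, not part of the original test):
# compare only the positions where the strategy produced a finite value, instead of relying on
# NaNs being rewritten as 0 in lsmask.
#
#   valid = ~np.isnan(output)
#   self.assertTrue(np.allclose(output[valid], lsmask[valid]))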
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor with proportional weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weighting, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
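# Build HistoryPanel objects from the same data using different ways of specifying levels (shares),
# columns (htypes) and rows (dates): a DatetimeIndex, a list of date strings and a comma-separated string.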
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
| Timestamp('2020-01-04 00:00:00', freq='D') | pandas.Timestamp |
import os
import pandas as pd
import pytest
from requests_mock.mocker import Mocker
from upgini import FeaturesEnricher, SearchKey
from upgini.metadata import RuntimeParameters
from .utils import (
mock_default_requests,
mock_get_features_meta,
mock_get_metadata,
mock_initial_search,
mock_initial_summary,
mock_raw_features,
mock_validation_raw_features,
mock_validation_search,
mock_validation_summary,
)
def test_search_keys_validation(requests_mock: Mocker):
url = "http://fake_url2"
mock_default_requests(requests_mock, url)
with pytest.raises(Exception, match="Date and datetime search keys are presented simultaniously"):
FeaturesEnricher(
search_keys={"d1": SearchKey.DATE, "dt2": SearchKey.DATETIME},
endpoint=url,
)
with pytest.raises(Exception, match="COUNTRY search key should be provided if POSTAL_CODE is presented"):
FeaturesEnricher(search_keys={"postal_code": SearchKey.POSTAL_CODE}, endpoint=url)
def test_features_enricher(requests_mock: Mocker):
pd.set_option("mode.chained_assignment", "raise")
url = "http://fake_url2"
path_to_mock_features = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet"
)
mock_default_requests(requests_mock, url)
search_task_id = mock_initial_search(requests_mock, url)
ads_search_task_id = mock_initial_summary(
requests_mock,
url,
search_task_id,
hit_rate=99.9,
auc=0.66,
uplift=0.1,
eval_set_metrics=[
{"eval_set_index": 1, "hit_rate": 1.0, "auc": 0.5},
{"eval_set_index": 2, "hit_rate": 0.99, "auc": 0.77},
],
)
mock_get_metadata(requests_mock, url, search_task_id)
mock_get_features_meta(
requests_mock,
url,
ads_search_task_id,
ads_features=[{"name": "feature", "importance": 10.1, "matchedInPercent": 99.0, "valueType": "NUMERIC"}],
etalon_features=[{"name": "SystemRecordId_473310000", "importance": 1.0, "matchedInPercent": 100.0}],
)
mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features)
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv")
df = pd.read_csv(path, sep=",")
train_df = df.head(10000)
train_features = train_df.drop(columns="target")
train_target = train_df["target"]
eval1_df = df[10000:11000]
eval1_features = eval1_df.drop(columns="target")
eval1_target = eval1_df["target"]
eval2_df = df[11000:12000]
eval2_features = eval2_df.drop(columns="target")
eval2_target = eval2_df["target"]
enricher = FeaturesEnricher(
search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE},
endpoint=url,
api_key="fake_api_key",
date_format="%Y-%m-%d",
)
enriched_train_features = enricher.fit_transform(
train_features,
train_target,
eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)],
keep_input=True,
)
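# With keep_input=True the enriched frame should keep the original input columns and append the selected external feature.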
assert enriched_train_features.shape == (10000, 4)
metrics = enricher.calculate_metrics(
train_features, train_target, eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)]
)
expected_metrics = pd.DataFrame(
[
{
"match_rate": 99.9,
"baseline roc_auc": 0.5,
"enriched roc_auc": 0.4926257640349131,
"uplift": -0.007374235965086906,
},
{"match_rate": 100.0, "baseline roc_auc": 0.5, "enriched roc_auc": 0.5, "uplift": 0.0},
{"match_rate": 99.0, "baseline roc_auc": 0.5, "enriched roc_auc": 0.5, "uplift": 0.0},
],
index=["train", "eval 1", "eval 2"],
)
print("Expected metrics: ")
print(expected_metrics)
print("Actual metrics: ")
print(metrics)
assert metrics is not None
for segment in expected_metrics.index:
for col in expected_metrics.columns:
assert metrics.loc[segment, col] == expected_metrics.loc[segment, col]
print(enricher.features_info)
assert enricher.feature_names_ == ["feature"]
assert enricher.feature_importances_ == [10.1]
assert len(enricher.features_info) == 2
first_feature_info = enricher.features_info.iloc[0]
assert first_feature_info["feature_name"] == "feature"
assert first_feature_info["shap_value"] == 10.1
second_feature_info = enricher.features_info.iloc[1]
assert second_feature_info["feature_name"] == "SystemRecordId_473310000"
assert second_feature_info["shap_value"] == 1.0
def test_features_enricher_fit_transform_runtime_parameters(requests_mock: Mocker):
pd.set_option("mode.chained_assignment", "raise")
url = "http://fake_url2"
path_to_mock_features = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet"
)
mock_default_requests(requests_mock, url)
search_task_id = mock_initial_search(requests_mock, url)
ads_search_task_id = mock_initial_summary(
requests_mock,
url,
search_task_id,
hit_rate=99.9,
auc=0.66,
uplift=0.1,
eval_set_metrics=[
{"eval_set_index": 1, "hit_rate": 100, "auc": 0.5},
{"eval_set_index": 2, "hit_rate": 99, "auc": 0.77},
],
)
mock_get_metadata(requests_mock, url, search_task_id)
mock_get_features_meta(
requests_mock,
url,
ads_search_task_id,
ads_features=[{"name": "feature", "importance": 10.1, "matchedInPercent": 99.0, "valueType": "NUMERIC"}],
etalon_features=[{"name": "SystemRecordId_473310000", "importance": 1.0, "matchedInPercent": 100.0}],
)
mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features)
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv")
df = pd.read_csv(path, sep=",")
train_df = df.head(10000)
train_features = train_df.drop(columns="target")
train_target = train_df["target"]
eval1_df = df[10000:11000]
eval1_features = eval1_df.drop(columns="target")
eval1_target = eval1_df["target"]
eval2_df = df[11000:12000]
eval2_features = eval2_df.drop(columns="target")
eval2_target = eval2_df["target"]
enricher = FeaturesEnricher(
search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE},
date_format="%Y-%m-%d",
endpoint=url,
api_key="fake_api_key",
runtime_parameters=RuntimeParameters(properties={"runtimeProperty1": "runtimeValue1"}),
)
assert enricher.runtime_parameters is not None
enricher.fit(
train_features,
train_target,
eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)],
)
fit_req = None
initial_search_url = url + "/public/api/v2/search/initial"
for elem in requests_mock.request_history:
if elem.url == initial_search_url:
fit_req = elem
# TODO: can be better with
# https://metareal.blog/en/post/2020/05/03/validating-multipart-form-data-with-requests-mock/
# It's do-able to parse req with cgi module and verify contents (see the commented sketch below)
assert fit_req is not None
assert "runtimeProperty1" in str(fit_req.body)
assert "runtimeValue1" in str(fit_req.body)
validation_search_task_id = mock_validation_search(requests_mock, url, search_task_id)
mock_validation_summary(
requests_mock,
url,
search_task_id,
ads_search_task_id,
validation_search_task_id,
hit_rate=99.9,
auc=0.66,
uplift=0.1,
eval_set_metrics=[
{"eval_set_index": 1, "hit_rate": 100, "auc": 0.5},
{"eval_set_index": 2, "hit_rate": 99, "auc": 0.77},
],
)
mock_validation_raw_features(requests_mock, url, validation_search_task_id, path_to_mock_features)
transformed = enricher.transform(train_features, keep_input=True)
transform_req = None
transform_url = url + "/public/api/v2/search/validation?initialSearchTaskId=" + search_task_id
for elem in requests_mock.request_history:
if elem.url == transform_url:
transform_req = elem
assert transform_req is not None
assert "runtimeProperty1" in str(transform_req.body)
assert "runtimeValue1" in str(transform_req.body)
assert transformed.shape == (10000, 4)
def test_search_with_only_personal_keys(requests_mock: Mocker):
url = "https://some.fake.url"
mock_default_requests(requests_mock, url)
with pytest.raises(Exception):
FeaturesEnricher(search_keys={"phone": SearchKey.PHONE, "email": SearchKey.EMAIL}, endpoint=url)
def test_filter_by_importance(requests_mock: Mocker):
url = "https://some.fake.url"
path_to_mock_features = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet"
)
mock_default_requests(requests_mock, url)
search_task_id = mock_initial_search(requests_mock, url)
ads_search_task_id = mock_initial_summary(
requests_mock,
url,
search_task_id,
hit_rate=99.9,
auc=0.66,
uplift=0.1,
eval_set_metrics=[
{"eval_set_index": 1, "hit_rate": 1.0, "auc": 0.5},
{"eval_set_index": 2, "hit_rate": 0.99, "auc": 0.77},
],
)
mock_get_metadata(requests_mock, url, search_task_id)
mock_get_features_meta(
requests_mock,
url,
ads_search_task_id,
ads_features=[{"name": "feature", "importance": 0.7, "matchedInPercent": 99.0, "valueType": "NUMERIC"}],
etalon_features=[{"name": "SystemRecordId_473310000", "importance": 0.3, "matchedInPercent": 100.0}],
)
mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features)
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv")
df = | pd.read_csv(path, sep=",") | pandas.read_csv |
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
plt.rcParams['font.size'] = 6
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
graphs_path = root_path+'/graphs/'
results_path = root_path+'/results_analysis/results/'
print("root path:{}".format(root_path))
sys.path.append(root_path)
from tools.results_reader import read_two_stage, read_pure_esvr,read_pure_arma
h_arma = read_pure_arma("Huaxian")
x_arma = read_pure_arma("Xianyang")
z_arma = read_pure_arma("Zhangjiashan")
h_svr_1 = pd.read_csv(root_path+'/Huaxian/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_3 = pd.read_csv(root_path+'/Huaxian/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_5 = pd.read_csv(root_path+'/Huaxian/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_7 = pd.read_csv(root_path+'/Huaxian/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_1 = pd.read_csv(root_path+'/Xianyang/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_3 = pd.read_csv(root_path+'/Xianyang/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_5 = pd.read_csv(root_path+'/Xianyang/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_7 = pd.read_csv(root_path+'/Xianyang/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
h_lstm_1 = pd.read_csv(root_path+'/Huaxian/projects/lstm/1_ahead/optimal/opt_pred.csv')
h_lstm_3 = pd.read_csv(root_path+'/Huaxian/projects/lstm/3_ahead/optimal/opt_pred.csv')
h_lstm_5 = pd.read_csv(root_path+'/Huaxian/projects/lstm/5_ahead/optimal/opt_pred.csv')
h_lstm_7 = pd.read_csv(root_path+'/Huaxian/projects/lstm/7_ahead/optimal/opt_pred.csv')
x_lstm_1 = pd.read_csv(root_path+'/Xianyang/projects/lstm/1_ahead/optimal/opt_pred.csv')
x_lstm_3 = pd.read_csv(root_path+'/Xianyang/projects/lstm/3_ahead/optimal/opt_pred.csv')
x_lstm_5 = pd.read_csv(root_path+'/Xianyang/projects/lstm/5_ahead/optimal/opt_pred.csv')
x_lstm_7 = pd.read_csv(root_path+'/Xianyang/projects/lstm/7_ahead/optimal/opt_pred.csv')
z_lstm_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/1_ahead/optimal/opt_pred.csv')
z_lstm_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/3_ahead/optimal/opt_pred.csv')
z_lstm_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/5_ahead/optimal/opt_pred.csv')
z_lstm_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/7_ahead/optimal/opt_pred.csv')
h_dnn_1 = pd.read_csv(root_path+'/Huaxian/projects/dnn/1_ahead/optimal/opt_pred.csv')
h_dnn_3 = pd.read_csv(root_path+'/Huaxian/projects/dnn/3_ahead/optimal/opt_pred.csv')
h_dnn_5 = pd.read_csv(root_path+'/Huaxian/projects/dnn/5_ahead/optimal/opt_pred.csv')
h_dnn_7 = pd.read_csv(root_path+'/Huaxian/projects/dnn/7_ahead/optimal/opt_pred.csv')
x_dnn_1 = pd.read_csv(root_path+'/Xianyang/projects/dnn/1_ahead/optimal/opt_pred.csv')
x_dnn_3 = pd.read_csv(root_path+'/Xianyang/projects/dnn/3_ahead/optimal/opt_pred.csv')
x_dnn_5 = pd.read_csv(root_path+'/Xianyang/projects/dnn/5_ahead/optimal/opt_pred.csv')
x_dnn_7 = pd.read_csv(root_path+'/Xianyang/projects/dnn/7_ahead/optimal/opt_pred.csv')
z_dnn_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/1_ahead/optimal/opt_pred.csv')
z_dnn_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/3_ahead/optimal/opt_pred.csv')
z_dnn_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/5_ahead/optimal/opt_pred.csv')
z_dnn_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/7_ahead/optimal/opt_pred.csv')
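# Decomposition-hybrid SVR forecasts: DWT (db10-2), EEMD, SSA, VMD and MODWT (WDDFF) preprocessing
# for the same stations and forecast horizons.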
h_d_1 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_d_3 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_d_5 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_d_7 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_e_1 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_e_3 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_e_5 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_e_7 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_s_1 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_s_3 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_s_5 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_s_7 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_v_1 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_v_3 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_v_5 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_v_7 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_m_1 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_1_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_3 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_3_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_5 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_5_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_7 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_7_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
x_d_1 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_d_3 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_d_5 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_d_7 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_e_1 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_e_3 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_e_5 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_e_7 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_s_1 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_s_3 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_s_5 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_s_7 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_v_1 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_v_3 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_v_5 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_v_7 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_m_1 = pd.read_csv(root_path+'/Xianyang_modwt/projects/esvr-wddff/db1-4/single_hybrid_1_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
x_m_3 = pd.read_csv(root_path+'/Xianyang_modwt/projects/esvr-wddff/db1-4/single_hybrid_3_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
x_m_5 = pd.read_csv(root_path+'/Xianyang_modwt/projects/esvr-wddff/db1-4/single_hybrid_5_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
x_m_7 = pd.read_csv(root_path+'/Xianyang_modwt/projects/esvr-wddff/db1-4/single_hybrid_7_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
z_d_1 = pd.read_csv(root_path+'/Zhangjiashan_dwt/projects/esvr/db10-2/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
z_d_3 = pd.read_csv(root_path+'/Zhangjiashan_dwt/projects/esvr/db10-2/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
z_d_5 = pd.read_csv(root_path+'/Zhangjiashan_dwt/projects/esvr/db10-2/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
z_d_7 = pd.read_csv(root_path+'/Zhangjiashan_dwt/projects/esvr/db10-2/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
z_e_1 = pd.read_csv(root_path+'/Zhangjiashan_eemd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
z_e_3 = pd.read_csv(root_path+'/Zhangjiashan_eemd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
z_e_5 = pd.read_csv(root_path+'/Zhangjiashan_eemd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
z_e_7 = pd.read_csv(root_path+'/Zhangjiashan_eemd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
z_s_1 = pd.read_csv(root_path+'/Zhangjiashan_ssa/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
z_s_3 = pd.read_csv(root_path+'/Zhangjiashan_ssa/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
z_s_5 = pd.read_csv(root_path+'/Zhangjiashan_ssa/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
z_s_7 = pd.read_csv(root_path+'/Zhangjiashan_ssa/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
z_v_1 = pd.read_csv(root_path+'/Zhangjiashan_vmd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
z_v_3 = | pd.read_csv(root_path+'/Zhangjiashan_vmd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv') | pandas.read_csv |
#!/usr/bin/env python3
import argparse
import configparser
import json
# import logging
import pandas as pd
# set logging
# logger=logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
# formatter=logging.Formatter('[%(asctime)s:%(levelname)s:%(lineno)d %(message)s', datefmt='%H:%M:%S') #time:levelname:message:line#
# file_handler=logging.FileHandler('log/merge-to-db.log')
# # file_handler=logging.FileHandler('log/merge_db-error.log')
# # file_handler.setLevel(logging.ERROR)
# file_handler.setFormatter(formatter)
# stream_handler=logging.StreamHandler()
# stream_handler.setFormatter(formatter)
# logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
def parse_args():
parser=argparse.ArgumentParser(prog='concat-hitsum.py', conflict_handler='resolve')
parser.add_argument('-i', type=str, required=False, help='=> path/to/infile.csv')
parser.add_argument('-dbconfig', type=str, required=False, help='=> path/to/main_db.ini')
parser.add_argument('-o', type=str, required=False, help='=> path/to/merged_outfile.csv')
return(parser.parse_args())
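# Example invocation (illustrative file names, matching the argparse help strings above):
#   ./concat-hitsum.py -i path/to/infile.csv -dbconfig path/to/main_db.ini -o path/to/merged_outfile.csv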
def main():
"""
Appends column-specific data from a db.csv file to a designated .csv file
"""
args=parse_args()
# Set main input df to merge file
in_df= | pd.read_csv(args.i) | pandas.read_csv |
#
# Copyright 2018, Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Script to create time series plots visualising the tmask analysis for the corner pixels of the AOI
"""
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from tools.folders_handle import PLOTS_FOLDER, COEFFICIENTS_FOLDER
def draw_plots(plots_folder, coeff_folder):
pixels = ['ul', 'll', 'ur', 'lr']
for pixel in pixels:
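# Load the saved julian dates, fitted Tmask coefficients and analytic reflectance for this corner pixel.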
d = np.load(os.path.join(coeff_folder, 'tmask_date.npy'))
co = np.load(os.path.join(coeff_folder, 'tmask_coeffs_plot_%s.npy') %(pixel))
r = np.load(os.path.join(coeff_folder, 'tmask_analytic_plot_%s.npy') %(pixel))
juldate_start = int(d[0])
juldate_end = int(d[-1])
juldate = np.linspace(juldate_start, juldate_end, juldate_end - juldate_start)
num_days = juldate_end - juldate_start
daysPerYear = 365
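# Tmask-style harmonic basis: a constant plus annual (365-day) and full-record-length sine/cosine terms
# evaluated on a daily time axis.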
constant = np.ones(juldate.shape)
cosT = np.cos(2.0 * np.pi * juldate / daysPerYear)
sinT = np.sin(2.0 * np.pi * juldate / daysPerYear)
cosNT = np.cos(2.0 * np.pi * juldate / num_days)
sinNT = np.sin(2.0 * np.pi * juldate / num_days)
y = []
ref = []
bands = 4
for band in range(bands):
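# Rebuild the fitted harmonic curve for this band from its five regression coefficients and keep the
# observed reflectance for comparison.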
constant_fitted = constant * co[band][0]
cosT_fitted = cosT * co[band][1]
sinT_fitted = sinT * co[band][2]
cosNT_fitted = cosNT * co[band][3]
sinNT_fitted = sinNT * co[band][4]
y.append(constant_fitted + cosT_fitted + sinT_fitted + cosNT_fitted + sinNT_fitted)
refc = r[:,band]
ref.append(refc)
df = pd.DataFrame({'julian':juldate})
df['date'] = | pd.to_datetime(df['julian'], unit='D', origin='julian') | pandas.to_datetime |