| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
import dash
import dash_bootstrap_components as dbc
from newsapi import NewsApiClient
from dash import dcc, Input, Output, html, State
from IPython import display
import math
from pprint import pprint
import pandas as pd
import numpy as np
import nltk
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
from tweets import get_trending_tweets, get_countries
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
countries = get_countries()
alpha_countries = []
for country in countries:
alpha_countries.append(country)
alpha_countries.sort()
items = []
for location in alpha_countries:
items.append(location)
items.pop()
api = NewsApiClient(api_key='77e164585ab1422893fc26ae68be9f05')
default_top = api.get_top_headlines()
top = api.get_top_headlines()
default_tweets = get_trending_tweets(1)
tweets = get_trending_tweets(1)
tweets_dict = {}
news_dict = {}
def generateChart(top):
headlines = set()
for title in top['articles']:
headlines.add(title['title'])
sia = SIA()
headlinesResults = []
for line in headlines:
pol_score_h = sia.polarity_scores(line)
pol_score_h['headline'] = line
headlinesResults.append(pol_score_h)
headlineDf = pd.DataFrame.from_records(headlinesResults)  # api: pandas.DataFrame.from_records
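# Hedged sketch (not in the original snippet): one way generateChart could turn the
# VADER scores into a figure, assuming the pandas/numpy/plotly imports above. The
# function name and the 0.05 compound-score threshold are illustrative, not from the source.
def generate_sentiment_histogram(headlinesResults):
    df = pd.DataFrame.from_records(headlinesResults)
    # Label each headline by the sign of its compound score.
    df["label"] = np.where(df["compound"] > 0.05, "positive",
                           np.where(df["compound"] < -0.05, "negative", "neutral"))
    # Histogram of sentiment labels for the current set of headlines.
    return px.histogram(df, x="label", title="Headline sentiment")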
#%load_ext autoreload
#%autoreload 2
import dataclasses
import glob
import logging
import os
import shutil
import warnings
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from scipy.sparse.csr import csr_matrix
from psykoda import detection, feature_extraction, preprocess, utils
from psykoda.constants import COMMANDLINE_DATE_FORMAT, col
from psykoda.io import labeled, reporting
logger = logging.getLogger(__name__)
to_stderr = {"_log_err": True}
FILENAME_WEIGHT = "best_weight.h5"
FILENAME_IDF_SID = "idf_sid.csv"
FILENAME_IDF_DPORT = "idf_dport.csv"
FILENAME_PLOT_DETECTION = "plot_detection.png"
FILENAME_STATS = "stats.json"
FILENAME_REPORT = "report.csv"
FILENAME_FEATURE_MATRIX = "feature_matrix.csv"
def configure_logging(debug: bool):
"""
Configure execution log settings.
Parameters
----------
debug
Whether to log at DEBUG level.
"""
PATH_LOG = "./log/log_" + datetime.strftime(datetime.today(), "%Y-%m-%d") + ".log"
os.makedirs(os.path.dirname(PATH_LOG), exist_ok=True)
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
# utilities
stderr_filter = lambda record: getattr(record, "_log_err", False)
# app config
stderr_handler = logging.StreamHandler()
stderr_handler.addFilter(stderr_filter)
stderr_handler.setLevel(logging.INFO)
stderr_handler.setFormatter(logging.Formatter("%(message)s"))
handlers = [stderr_handler]
logfile_handler = logging.FileHandler(PATH_LOG)
logfile_handler.setLevel(logging.DEBUG)
logfile_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)-8s [%(module)s # %(funcName)s line %(lineno)d] %(message)s"
)
)
handlers.append(logfile_handler)
logging.basicConfig(handlers=handlers, level=log_level)
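def _logging_usage_example():
    # Hedged usage sketch (not part of the original module): every record goes to the
    # daily log file, while only records carrying the `_log_err` extra (see `to_stderr`
    # above) pass the stderr filter installed by configure_logging().
    configure_logging(debug=True)
    logger.info("written to the log file only")
    logger.info("also shown on stderr", extra=to_stderr)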
class Incomplete_Args_Exception(Exception):
pass
load_config = utils.load_json
@dataclass
class OutputConfig:
dir: str
share_dir: Optional[str]
subdir: Optional[str]
@dataclass
class PreprocessConfig:
exclude_lists: Optional[str]
screening: preprocess.ScreeningConfig
@dataclass
class InputConfig:
dir: str
@dataclasses.dataclass
class LoadPreviousConfigItem:
list: Optional[str]
ndate: int = 730
@dataclasses.dataclass
class LoadPreviousConfig:
"""
Log loading settings.
Parameters
----------
list
path to CSV file in which labeled IP addresses are listed
ndate
time range for labeled IP addresses, in days
"""
known_normal: Optional[LoadPreviousConfigItem]
known_anomaly: Optional[LoadPreviousConfigItem]
unknown: Optional[LoadPreviousConfigItem]
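# Construction sketch (file paths are hypothetical, not from the source): labeled IP
# lists are read from CSV files and kept for up to `ndate` days.
_example_load_previous = LoadPreviousConfig(
    known_normal=LoadPreviousConfigItem(list="labels/known_normal.csv", ndate=365),
    known_anomaly=LoadPreviousConfigItem(list="labels/known_anomaly.csv"),
    unknown=None,
)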
@dataclass
class PreviousConfig:
load: LoadPreviousConfig
log: labeled.Config
@dataclass
class IOConfig:
input: InputConfig
previous: PreviousConfig
output: OutputConfig
@dataclass
class Service:
"""Service definition: set of destination port numbers
Examples
--------
>>> all = Service()
>>> ssh = Service(include=[22])
>>> all_but_ssh = Service(exclude=[22])
>>> ssh_or_https = Service(include=[22, 443])
"""
include: Optional[List[int]]
exclude: Optional[List[int]]
@dataclass
class Subnet:
"""Subnet configuration: set of CIDR-formatted IP addresses with services to analyze
Examples
--------
>>> private_A = Subnet(["10.0.0.0/8"], get_names_of_services_from_config())
>>> private = Subnet(["private-A", "private-B", "private-C"], get_names_of_services_from_config()) # these constants are available for convenience and readability
>>> my_network = Subnet(["10.0.0.0/16", "10.1.1.0/24"], get_names_of_services_from_config())
"""
cidrs: List[str]
services: List[str]
@dataclass
class DetectionUnitConfig:
"""Detection unit configuration
Parameters
----------
services
map from names of service to service definitions
subnets
map from names of subnet to subnet configurations
"""
services: Dict[str, Service]
subnets: Dict[str, Subnet]
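# Hedged example (the service and subnet names are illustrative): each subnet lists
# the service names it should be analyzed with, matching the args.subnet / args.service
# pairs used by main_detection below.
_example_detection_units = DetectionUnitConfig(
    services={"ssh": Service(include=[22], exclude=None)},
    subnets={"private_A": Subnet(cidrs=["10.0.0.0/8"], services=["ssh"])},
)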
@dataclass
class TargetPeriod:
days: int = 30
@dataclass
class ArgumentsConfig:
"""Arguments modification configuration
Parameters
----------
target_period:
default target period used to determine date_from and date_to values if missing.
"""
target_period: TargetPeriod
def set_default_date_detect(args, config: ArgumentsConfig):
"""
Configure training from/to dates according to args and config.
Parameters
----------
args
Command line args.
config
Settings for arguments.
Returns
-------
args
Command line args with training from/to dates added.
"""
date_time_today = datetime.today()
if args.date_from is None:
args.date_from = date_time_today - timedelta(config.target_period.days)
if args.date_to is None:
args.date_to = date_time_today - timedelta(1)
args.date_from_training = args.date_from - timedelta(args.period_train)
args.date_to_training = args.date_from - timedelta(1)
return args
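def _date_defaults_example():
    # Usage sketch with a hypothetical argparse.Namespace; period_train (in days) is
    # assumed to come from the command line, and TargetPeriod() supplies the 30-day
    # default target period defined above.
    import argparse
    args = argparse.Namespace(date_from=None, date_to=None, period_train=7)
    args = set_default_date_detect(args, ArgumentsConfig(target_period=TargetPeriod()))
    # date_to defaults to yesterday, date_from to 30 days earlier, and the training
    # window ends the day before date_from.
    return args.date_from_training, args.date_to_training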
@dataclass
class SkipDetectionConfig:
train: int
test: int
@dataclass
class ThresholdConfig:
num_anomaly: int
min_score: float
@dataclass
class AnomalyDetectionConfig:
required_srcip: SkipDetectionConfig
deepsad: detection.DeepSAD.Config
train: detection.DeepSAD.TrainConfig
threshold: ThresholdConfig
@dataclasses.dataclass
class DetectConfig:
arguments: ArgumentsConfig
detection_units: DetectionUnitConfig
io: IOConfig
preprocess: PreprocessConfig
feature_extraction: feature_extraction.FeatureExtractionConfig
anomaly_detection: AnomalyDetectionConfig
def main_detection(args, config: DetectConfig, log: pd.DataFrame, label: pd.Series):
"""
Parameters
----------
args
config
log
:index:
:columns:
label
filled with 1
:index:
"""
dir_report = os.path.join(config.io.output.subdir, args.subnet, args.service)
os.makedirs(dir_report, exist_ok=True)
feature_label = main_detection_prepare_data(
args, config.feature_extraction, log, label
)
if feature_label is None:
return
feature_label.idf_sid.to_csv(os.path.join(dir_report, FILENAME_IDF_SID))
feature_label.idf_dport.to_csv(os.path.join(dir_report, FILENAME_IDF_DPORT))
train_test_splitted, x_train_labeled = main_detection_after_prepare_data(
args, label, feature_label
)
stats = main_detection_skip_or_detect(
args,
log,
label,
dir_report,
feature_label,
train_test_splitted,
x_train_labeled,
anomaly_detection_config=config.anomaly_detection,
previous_config=config.io.previous.log,
)
utils.save_json(stats, path=os.path.join(dir_report, FILENAME_STATS))
def main_detection_prepare_data(
args,
config: feature_extraction.FeatureExtractionConfig,
log: pd.DataFrame,
label: pd.Series,
) -> Optional[feature_extraction.FeatureLabel]:
"""Feature extraction"""
logger.info("start detect on subnet %s and service %s", args.subnet, args.service)
if len(log) == 0:
logger.info("skip analysis; no logs exist")
return None
logger.info("extracting features")
feature_label = feature_extraction.feature_extraction_all(
log=log,
iptable=pd.read_csv(config.address_to_location),
idf_config=config.idf,
)
if feature_label is None:
logger.info("skip analysis; feature matrix is None")
return None
feature_label.extract_nonzeros()
label = label.loc[label.index & feature_label.index]
feature_label.put_labels(labeled_samples=label)
feature_label.feature = feature_label.feature / feature_label.feature.max()
return feature_label
def main_detection_after_prepare_data(
args, label: pd.Series, feature_label: feature_extraction.FeatureLabel
):
"""Split data and construct labeled training feature."""
train_test_splitted = feature_label.split_train_test(args.date_to_training)
idx_labeled = [
feature_label.index.index(sample)
for sample in label.index
if sample in feature_label.index
]
x_train_labeled = feature_label.feature[idx_labeled]
return train_test_splitted, x_train_labeled
def main_detection_skip_or_detect(
args,
log: pd.DataFrame,
label: pd.Series,
dir_report: str,
feature_label: feature_extraction.FeatureLabel,
train_test_splitted,
x_train_labeled: csr_matrix,
anomaly_detection_config: AnomalyDetectionConfig,
previous_config: labeled.Config,
) -> dict:
"""Anomaly detection and output the result."""
x_train, y_train, x_test, index_test = train_test_splitted
stats = {
"subnet": args.subnet,
"service": args.service,
"date_from": args.date_from,
"date_to": args.date_to,
"num_samples_st_detection": len(index_test),
"num_samples_training": len(y_train),
"date_from_training": args.date_from_training,
"date_to_training": args.date_to_training,
"num_samples_labeled": x_train_labeled.shape[0],
"samples_labeled": label.index.tolist(),
}
logger.info("stats: %s", stats)
if len(y_train) < anomaly_detection_config.required_srcip.train:
skip_message = f"#src_ip[train] = {len(y_train)} < config.anomaly_detection.required_srcip.train = {anomaly_detection_config.required_srcip.train}"
logger.info(skip_message)
stats["skipped"] = skip_message
return stats
if len(index_test) < anomaly_detection_config.required_srcip.test:
skip_message = f"#src_ip[test] = {len(index_test)} < config.anomaly_detection.required_srcip.test = {anomaly_detection_config.required_srcip.test}"
logger.info(skip_message)
stats["skipped"] = skip_message
return stats
logger.info("training detector")
verbose = 1 if logger.root.level < 20 else 0
detector = detection.DeepSAD(anomaly_detection_config.deepsad)
detector.train(
X=x_train,
y=y_train,
path_model=os.path.join(dir_report, FILENAME_WEIGHT),
config=anomaly_detection_config.train,
verbose=verbose,
)
logger.info("outputting detection reports")
anomaly_score = detector.compute_anomaly_score(x_test, scale=True)
num_anomaly = min(
sum(anomaly_score > anomaly_detection_config.threshold.min_score),
anomaly_detection_config.threshold.num_anomaly,
)
idx_sorted = np.argsort(anomaly_score)[::-1].tolist()
idx_anomaly = idx_sorted[:num_anomaly]
anomaly_score_sorted = pd.Series(
anomaly_score[idx_sorted],
index=pd.MultiIndex.from_tuples(
[index_test[i] for i in idx_sorted],
names=(col.DATETIME_ROUNDED, col.SRC_IP),
),
name="anomaly_score",
)
x_test_embeddings = detector.compute_embeddings(x_test)
x_train_labeled_embeddings = detector.compute_embeddings(x_train_labeled)
shap_value_idx_sorted = detector.explain_anomaly(
x_test[idx_anomaly], background_samples=x_train
)
shap_value_idx_sorted = pd.DataFrame(
shap_value_idx_sorted,
index=pd.MultiIndex.from_tuples(
[index_test[i] for i in idx_anomaly],
names=(col.DATETIME_ROUNDED, col.SRC_IP),
),
columns=feature_label.columns,
)
stats = output_result(
args,
log,
label,
dir_report,
x_train_labeled_embeddings=x_train_labeled_embeddings,
x_test_embeddings=x_test_embeddings,
idx_anomaly=idx_anomaly,
shap_value_idx_sorted=shap_value_idx_sorted,
anomaly_score_sorted=anomaly_score_sorted,
stats=stats,
previous_config=previous_config,
)
if args.debug:
if isinstance(x_test, csr_matrix):
x_test = x_test.toarray()
ret = pd.DataFrame(x_test, index=index_test, columns=feature_label.columns)
ret = ret.iloc[idx_sorted]
ret.to_csv(os.path.join(dir_report, FILENAME_FEATURE_MATRIX))
return stats
def output_result(
args,
log: pd.DataFrame,
label: pd.Series,
dir_report: str,
*,
x_train_labeled_embeddings,
x_test_embeddings,
idx_anomaly,
shap_value_idx_sorted,
anomaly_score_sorted,
stats: dict,
previous_config: labeled.Config,
):
"""Plot the detection result and output the report."""
reporting.plot.plot_detection(
X=x_test_embeddings,
idx_anomaly=idx_anomaly,
name_anomaly=shap_value_idx_sorted.index,
X_labeled=x_train_labeled_embeddings,
name_labeled=label.index,
path_saved=os.path.join(dir_report, FILENAME_PLOT_DETECTION),
no_plot=args.no_plot,
)
detection.detection_report(
anomaly_score_sorted,
shap_value_idx_sorted,
shap_top_k=5,
).to_csv(os.path.join(dir_report, FILENAME_REPORT))
labeled.factory(previous_config)[1].save_previous_log(
df=log,
entries=shap_value_idx_sorted.index,
)
stats["num_anomaly"] = len(idx_anomaly)
stats["name_anomaly"] = shap_value_idx_sorted.index.tolist()
logger.info(
"successfully finish detection on subnet %s and service %s\n",
args.subnet,
args.service,
)
return stats
def report_all(path_list_stats: List[str], path_save: str):
"""
Summarize all reports and save the result.
Parameters
----------
path_list_stats : list
List of stats file paths
path_save : str
File path where the report will be saved
"""
os.makedirs(os.path.dirname(path_save), exist_ok=True)
logger.info("summarizing all reports...")
results_pd = pd.DataFrame(
[], columns=["datetime_rounded", "src_ip", "subnet", "service"]
)
idx = 0
for path in path_list_stats:
# Load stats
stats = utils.load_json(path)
subnet, service = stats["subnet"], stats["service"]
try:
anomaly_list = stats["name_anomaly"]
except (KeyError, TypeError):
continue
if not anomaly_list:
continue
# Load report
path_report = path.replace(FILENAME_STATS, FILENAME_REPORT)
report = pd.read_csv(path_report, index_col=[0, 1], parse_dates=[0])
logger.info(report.index)
# Store anomalies in the DataFrame
for (dt, src_ip) in anomaly_list:
logger.info((dt, src_ip))
results_pd.loc[idx] = [dt, src_ip, subnet, service]
if idx == 0:
results_shaps = pd.DataFrame([], columns=report.columns)
results_shaps.loc[idx] = report.loc[(dt, src_ip)]
idx += 1
anomaly_found = idx > 0
if anomaly_found:
# Anomaly found
results_pd = pd.concat([results_pd, results_shaps], axis=1)  # api: pandas.concat
#!/usr/bin/env python
# coding: utf-8
import inspect
import json
import os
import urllib.request
from functools import reduce
from glob import glob
from time import sleep
from urllib.parse import quote
import jieba
import numpy as np
import pandas as pd
import seaborn as sns
from icecream import ic
from snorkel.labeling import PandasLFApplier, labeling_function
from snorkel.labeling.model import LabelModel
pd.set_option("display.max_rows", 200)  # api: pandas.set_option
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import get_handle
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta')
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> df = pd.DataFrame(values, columns=["i"])
>>> df.to_stata('filename.dta')
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass
>>> import os
>>> os.remove("./filename.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
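# Quick sketch of the monthly branch (values are illustrative): %tm counts months
# since 1960m1, so 0 maps to 1960-01 and 13 maps to 1961-02.
# >>> _stata_elapsed_date_to_datetime_vec(Series([0, 13]), "%tm")
# 0   1960-01-01
# 1   1961-02-01
# dtype: datetime64[ns]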
def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index._data.year
d["month"] = date_index._data.month
if days:
days_in_ns = dates.view(np.int64) - to_datetime(
d["year"], format="%Y"
).view(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date format")
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
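# Round-trip sketch for the monthly encoder (values are illustrative): %tm is months
# since 1960m1, so 1960-01 encodes to 0.0 and 1961-02 to 13.0.
# >>> s = Series(to_datetime(["1960-01-15", "1961-02-01"]))
# >>> _datetime_to_stata_elapsed_vec(s, "%tm")
# 0     0.0
# 1    13.0
# dtype: float64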
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '{0}' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from {0} to {1}, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
class CategoricalConversionWarning(Warning):
pass
categorical_conversion_warning = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in categorical variables with different
categories. This occurs since it is not possible to know all possible values
until the entire dataset has been read. To avoid this warning, you can either
read the dataset without an iterator, or manually convert the categorical data
by setting ``convert_categoricals`` to False and then accessing the variable
labels through the value_labels method of the reader.
"""
def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""
Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
the values are in the int32 range, and side-cast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise they are upcast to a
larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ""
# original, if small, if large
conversion_data = (
(np.bool_, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.int64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
f"Column {col} has a maximum value of infinity which is outside "
"the range supported by Stata."
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
f"Column {col} has a maximum value ({value}) outside the range "
f"supported by Stata ({float64_max})"
)
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
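# Sketch of the casting rules (values are illustrative): a uint8 column whose values
# exceed the int8 bound is widened to int16, while an in-range int64 column is
# downcast to int32.
# >>> df = DataFrame({"u": np.array([0, 200], dtype=np.uint8),
# ...                 "i": np.array([1, 2], dtype=np.int64)})
# >>> _cast_to_stata_types(df).dtypes
# u    int16
# i    int32
# dtype: object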
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = 0
self.txt: list[bytes] = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
offsets: list[int] = []
values: list[int] = []
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch,
)
category = category.encode(encoding)
offsets.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
values.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 characters."
)
# Ensure int32
self.off = np.array(offsets, dtype=np.int32)
self.val = np.array(values, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labels.
Parameters
----------
byteorder : str
Byte order of the output
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
encoding = self._encoding
bio = BytesIO()
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(text + null_byte)
return bio.getvalue()
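# Usage sketch (column name and values are illustrative): encode the value labels of
# a categorical column. The byte stream written above is len, labname, padding, n,
# textlen, offsets, values and the null-terminated label texts; `_pad_bytes` is
# defined elsewhere in this module.
# >>> grades = Series(["low", "high", "low"], dtype="category", name="grade")
# >>> blob = StataValueLabel(grades).generate_value_label("<")  # little-endian bytes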
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : {int, float}
The Stata missing value code
Notes
-----
More information: <https://www.stata.com/help.cgi?missing>
Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES: dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
key = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
key = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value: int | float):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
@property
def string(self) -> str:
"""
The Stata representation of the missing value: '.', '.a'..'.z'
Returns
-------
str
The representation of the missing value.
"""
return self._str
@property
def value(self) -> int | float:
"""
The binary representation of the missing value.
Returns
-------
{int, float}
The binary representation of the missing value.
"""
return self._value
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{type(self)}({self})"
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype: np.dtype) -> int | float:
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
if dtype == np.int8: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int8"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int16: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int16"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float64: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
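# Quick sketch of the code-to-string mapping built above (101 is the documented
# generic int8 missing-value code): 101 is '.', 102 is '.a', and so on through '.z'.
# >>> StataMissingValue(101).string
# '.'
# >>> StataMissingValue(102).string
# '.a'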
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100.
# We're going to drop the label and cast to int.
self.DTYPE_MAP = dict(
list(zip(range(1, 245), [np.dtype("a" + str(i)) for i in range(1, 245)]))
+ [
(251, np.dtype(np.int8)),
(252, np.dtype(np.int16)),
(253, np.dtype(np.int32)),
(254, np.dtype(np.float32)),
(255, np.dtype(np.float64)),
]
)
self.DTYPE_MAP_XML = {
32768: np.dtype(np.uint8), # Keys to GSO
65526: np.dtype(np.float64),
65527: np.dtype(np.float32),
65528: np.dtype(np.int32),
65529: np.dtype(np.int16),
65530: np.dtype(np.int8),
}
# error: Argument 1 to "list" has incompatible type "str";
# expected "Iterable[int]" [arg-type]
self.TYPE_MAP = list(range(251)) + list("bhlfd") # type: ignore[arg-type]
self.TYPE_MAP_XML = {
# Not really a Q, unclear how to handle byteswap
32768: "Q",
65526: "d",
65527: "f",
65528: "l",
65529: "h",
65530: "b",
}
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254, # float
100: 255, # double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
"_all",
"_N",
"_skip",
"_b",
"_pi",
"str#",
"in",
"_pred",
"strL",
"_coef",
"_rc",
"using",
"_cons",
"_se",
"with",
"_n",
)
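# Sketch of the lookup tables above (values are illustrative): type codes 1-244 are
# fixed-width strings and 251-255 are the numeric Stata types.
# >>> parser = StataParser()
# >>> parser.DTYPE_MAP[253]
# dtype('int32')
# >>> parser.DTYPE_MAP[10]
# dtype('S10')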
class StataReader(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
def __init__(
self,
path_or_buf: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
super().__init__()
self.col_sizes: list[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = ""
self._chunksize = chunksize
self._using_iterator = False
if self._chunksize is None:
self._chunksize = 1
elif not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype: np.dtype | None = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
with get_handle(
path_or_buf,
"rb",
storage_options=storage_options,
is_text=False,
compression=compression,
) as handles:
# Copy to BytesIO, and ensure no encoding
# Argument 1 to "BytesIO" has incompatible type "Union[Any, bytes, None,
# str]"; expected "bytes"
self.path_or_buf = BytesIO(handles.handle.read()) # type: ignore[arg-type]
self._read_header()
self._setup_dtype()
def __enter__(self) -> StataReader:
"""enter context manager"""
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""exit context manager"""
self.close()
def close(self) -> None:
"""close the handle if its open"""
self.path_or_buf.close()
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
nvar_type = "H" if self.format_version <= 118 else "I"
nvar_size = 2 if self.format_version <= 118 else 4
self.nvar = struct.unpack(
self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
)[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> tuple[list[int | str], list[str | np.dtype]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for _ in range(self.nvar)
]
def f(typ: int) -> int | str:
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
typlist = [f(x) for x in raw_typlist]
def g(typ: int) -> str | np.dtype:
if typ <= 2045:
return str(typ)
try:
# error: Incompatible return value type (got "Type[number]", expected
# "Union[str, dtype]")
return self.DTYPE_MAP_XML[typ] # type: ignore[return-value]
except KeyError as err:
raise ValueError(f"cannot convert stata dtype [{typ}]") from err
dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self) -> list[str]:
# 33 in older formats, 129 in formats 118 and 119
b = 33 if self.format_version < 118 else 129
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self) -> list[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
def _get_lbllist(self) -> list[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
def _get_variable_labels(self) -> list[str]:
if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a workaround that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_types = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_dtypes = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
self._dtype = np.dtype(dtypes)
return self._dtype
def _calcsize(self, fmt: int | str) -> int:
if isinstance(fmt, int):
return fmt
return struct.calcsize(self.byteorder + fmt)
def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
encoding = self._encoding
msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict: dict[str, dict[float | int, str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = {}
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val": # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
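# n value labels follow: `off` holds each label's byte offset into the text block
# (whose total length is `txtlen`) and `val` the numeric codes being labelled; both
# are sorted by offset before the text block is sliced below.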
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = {}
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
decoded_va = va[0:-1].decode(self._encoding)
else:
# Stata says typ 129 can be binary, so use str
decoded_va = str(va)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
self._using_iterator = True
return self.read(nrows=self._chunksize)
def get_chunk(self, size: int | None = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(
self,
nrows: int | None = None,
convert_dates: bool | None = None,
convert_categoricals: bool | None = None,
index_col: str | None = None,
convert_missing: bool | None = None,
preserve_dtypes: bool | None = None,
columns: Sequence[str] | None = None,
order_categoricals: bool | None = None,
) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._decode, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where([dtyp is not None for dtyp in self.dtyplist])[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i]))
)
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(dict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
if convert_dates:
def any_startswith(x: str) -> bool:
return any(x.startswith(fmt) for fmt in _date_formats)
cols = np.where([any_startswith(x) for x in self.fmtlist])[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col], self.fmtlist[i]
)
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(
data, self.value_label_dict, self.lbllist, order_categoricals
)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.dtype(np.float16), np.dtype(np.float32)):
dtype = np.dtype(np.float64)
convert = True
elif dtype in (
np.dtype(np.int8),
np.dtype(np.int16),
np.dtype(np.int32),
):
dtype = np.dtype(np.int64)
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(dict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
fmt = cast(str, fmt) # only strs in VALID_RANGE
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
# appreciably faster to do this with ndarray instead of Series
svals = series._values
missing = (svals < nmin) | (svals > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.nonzero(np.asarray(missing))[0]
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
# Note: operating on ._values is much faster than operating on the Series directly
# TODO: can we fix that?
replacement._values[missing] = np.nan
replacements[colname] = replacement
if replacements:
columns = data.columns
replacement_df = DataFrame(replacements, copy=False)
replaced = concat(
[data.drop(replacement_df.columns, axis=1), replacement_df],
axis=1,
copy=False,
)
data = replaced[columns]
return data
def _insert_strls(self, data: DataFrame) -> DataFrame:
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != "Q":
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
joined = ", ".join(list(unmatched))
raise ValueError(
"The following columns were not "
f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(
self,
data: DataFrame,
value_label_dict: dict[str, dict[float | int, str]],
lbllist: Sequence[str],
order_categoricals: bool,
) -> DataFrame:
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
vl = value_label_dict[label]
keys = np.array(list(vl.keys()))
column = data[col]
key_matches = column.isin(keys)
if self._using_iterator and key_matches.all():
initial_categories: np.ndarray | None = keys
# If all categories are in the keys and we are iterating,
# use the same keys for all chunks. If some are missing
# value labels, then we will fall back to the categories
# varying across chunks.
else:
if self._using_iterator:
# warn is using an iterator
warnings.warn(
categorical_conversion_warning, CategoricalConversionWarning
)
initial_categories = None
cat_data = Categorical(
column, categories=initial_categories, ordered=order_categoricals
)
if initial_categories is None:
# If None here, then we need to match the cats in the Categorical
categories = []
for category in cat_data.categories:
if category in vl:
categories.append(vl[category])
else:
categories.append(category)
else:
# If all cats are matched, we can use the values
categories = list(vl.values())
try:
# Try to catch duplicate categories
cat_data.categories = categories
except ValueError as err:
vc = Series(categories).value_counts()
repeated_cats = list(vc.index[vc > 1])
repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
# GH 25772
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
raise ValueError(msg) from err
# TODO: is the next line needed above in the data(...) method?
cat_series = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_series))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame(dict(cat_converted_data), copy=False)
return data
@property
def data_label(self) -> str:
"""
Return data label of Stata file.
"""
return self._data_label
def variable_labels(self) -> dict[str, str]:
"""
Return variable labels as a dict, associating each variable name
with its corresponding label.
Returns
-------
dict
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self) -> dict[str, dict[float | int, str]]:
"""
Return a dict, associating each variable name with a dict that maps
each value to its corresponding label.
Returns
-------
dict
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
@Appender(_read_stata_doc)
def read_stata(
filepath_or_buffer: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame | StataReader:
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
storage_options=storage_options,
compression=compression,
)
if iterator or chunksize:
return reader
with reader:
return reader.read()
def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(f"Endianness {endianness} not understood")
def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
Take a char string and pad it with null bytes until it is `length` chars long.
"""
if isinstance(name, bytes):
return name + b"\x00" * (length - len(name))
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.dtype(np.float64) # Stata expects doubles for SIFs
else:
raise NotImplementedError(f"Format {fmt} not implemented")
def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict:
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert a numpy dtype to the corresponding Stata type code (an ordinal byte).
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
                     Pandas    Stata
251 - for int8      byte
252 - for int16     int
253 - for int32     long
254 - for float32   float
255 - for double    double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
return max(itemsize, 1)
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float64: # type: ignore[comparison-overlap]
return 255
# Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float32: # type: ignore[comparison-overlap]
return 254
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int32: # type: ignore[comparison-overlap]
return 253
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int16: # type: ignore[comparison-overlap]
return 252
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int8: # type: ignore[comparison-overlap]
return 251
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _dtype_to_default_stata_fmt(
dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return "%9s"
if dtype.type == np.object_:
itemsize = max_len_string_array(ensure_object(column._values))
if itemsize > max_str_len:
if dta_version >= 117:
return "%9s"
else:
raise ValueError(excessive_string_length_error.format(column.name))
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
@doc(storage_options=generic._shared_docs["storage_options"])
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies compression
mode. Compression mode must be one of {{'infer', 'gzip', 'bz2', 'zip',
'xz', None}}. If compression mode is 'infer' and `fname` is path-like,
then detect compression from the following extensions: '.gz', '.bz2',
'.zip', or '.xz' (otherwise no compression). If dict and compression
mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {{"method": "zip", "archive_name": "data_file.dta"}}
>>> writer = StataWriter('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Save a DataFrame with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}})
>>> writer.write_file()
"""
_max_string_length = 244
_encoding = "latin-1"
def __init__(
self,
fname: FilePathOrBuffer,
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
super().__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
self._compression = compression
self._output_file: Buffer | None = None
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
self.storage_options = storage_options
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names: dict[Hashable, str] = {}
def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
self.handles.handle.write(
to_write.encode(self._encoding) # type: ignore[arg-type]
)
def _write_bytes(self, value: bytes) -> None:
"""
Helper to assert file is open before writing.
"""
self.handles.handle.write(value) # type: ignore[arg-type]
def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
"""
Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int
"""
is_cat = [is_categorical_dtype(data[col].dtype) for col in data]
self._is_col_cat = is_cat
self._value_labels: list[StataValueLabel] = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
svl = StataValueLabel(data[col], encoding=self._encoding)
self._value_labels.append(svl)
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError(
"It is not possible to export "
"int64-based categorical data to Stata."
)
values = data[col].cat.codes._values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(dict(data_formatted))
def _replace_nans(self, data: DataFrame) -> DataFrame:
"""
Check floating point data columns for nans, and replace these with
the generic Stata missing value (.)
"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES["f"]
else:
replacement = self.MISSING_VALUES["d"]
data[c] = data[c].fillna(replacement)
return data
def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
pass
def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
Parameters
----------
name : str
Variable name
Returns
-------
str
The validated name with invalid characters replaced with
underscores.
Notes
-----
Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9
and _.
"""
for c in name:
if (
(c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
):
name = name.replace(c, "_")
return name
def _check_column_names(self, data: DataFrame) -> DataFrame:
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names: dict[Hashable, str] = {}
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, str):
name = str(name)
name = self._validate_variable_name(name)
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = "_" + name
# Variable name may not start with a number
if "0" <= name[0] <= "9":
name = "_" + name
name = name[: min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = "_" + str(duplicate_var_id) + name
name = name[: min(len(name), 32)]
duplicate_var_id += 1
converted_names[orig_name] = name
columns[j] = name
data.columns = Index(columns)
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import pandas as pd
from typing import Iterable
from pandas import DataFrame
from src.utils.logging_manager import LoggingManager, LoggingLevel
class BatchEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, pd.DataFrame):
return {"__dataframe__": obj.to_json()}
return json.JSONEncoder.default(self, obj)
def as_batch(d):
if "__dataframe__" in d:
return pd.read_json(d["__dataframe__"])
else:
return d
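# BatchEncoder serialises a pandas DataFrame under the "__dataframe__" key via to_json,
# and as_batch reverses it with pd.read_json, so Batch payloads round-trip through JSON.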
class Batch:
"""
Data model used for storing a batch of frames
Arguments:
frames (DataFrame): pandas Dataframe holding frames data
identifier_column (str): A column used to uniquely a row
"""
def __init__(self, frames=pd.DataFrame(), identifier_column='id'):
    # minimal completion of the truncated constructor (attribute names and the
    # 'id' default are assumed): store the frame data and the identifier column
    self._frames = frames
    self._identifier_column = identifier_column
import pandas as pd
import numpy as np
import functions as f
from lightfm import LightFM
from scipy import sparse
import math
import operator
import collections as cl
from scipy.sparse import csr_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import time
import xgboost as xgb
from numpy import sort
TRAINING_COLS = ['position','recent_index', 'user_bias', 'item_bias', 'lightfm_dot_product', 'lightfm_prediction', 'score']
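# get_rec_matrix (below) is the end-to-end pipeline: build interaction data from the
# train/test logs, train a LightFM factorisation model (optionally with hotel price
# features), derive per-impression features, train XGBoost on them, rank the test
# impressions and write the submission csv.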
def get_rec_matrix(df_train, df_test, inner_train, inner_gt, subm_csv, parameters = None, **kwargs):
hotel_prices_file = kwargs.get('file_metadata', None)
df_inner_train = pd.read_csv(inner_train)
df_inner_gt = pd.read_csv(inner_gt)
df_inner_gt = get_validation_set(df_test)
df_inner_train = f.get_interaction_actions(df_inner_train, actions = parameters.listactions)
df_inner_gt = f.get_interaction_actions(df_inner_gt, actions=parameters.listactions)
df_inner_gt = remove_single_clickout_actions(df_inner_gt)
df_inner_gt = create_recent_index(df_inner_gt)
df_inner_gt_clickout, df_inner_gt_no_clickout = split_clickout(df_inner_gt)
df_test_cleaned = f.get_interaction_actions(df_test, actions = parameters.listactions, clean_null = True)
df_test_cleaned = f.remove_null_clickout(df_test_cleaned)
test_interactions = create_recent_index(df_test_cleaned, grouped=True)
df_train = pd.concat([df_inner_train, df_inner_gt_no_clickout, df_test_cleaned], sort=False)
user_dict = create_user_dict(df_train)
if hotel_prices_file != None:
hotel_features, hotel_dict = get_hotel_prices(hotel_prices_file, df_train)
else:
hotel_features = None
df_test_user, df_test_nation = split_one_action(df_test)
mf_model = train_mf_model(df_train, parameters, item_features = hotel_features, hotel_dic = hotel_dict, user_dic = user_dict)
df_train_xg = get_lightFM_features(df_inner_gt_clickout, mf_model, user_dict, hotel_dict, item_f=hotel_features)
#df_train_xg = get_RNN_features(df_train_xg, 'rnn_test_sub_xgb_inner_100%_vanilla_opt_0,001lr.csv')
xg_model = xg_boost_training(df_train_xg)
df_test_xg = get_lightFM_features(df_test_user, mf_model, user_dict, hotel_dict, item_f=hotel_features, is_test = True)
df_test_xg = (df_test_xg.merge(test_interactions, left_on=['session_id'], right_on=['session_id'], how="left"))
df_test_xg['recent_index'] = df_test_xg.apply(lambda x : recent_index(x), axis=1)
del df_test_xg['all_interactions']
#df_test_xg = get_RNN_features(df_test_xg, 'rnn_test_sub_xgb_dev_100%_vanilla_opt_0,001lr.csv')
print(df_test_xg.head())
df_out = generate_submission(df_test_xg, xg_model)
df_out_nation = complete_prediction(df_test_nation)
df_out = pd.concat([df_out, df_out_nation])
df_out.to_csv(subm_csv, index=False)
return df_out
def get_validation_set(df):
sessions = get_non_null_clickout(df)
dft = df[df['session_id'].isin(sessions)]
dft = dft[~dft['reference'].isnull()]
return dft
def get_non_null_clickout(df_test):
print(df_test.head())
df_clickout = df_test[(~df_test['reference'].isnull()) & (df_test['action_type'] == 'clickout item')]
return df_clickout['session_id'].drop_duplicates()
def create_recent_index(df_orig, grouped=False):
df_list_int = df_orig.groupby('session_id').apply(lambda x: get_list_session_interactions(x)).reset_index(name='all_interactions')
df_list_int = df_list_int[['session_id', 'all_interactions']]
if(grouped):
return df_list_int
df_orig = (df_orig.merge(df_list_int, left_on=['session_id'], right_on=['session_id'], how="left"))
return df_orig
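# recent_index: position of x.item_id within the session's space-separated interaction
# list, normalised by the list length; -999 marks items the user never interacted with.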
def recent_index(x):
#least_recent = len(x.all_interactions)
list_interactions = x.all_interactions.split(" ")
if str(x.item_id) in list_interactions:
i = list_interactions.index(str(x.item_id))
i = i / len(list_interactions)
else:
i = -999
#i = least_recent - i
return i
def get_list_session_interactions(group):
group.loc[:,'reference'] = group['reference'].apply(str)
list_values = group.reference.drop_duplicates()
joined = " ".join(list_values)
return joined
def get_RNN_features(df, filename):
df_rnn = pd.read_csv(filename)
df_rnn = df_rnn.rename(columns={'hotel_id':'item_id'})
df = (df.merge(df_rnn, left_on=['session_id', 'item_id'], right_on=['session_id', 'item_id'], how="left", suffixes=('_mf', '_rnn')))
df.fillna(0)
print(df.head())
return df
def generate_submission(df, xg_model, training_cols=TRAINING_COLS):
df = df.groupby(['user_id', 'session_id', 'timestamp', 'step']).apply(lambda x: calculate_rank(x, xg_model, t_cols=training_cols)).reset_index(name='item_recommendations')
df = df[['user_id', 'session_id', 'timestamp', 'step', 'item_recommendations']]
return df
def calculate_rank(group, model, t_cols=TRAINING_COLS):
#cols = ['user_id', 'session_id', 'timestamp', 'item_id', 'step']
#print(group)
df_test = group[t_cols]
#print(df_test)
xgtest = xgb.DMatrix(df_test)
prediction = model.predict(xgtest, ntree_limit=model.best_ntree_limit)
dic_pred = dict(zip(group['item_id'].apply(str), prediction))
sorted_x = sorted(dic_pred.items(), key=operator.itemgetter(1), reverse = True)
sorted_items = list(map(lambda x:x[0], sorted_x))
#df = group.iloc[0]
#df['item_recommendations'] = " ".join(sorted_items)
return " ".join(sorted_items)
def remove_single_clickout_actions(df):
print('Initial size: ' + str(df.shape[0]))
n_action_session = df.groupby('session_id').size().reset_index(name='n_actions')
print(n_action_session.head())
df = (df.merge(n_action_session, left_on='session_id', right_on='session_id', how="left"))
print(df.head())
df = df.drop(df[(df["action_type"] == "clickout item") & (df['n_actions'] == 1)].index)
del df['n_actions']
return df
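# xg_boost_training: fits a gradient-boosted tree classifier (clicked vs. not-clicked
# impression) on TRAINING_COLS, holding out 20% of rows for early stopping on AUC.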
def xg_boost_training(train):
train = train[TRAINING_COLS + ['label']]
df_train, df_val = train_test_split(train, test_size=0.2)
print(df_train.head())
cols = ['label']
xgtrain = xgb.DMatrix(df_train.drop(cols, axis=1), df_train.label)
xgval = xgb.DMatrix(df_val.drop(cols, axis=1), df_val.label)
params = {
'objective':'binary:logistic',
'eta':0.1,
'booster':'gbtree',
'predictor': 'cpu_predictor',
'max_depth':7,
'nthread':4,
'seed':1,
'eval_metric':'auc',
}
model = xgb.train(
params=list(params.items()),
early_stopping_rounds=30,
verbose_eval=10,
dtrain=xgtrain,
evals=[(xgtrain, 'train'), (xgval, 'test')],
num_boost_round=300,
)
return model
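# get_lightFM_features: explodes each clickout's impression list into one row per
# (session, item) with its position, adds the binary label when training, maps users
# and items to their factorisation indices, and attaches the LightFM score, user/item
# biases and embedding dot product; pairs missing from the factorisation get -999.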
def get_lightFM_features(df, mf_model, user_dict, hotel_dict, item_f = None, user_f=None, is_test = False):
df_train_xg = f.explode_position_scalable(df, 'impressions')
if(is_test == False):
df_train_xg['recent_index'] = df_train_xg.apply(lambda x : recent_index(x), axis=1)
df_train_xg = df_train_xg[['user_id', 'session_id', 'timestamp', 'step', 'reference', 'position', 'item_id', 'recent_index']]
else:
df_train_xg = df_train_xg[['user_id', 'session_id', 'timestamp', 'step', 'reference', 'position', 'item_id']]
#df_train_xg = create_recent_index(df_train_xg)
if(is_test == False):
df_train_xg['label'] = df_train_xg.apply(lambda x: 1 if (str(x.item_id) == str(x.reference)) else 0, axis=1)
df_train_xg['user_id_enc'] = df_train_xg['user_id'].map(user_dict)
df_train_xg['item_id_enc'] = df_train_xg['item_id'].map(hotel_dict)
df_train_xg_null = df_train_xg[(df_train_xg['item_id_enc'].isnull())]
df_train_xg_not_null = df_train_xg[~(df_train_xg['item_id_enc'].isnull())]
#df_train_xg_not_null = df_train_xg[(~df_train_xg['item_id_enc'].isnull()) & (~df_train_xg['user_id_enc'].isnull())]
#df_train_xg_null = df_train_xg[(df_train_xg['item_id_enc'].isnull()) | (df_train_xg['user_id_enc'].isnull())]
print('Null users')
df_user_null = df_train_xg[df_train_xg['user_id_enc'].isnull()]
df_user_null = df_user_null['user_id'].drop_duplicates()
print(df_user_null)
print('There are # ' + str(df_train_xg_not_null.shape[0]) + ' not null pairs')
print('There are # ' + str(df_train_xg_null.shape[0]) + ' null pairs')
df_train_xg_not_null.loc[:,'user_id_enc'] = df_train_xg_not_null['user_id_enc'].apply(int)
df_train_xg_not_null.loc[:,'item_id_enc'] = df_train_xg_not_null['item_id_enc'].apply(int)
#df_train_xg = df_train_xg.fillna('no_data')
#df_train_xg_cleaned, df_train_xg_errors = split_no_info_hotel(df_train_xg)
df_train_xg_not_null.loc[:,'score'] = mf_model.predict(np.array(df_train_xg_not_null['user_id_enc']), np.array(df_train_xg_not_null['item_id_enc']), item_features=item_f, num_threads=4)
df_train_xg_null.loc[:,'score'] = -999
df_train_xg_not_null.loc[:,'user_bias'] = mf_model.user_biases[df_train_xg_not_null['user_id_enc']]
df_train_xg_null.loc[:,'user_bias'] = -999
df_train_xg_not_null.loc[:,'item_bias'] = mf_model.item_biases[df_train_xg_not_null['item_id_enc']]
df_train_xg_null.loc[:,'item_bias'] = -999
user_embeddings = mf_model.user_embeddings[df_train_xg_not_null.user_id_enc]
item_embeddings = mf_model.item_embeddings[df_train_xg_not_null.item_id_enc]
df_train_xg_not_null.loc[:,'lightfm_dot_product'] = (user_embeddings * item_embeddings).sum(axis=1)
df_train_xg_null.loc[:,'lightfm_dot_product'] = -999
df_train_xg_not_null.loc[:,'lightfm_prediction'] = df_train_xg_not_null['lightfm_dot_product'] + df_train_xg_not_null['user_bias'] + df_train_xg_not_null['item_bias']
df_train_xg_null.loc[:,'lightfm_prediction'] = -999
df_train_xg = pd.concat([df_train_xg_not_null, df_train_xg_null], ignore_index=True, sort=False)
df_train_xg = df_train_xg.sort_values(by=['user_id', 'session_id', 'timestamp', 'step'], ascending=False)
cols = ['reference', 'user_id_enc', 'item_id_enc']
df_train_xg = df_train_xg.drop(cols, axis=1)
return df_train_xg
def split_clickout(df):
print('Initial size: ' + str(df.shape[0]))
df_clickout = get_clickouts(df)
df_no_clickout = get_no_clickout(df)
print('There are #: ' + str(df_clickout.shape[0]) + ' clickout actions')
print('There are #: ' + str(df_no_clickout.shape[0]) + ' no clickout actions')
return df_clickout, df_no_clickout
def get_clickouts(df_test):
df_test['step_max'] = df_test.groupby(['user_id'])['step'].transform(max)
df_clickout = df_test[(df_test['step_max'] == df_test['step']) & (df_test['action_type'] == 'clickout item')]
del df_clickout['step_max']
return df_clickout
def get_no_clickout(df):
df['step_max'] = df.groupby(['user_id'])['step'].transform(max)
df_no_clickout = df[(~(df['step_max'] == df['step'])) | (~(df['action_type'] == 'clickout item'))]
del df_no_clickout['step_max']
return df_no_clickout
def complete_prediction(df_test_nation):
df_test_nation['item_recommendations'] = df_test_nation.apply(lambda x: fill_recs(x.impressions), axis=1)
df_out_nation = df_test_nation[['user_id', 'session_id', 'timestamp','step', 'item_recommendations']]
return df_out_nation
def fill_recs(imp):
l = imp.split('|')
return f.list_to_space_string(l)
def train_mf_model(df_train, params, item_features = None, user_features = None, hotel_dic = None, user_dic = None):
df_interactions = get_n_interaction(df_train, weight_dic = params.actionsweights)
if user_dic == None:
print('Null user dictionary. Creating it...')
user_dic = create_user_dict(df_interactions)
if hotel_dic == None:
print('Null hotel dictionary. Creating it...')
hotel_dic = create_item_dict(df_interactions)
interaction_matrix = f.create_sparse_interaction_matrix(df_interactions, user_dic, hotel_dic)
mf_model = runMF(interaction_matrix, params, n_jobs = 4, item_f = item_features, user_f = user_features)
return mf_model
def generate_prices_sparse_matrix(df, features_col='intervals'):
df['present'] = 1
hotel_dict = create_item_dict(df) # check that this matches the other hotel dictionary
feature_dict = create_item_dict(df, col_name='feature')
list_hotel = list(df['reference'])
list_features = list(df['feature'])
list_data = list(df['present'])
n_items = len(list_hotel)
n_features = len(list_features)
# Convert each list of string in a list of indexes
list_items = list(map(lambda x: hotel_dict[x], list_hotel))
list_features = list(map(lambda x: feature_dict[x], list_features))
# Generate the sparse matrix
row = np.array(list_items)
col = np.array(list_features)
data = np.array(list_data)
csr = csr_matrix((data, (row, col)), shape=(n_items, n_features))
return csr, hotel_dict
def get_hotel_prices(metadata_file, interactions, n_categories = 2000):
"""
Required Input -
- metadata_file = file with the average price for each hotel
"""
df_metadata = pd.read_csv(metadata_file)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# %reload_ext google.cloud.bigquery
# +
#######################################
print('Setting everything up...')
#######################################
import warnings
warnings.filterwarnings('ignore')
import pandas_gbq
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import os
import sys
from datetime import datetime
from datetime import date
from datetime import time
from datetime import timedelta
import time
DATASET = ''
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
from IPython.display import HTML as html_print
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
print('done.')
# -
cwd = os.getcwd()
cwd = str(cwd)
print(cwd)
# +
dic = {
'src_hpo_id': [
"saou_uab_selma", "saou_uab_hunt", "saou_tul", "pitt_temple",
"saou_lsu", "trans_am_meyers", "trans_am_essentia", "saou_ummc",
"seec_miami", "seec_morehouse", "seec_emory", "uamc_banner", "pitt",
"nyc_cu", "ipmc_uic", "trans_am_spectrum", "tach_hfhs", "nec_bmc",
"cpmc_uci", "nec_phs", "nyc_cornell", "ipmc_nu", "nyc_hh",
"ipmc_uchicago", "aouw_mcri", "syhc", "cpmc_ceders", "seec_ufl",
"saou_uab", "trans_am_baylor", "cpmc_ucsd", "ecchc", "chci", "aouw_uwh",
"cpmc_usc", "hrhc", "ipmc_northshore", "chs", "cpmc_ucsf", "jhchc",
"aouw_mcw", "cpmc_ucd", "ipmc_rush", "va", "saou_umc"
],
'HPO': [
"UAB Selma", "UAB Huntsville", "Tulane University", "Temple University",
"Louisiana State University",
"Reliant Medical Group (Meyers Primary Care)",
"Essentia Health Superior Clinic", "University of Mississippi",
"SouthEast Enrollment Center Miami",
"SouthEast Enrollment Center Morehouse",
"SouthEast Enrollment Center Emory", "Banner Health",
"University of Pittsburgh", "Columbia University Medical Center",
"University of Illinois Chicago", "Spectrum Health",
"Henry Ford Health System", "Boston Medical Center", "UC Irvine",
"Partners HealthCare", "Weill Cornell Medical Center",
"Northwestern Memorial Hospital", "Harlem Hospital",
"University of Chicago", "Marshfield Clinic",
"San Ysidro Health Center", "Cedars-Sinai", "University of Florida",
"University of Alabama at Birmingham", "Baylor", "UC San Diego",
"Eau Claire Cooperative Health Center", "Community Health Center, Inc.",
"UW Health (University of Wisconsin Madison)",
"University of Southern California", "HRHCare",
"NorthShore University Health System", "Cherokee Health Systems",
"UC San Francisco", "Jackson-Hinds CHC", "Medical College of Wisconsin",
"UC Davis", "Rush University",
"United States Department of Veterans Affairs - Boston",
"University Medical Center (UA Tuscaloosa)"
]
}
site_df = pd.DataFrame(data=dic)
site_df
# +
######################################
print('Getting the data from the database...')
######################################
site_map = pd.io.gbq.read_gbq('''
select distinct * from (
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_visit_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_care_site`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_condition_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_device_exposure`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_drug_exposure`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_location`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_measurement`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_note`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_observation`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_person`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_procedure_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_provider`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_specimen`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_visit_occurrence`
)
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(site_map.shape[0], 'records received.')
# -
site_df = pd.merge(site_map, site_df, how='outer', on='src_hpo_id')
site_df
Lipid = (40782589, 40795800, 40772572)
CBC = (40789356, 40789120, 40789179, 40772748, 40782735, 40789182, 40786033,
40779159)
CBCwDiff = (40785788, 40785796, 40779195, 40795733, 40795725, 40772531,
40779190, 40785793, 40779191, 40782561, 40789266)
CMP = (3049187, 3053283, 40775801, 40779224, 40782562, 40782579, 40785850,
40785861, 40785869, 40789180, 40789190, 40789527, 40791227, 40792413,
40792440, 40795730, 40795740, 40795754)
Physical_Measurement = (40654163, 40655804, 40654162, 40655805, 40654167,
40654164)
measurement_codes = Lipid + CBC + CBCwDiff + CMP + Physical_Measurement
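# measurement_codes collects the ancestor concept_ids of the required labs and physical
# measurements; it is interpolated into the SQL "ancestor_concept_id IN (...)" clauses
# of the "selected measurements" queries below.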
# # Integration of Units for All Measurements:
#
# #### Getting the numbers for all of the unit concept IDs by site
# +
unit_concept_ids_by_site_query = """
CREATE TABLE `{DATASET}.sites_unit_counts`
OPTIONS (
expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 3 MINUTE)
)
AS
SELECT
DISTINCT
mm.src_hpo_id, COUNT(m.measurement_id) as number_total_units
FROM
`{DATASET}.unioned_ehr_measurement` m
JOIN
`{DATASET}._mapping_measurement` mm
ON
m.measurement_id = mm.measurement_id
GROUP BY 1
ORDER BY number_total_units DESC
""".format(DATASET = DATASET)
unit_concept_ids_by_site = pd.io.gbq.read_gbq(unit_concept_ids_by_site_query, dialect='standard')
# +
unit_concept_ids_by_site_query = """
SELECT
*
FROM
`{DATASET}.sites_unit_counts`
""".format(DATASET = DATASET)
unit_concept_ids_by_site = pd.io.gbq.read_gbq(unit_concept_ids_by_site_query, dialect='standard')
# -
unit_concept_ids_by_site
# #### Below are the "successful" unit concept IDs
# +
successful_unit_concept_ids_by_site_query = """
CREATE TABLE `{DATASET}.sites_successful_unit_counts`
OPTIONS (
expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 3 MINUTE)
)
AS
SELECT
DISTINCT
mm.src_hpo_id, COUNT(m.measurement_id) as number_valid_units
FROM
`{DATASET}.unioned_ehr_measurement` m
JOIN
`{DATASET}._mapping_measurement` mm
ON
m.measurement_id = mm.measurement_id
JOIN
`{DATASET}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
c.standard_concept IN ('S')
AND
LOWER(c.domain_id) LIKE '%unit%'
GROUP BY 1
ORDER BY number_valid_units DESC
""".format(DATASET = DATASET)
successful_unit_concept_ids_by_site = pd.io.gbq.read_gbq(successful_unit_concept_ids_by_site_query, dialect='standard')
# +
successful_unit_concept_ids_by_site_query = """
SELECT
*
FROM
`{DATASET}.sites_successful_unit_counts`
""".format(DATASET = DATASET)
successful_unit_concept_ids_by_site = pd.io.gbq.read_gbq(successful_unit_concept_ids_by_site_query, dialect='standard')
# -
successful_unit_concept_ids_by_site
final_all_units_df = pd.merge(site_df, unit_concept_ids_by_site, on = 'src_hpo_id', how = 'left')
final_all_units_df = pd.merge(final_all_units_df, successful_unit_concept_ids_by_site, on = 'src_hpo_id', how = 'left')
final_all_units_df['total_unit_success_rate'] = round(final_all_units_df['number_valid_units'] / final_all_units_df['number_total_units'] * 100, 2)
final_all_units_df = final_all_units_df.fillna(0)
final_all_units_df = final_all_units_df.sort_values(by='total_unit_success_rate', ascending = False)
final_all_units_df
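# +
# Optional sketch (added, not part of the original notebook): visualise the per-site
# unit success rate computed above. Assumes `final_all_units_df` and the matplotlib
# import from earlier cells.
ax = final_all_units_df.plot.bar(x='src_hpo_id', y='total_unit_success_rate',
                                 figsize=(16, 5), legend=False)
ax.set_ylabel('unit success rate (%)')
ax.set_title('Share of measurement rows with a valid standard unit_concept_id, by site')
plt.tight_layout()
plt.show()
# -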
# # Integration of Units for Selected Measurements
#
# #### making the distinction because - according to the [AoU EHR Operations](https://sites.google.com/view/ehrupload/omop-tables/measurement?authuser=0) page (as of 03/11/2020) - unit_concept_ids are only required for the 'required labs'
# +
selected_unit_concept_ids_by_site_query = """
CREATE TABLE `{DATASET}.sites_unit_counts_selected_measurements`
OPTIONS (
expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 3 MINUTE)
)
AS
SELECT
DISTINCT
mm.src_hpo_id, COUNT(m.measurement_id) as number_sel_meas
FROM
`{DATASET}.unioned_ehr_measurement` m
JOIN
`{DATASET}._mapping_measurement` mm
ON
m.measurement_id = mm.measurement_id
JOIN
`{DATASET}.concept_ancestor` ca
ON
ca.descendant_concept_id = m.measurement_concept_id
JOIN
`{DATASET}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
ca.ancestor_concept_id IN {selected_measurements}
GROUP BY 1
ORDER BY number_sel_meas DESC
""".format(DATASET = DATASET, selected_measurements = measurement_codes)
selected_unit_concept_ids_by_site = pd.io.gbq.read_gbq(selected_unit_concept_ids_by_site_query, dialect='standard')
# +
selected_unit_concept_ids_by_site_query = """
SELECT
*
FROM
`{DATASET}.sites_unit_counts_selected_measurements`
""".format(DATASET = DATASET)
selected_unit_concept_ids_by_site = pd.io.gbq.read_gbq(selected_unit_concept_ids_by_site_query, dialect='standard')
# -
selected_unit_concept_ids_by_site
# #### Below are the 'successful' unit_concept_ids
# +
successful_selected_unit_concept_ids_by_site_query = """
CREATE TABLE `{DATASET}.sites_successful_unit_counts_sel_meas`
OPTIONS (
expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 3 MINUTE)
)
AS
SELECT
DISTINCT
mm.src_hpo_id, COUNT(m.unit_concept_id) as number_valid_units_sel_meas
FROM
`{DATASET}.unioned_ehr_measurement` m
JOIN
`{DATASET}._mapping_measurement` mm
ON
m.measurement_id = mm.measurement_id
JOIN
`{DATASET}.concept_ancestor` ca
ON
ca.descendant_concept_id = m.measurement_concept_id
JOIN
`{DATASET}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
c.standard_concept IN ('S')
AND
LOWER(c.domain_id) LIKE '%unit%'
AND
ca.ancestor_concept_id IN {selected_measurements}
GROUP BY 1
ORDER BY number_valid_units_sel_meas DESC
""".format(DATASET = DATASET, selected_measurements = measurement_codes)
successful_selected_unit_concept_ids_by_site = pd.io.gbq.read_gbq(successful_selected_unit_concept_ids_by_site_query, dialect='standard')
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #carbon content in biomass
c_cont_po_plasma = 0.5454
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S1nu = df1nu['Firewood_other_energy_use'].values
c_firewood_energy_S1pl = df1pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
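#decomp(t, remainAGB): share of the remaining AGB still present after t years, following
#the decay curve (1-(1-exp(-a*t))^b) with a and b set in Step (1)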
def decomp(t,remainAGB):
return (1-(1-np.exp(-a*t))**b)*remainAGB
#set zero matrix
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix[:,i] = np.diff(output_decomp[:,i])
i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
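#wood-product lifetimes are modelled as normally distributed (mean B = 35 years,
#standard deviation 0.3*B); the DynamicStockModel outflows (TestDSM*.o) printed below
#are the yearly flows leaving the in-use stock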
TestDSM1nu = DynamicStockModel(t = df1nu['Year'].values, i = df1nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1pl = DynamicStockModel(t = df1pl['Year'].values, i = df1pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1nu, ExitFlag1nu = TestDSM1nu.dimension_check()
CheckStr1pl, ExitFlag1nu = TestDSM1pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort1nu, ExitFlag1nu = TestDSM1nu.compute_s_c_inflow_driven()
Stock_by_cohort1pl, ExitFlag1pl = TestDSM1pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S1nu, ExitFlag1nu = TestDSM1nu.compute_stock_total()
S1pl, ExitFlag1pl = TestDSM1pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C1nu, ExitFlag1nu = TestDSM1nu.compute_o_c_from_s_c()
O_C1pl, ExitFlag1pl = TestDSM1pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O1nu, ExitFlag1nu = TestDSM1nu.compute_outflow_total()
O1pl, ExitFlag1pl = TestDSM1pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS1nu, ExitFlag1nu = TestDSM1nu.compute_stock_change()
DS1pl, ExitFlag1pl = TestDSM1pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal1nu, ExitFlag1nu = TestDSM1nu.check_stock_balance()
Bal1pl, ExitFlag1pl = TestDSM1pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM1nu.o)
print(TestDSM1pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#plt.plot(TestDSM1.s)
#plt.xlim([0, 100])
#plt.ylim([0,50])
#plt.show()
#%%
#Step (5): Biomass growth
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_nucleus' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nucleus' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_plasma' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S1nu = df1nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S1pl = df1pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S1nu = df1nu['PH_Emissions_PO'].values
PH_Emissions_PO_S1pl = df1pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
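#first-order decay: k = ln(2)/half-life, so the remaining fraction after t years is
#exp(-k*t) (e.g. exp(-k*20) = 0.5 for hl = 20); note that the (1-(1-np.exp(-k*t)))
#term in the decomposition functions below simplifies to np.exp(-k*t)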
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1nu(t,remainAGB_CH4_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1nu
#set zero matrix
output_decomp_CH4_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1nu[i:,i] = decomp_CH4_S1nu(t[:len(t)-i],remain_part_CH4_S1nu)
print(output_decomp_CH4_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1nu[:,i] = np.diff(output_decomp_CH4_S1nu[:,i])
i = i + 1
print(subs_matrix_CH4_S1nu[:,:4])
print(len(subs_matrix_CH4_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1nu = subs_matrix_CH4_S1nu.clip(max=0)
print(subs_matrix_CH4_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1nu = abs(subs_matrix_CH4_S1nu)
print(subs_matrix_CH4_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1nu)
subs_matrix_CH4_S1nu = np.vstack((zero_matrix_CH4_S1nu, subs_matrix_CH4_S1nu))
print(subs_matrix_CH4_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1nu = (tf,1)
decomp_tot_CH4_S1nu = np.zeros(matrix_tot_CH4_S1nu)
i = 0
while i < tf:
decomp_tot_CH4_S1nu[:,0] = decomp_tot_CH4_S1nu[:,0] + subs_matrix_CH4_S1nu[:,i]
i = i + 1
print(decomp_tot_CH4_S1nu[:,0])
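#note: the summation loop above is equivalent to
#decomp_tot_CH4_S1nu[:,0] = subs_matrix_CH4_S1nu.sum(axis=1)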
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1pl(t,remainAGB_CH4_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1pl
#set zero matrix
output_decomp_CH4_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1pl[i:,i] = decomp_CH4_S1pl(t[:len(t)-i],remain_part_CH4_S1pl)
print(output_decomp_CH4_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1pl[:,i] = np.diff(output_decomp_CH4_S1pl[:,i])
i = i + 1
print(subs_matrix_CH4_S1pl[:,:4])
print(len(subs_matrix_CH4_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1pl = subs_matrix_CH4_S1pl.clip(max=0)
print(subs_matrix_CH4_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1pl= abs(subs_matrix_CH4_S1pl)
print(subs_matrix_CH4_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1pl)
subs_matrix_CH4_S1pl = np.vstack((zero_matrix_CH4_S1pl, subs_matrix_CH4_S1pl))
print(subs_matrix_CH4_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1pl = (tf,1)
decomp_tot_CH4_S1pl = np.zeros(matrix_tot_CH4_S1pl)
i = 0
while i < tf:
decomp_tot_CH4_S1pl[:,0] = decomp_tot_CH4_S1pl[:,0] + subs_matrix_CH4_S1pl[:,i]
i = i + 1
print(decomp_tot_CH4_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#make the results as absolute values
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#make the results as absolute values
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1nu,label='CH4_S1nu')
plt.plot(t,decomp_tot_CH4_S1pl,label='CH4_S1pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1nu(t,remainAGB_CO2_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1nu
#set zero matrix
output_decomp_CO2_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1nu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1nu[i:,i] = decomp_CO2_S1nu(t[:len(t)-i],remain_part_CO2_S1nu)
print(output_decomp_CO2_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1nu[:,i] = np.diff(output_decomp_CO2_S1nu[:,i])
i = i + 1
print(subs_matrix_CO2_S1nu[:,:4])
print(len(subs_matrix_CO2_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1nu = subs_matrix_CO2_S1nu.clip(max=0)
print(subs_matrix_CO2_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1nu = abs(subs_matrix_CO2_S1nu)
print(subs_matrix_CO2_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1nu)
subs_matrix_CO2_S1nu = np.vstack((zero_matrix_CO2_S1nu, subs_matrix_CO2_S1nu))
print(subs_matrix_CO2_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1nu = (tf,1)
decomp_tot_CO2_S1nu = np.zeros(matrix_tot_CO2_S1nu)
i = 0
while i < tf:
decomp_tot_CO2_S1nu[:,0] = decomp_tot_CO2_S1nu[:,0] + subs_matrix_CO2_S1nu[:,i]
i = i + 1
print(decomp_tot_CO2_S1nu[:,0])
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1pl(t,remainAGB_CO2_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1pl
#set zero matrix
output_decomp_CO2_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1pl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1pl[i:,i] = decomp_CO2_S1pl(t[:len(t)-i],remain_part_CO2_S1pl)
print(output_decomp_CO2_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1pl[:,i] = np.diff(output_decomp_CO2_S1pl[:,i])
i = i + 1
print(subs_matrix_CO2_S1pl[:,:4])
print(len(subs_matrix_CO2_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1pl = subs_matrix_CO2_S1pl.clip(max=0)
print(subs_matrix_CO2_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1pl= abs(subs_matrix_CO2_S1pl)
print(subs_matrix_CO2_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1pl)
subs_matrix_CO2_S1pl = np.vstack((zero_matrix_CO2_S1pl, subs_matrix_CO2_S1pl))
print(subs_matrix_CO2_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1pl = (tf,1)
decomp_tot_CO2_S1pl = np.zeros(matrix_tot_CO2_S1pl)
i = 0
while i < tf:
decomp_tot_CO2_S1pl[:,0] = decomp_tot_CO2_S1pl[:,0] + subs_matrix_CO2_S1pl[:,i]
i = i + 1
print(decomp_tot_CO2_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu
#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])
#make the results as absolute values
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl
#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])
#make the results as absolute values
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
i = 0
while i < tf:
decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1nu,label='CO2_S1nu')
plt.plot(t,decomp_tot_CO2_S1pl,label='CO2_S1pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
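#element-wise summation across lists, e.g.
#[sum(x) for x in zip(*[[1, 2, 3], [10, 20, 30]])] gives [11, 22, 33]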
Emissions_PF_PO_S1nu = [c_firewood_energy_S1nu, decomp_emissions[:,0], TestDSM1nu.o, PH_Emissions_PO_S1nu, PH_Emissions_HWP_S1nu, decomp_tot_CO2_S1nu[:,0]]
Emissions_PF_PO_S1pl = [c_firewood_energy_S1pl, decomp_emissions[:,0], TestDSM1pl.o, PH_Emissions_PO_S1pl, PH_Emissions_HWP_S1pl, decomp_tot_CO2_S1pl[:,0]]
Emissions_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o, PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu, decomp_tot_CO2_Enu[:,0]]
Emissions_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o, PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl, decomp_tot_CO2_Epl[:,0]]
Emissions_PF_PO_S1nu = [sum(x) for x in zip(*Emissions_PF_PO_S1nu)]
Emissions_PF_PO_S1pl = [sum(x) for x in zip(*Emissions_PF_PO_S1pl)]
Emissions_PF_PO_Enu = [sum(x) for x in zip(*Emissions_PF_PO_Enu)]
Emissions_PF_PO_Epl = [sum(x) for x in zip(*Emissions_PF_PO_Epl)]
#CH4_S1nu
Emissions_CH4_PF_PO_S1nu = decomp_tot_CH4_S1nu[:,0]
#CH4_S1pl
Emissions_CH4_PF_PO_S1pl = decomp_tot_CH4_S1pl[:,0]
#CH4_Enu
Emissions_CH4_PF_PO_Enu = decomp_tot_CH4_Enu[:,0]
#CH4_Epl
Emissions_CH4_PF_PO_Epl = decomp_tot_CH4_Epl[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1nu = Emissions_PF_PO_S1nu
Col2_S1pl = Emissions_PF_PO_S1pl
Col2_Enu = Emissions_PF_PO_Enu
Col2_Epl = Emissions_PF_PO_Epl
Col3_S1nu = Emissions_CH4_PF_PO_S1nu
Col3_S1pl = Emissions_CH4_PF_PO_S1pl
Col3_Enu = Emissions_CH4_PF_PO_Enu
Col3_Epl = Emissions_CH4_PF_PO_Epl
Col4 = flat_list_nucleus
Col5 = Emission_ref
Col6 = flat_list_plasma
#S1
df1_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1nu,'kg_CH4':Col3_S1nu,'kg_CO2_seq':Col4,'emission_ref':Col5})
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.neural_network import MLPRegressor, MLPClassifier
from seaborn import lineplot
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
### READ DATA INTO ONE DATAFRAME
a = pd.read_csv("T1YFFM.csv", index_col="DATE")
b = pd.read_csv("T5YFFM.csv", index_col="DATE")
c = pd.read_csv("T10YFFM.csv", index_col="DATE")
d = pd.read_csv("TB3SMFFM.csv", index_col="DATE")
r = pd.read_csv("USREC.csv", index_col="DATE")
data = pd.concat([a, b, c, d, r], axis=1)
data.index = pd.to_datetime(data.index)
data["T1YFFM_1"] = data["T1YFFM"].shift(1)
data["T5YFFM_1"] = data["T5YFFM"].shift(1)
data["T10YFFM_1"] = data["T10YFFM"].shift(1)
data["TB3SMFFM_1"] = data["TB3SMFFM"].shift(1)
data["T1YFFM_2"] = data["T1YFFM"].shift(2)
data["T5YFFM_2"] = data["T5YFFM"].shift(2)
data["T10YFFM_2"] = data["T10YFFM"].shift(2)
data["TB3SMFFM_2"] = data["TB3SMFFM"].shift(2)
# Create all target variables
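# future_rec_6m_i counts the recession months (USREC == 1) in the 6-month window
# starting i months ahead; reversing the series before and after the rolling sum
# turns pandas' trailing window into a forward-looking one.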
for i in range(18):
data["future_rec_6m_{}".format(str(i))] = (
data["USREC"]
.shift(-1 * i)
.iloc[::-1]
.rolling(6, min_periods=0)
.sum()
.iloc[::-1]
)
data = data.fillna(0)
def bad_model_test(data, date_filter, feature_cols, target_col, graph=False):
# filter
data_filtered = data[data.index < date_filter].copy(deep=True)
features = data_filtered[feature_cols].copy(deep=True)
target = data_filtered[[target_col]].copy(deep=True)
# Scale features
scaler_feature = MinMaxScaler()
scaler_feature.fit(features)
scaled_features = scaler_feature.transform(features)
# Scale target
scaler_target = MinMaxScaler()
scaler_target.fit(target)
scaled_target = np.ravel(scaler_target.transform(target))
regr = MLPRegressor(hidden_layer_sizes=(10, 10, 10))
model = regr.fit(scaled_features, scaled_target)
predictions = model.predict(scaled_features)
print(mse(predictions, scaled_target))
if graph:
# graph_results(data_filtered[['preds', target_col]])
graph_results(
data_filtered.index,
np.ravel(scaler_target.inverse_transform([predictions])),
np.ravel(scaler_target.inverse_transform([scaled_target])),
)
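# Example call (the cutoff date and 12-month horizon below are arbitrary choices;
# any of the generated "future_rec_6m_*" columns can serve as the target):
# bad_model_test(
#     data,
#     date_filter="2000-01-01",
#     feature_cols=["T1YFFM", "T5YFFM", "T10YFFM", "TB3SMFFM"],
#     target_col="future_rec_6m_12",
#     graph=True,
# )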
def graph_results(index, predictions, scaled_target):
fig, ax = plt.subplots(figsize=(25, 12))
# myFmt = mdates.DateFormatter("%y-%m")
# ax.xaxis.set_major_formatter(myFmt)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=12))
ax.set_title("Preds", size=30)
# wide_df = data[['preds', target_col]]
    wide_df = pd.DataFrame(index=index)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from linearmodels import PanelOLS
import statsmodels.api as sm
import econtools as econ
import econtools.metrics as mt
import math
from statsmodels.stats.outliers_influence import variance_inflation_factor
from auxiliary.prepare import *
from auxiliary.table2 import *
from auxiliary.table3 import *
from auxiliary.table4 import *
from auxiliary.table5 import *
from auxiliary.table6 import *
from auxiliary.table7 import *
from auxiliary.extension import *
from auxiliary.table_formula import *
def calc_vif(X):
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
return(vif)
def table5_setting(data):
df = data
df = df[((df['turin_co_sample']==1) | (df['turin_pr_sample']==1)) & ((df['post_experience']>=5)|(df['post_experience'].isnull()==True)) & ((df['pre_experience']>=5)|(df['pre_experience'].isnull()==True))& (df['missing']==0)]
df = df[(df['ctrl_pop_turin_co_sample']==1) | (df['ctrl_pop_turin_pr_sample']==1) | (df['ctrl_exp_turin_co_sample']==1) | (df['ctrl_exp_turin_pr_sample']==1) | (df['ctrl_pop_exp_turin_co_sample']==1) | (df['ctrl_pop_exp_turin_pr_sample']==1)]
df = df.reset_index()
#re-construct trend-pa: setting
id_auth_remained = df['id_auth'].unique()
id_auth_remained_df = pd.DataFrame({'id_auth': [], 'group_num': []})
for i in range(len(id_auth_remained)):
id_auth_remained_df.loc[i,'id_auth'] = id_auth_remained[i]
id_auth_remained_df.loc[i,'group_num'] = i+1
for i in range(len(df)):
for j in range(len(id_auth_remained_df)):
if df.loc[i, 'id_auth'] == id_auth_remained_df.loc[j, 'id_auth']:
df.loc[i, 'id_auth_remained'] = j+1
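    #note: the two loops above are effectively df['id_auth_remained'] = pd.factorize(df['id_auth'])[0] + 1,
    #i.e. consecutive group numbers in order of first appearance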
id_auth_remained_dum = pd.get_dummies(df['id_auth_remained']).rename(columns=lambda x: 'id_auth_remained' + str(x))
df = pd.concat([df, id_auth_remained_dum],axis = 1)
#re-construct trend-pa
for i in range(len(id_auth_remained_dum.columns)):
df['trend_pa_remained_'+str(i+1)] = 0
for j in range(len(df)):
if df.loc[j, id_auth_remained_dum.columns[i]]==1 and df.loc[j, 'authority_code']!=3090272 and df.loc[j, 'authority_code']!=3070001:
df.loc[j,'trend_pa_remained_'+str(i+1)] = 1
df.drop([id_auth_remained_dum.columns[i]],axis = 1)
return(df)
def table5_PanelA_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
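        #reference categories are dropped from each dummy group to avoid perfect
        #collinearity with the constant (dummy variable trap)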
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelA_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('year_dum_2006.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
import altair as alt
import pandas as pd
import seaborn as sns
import six
from .util import build_dataframe, size_chart, vega_palette
from .pyplot import fill_between, plot, scatter as pscatter
__all__ = ["regplot", "lmplot"]
def regplot(
x, y, data=None, x_estimator=None, x_bins=None, x_ci="ci",
x_range=None, y_range=None, truncate=False,
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
order=1, logistic=False, lowess=False, robust=False, logx=False,
color=None, scatter_kws={}, line_kws={}, ax=None,
palette=None, height=None, aspect=1, color_scale=None
):
if data is None:
data, names = build_dataframe({"x": x, "y": y})
x, y = names["x"], names["y"]
if x_range is None:
x_raw_range = (data[x].min(), data[x].max())
x_pad = 0.05*(x_raw_range[1] - x_raw_range[0])
x_range = (x_raw_range[0] - x_pad, x_raw_range[1] + x_pad)
def plot_regression(data, color):
p = sns.regression._RegressionPlotter(
data[x], data[y], x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
n_boot=n_boot, units=units, ci=ci, truncate=truncate,
order=order, logistic=logistic, lowess=lowess, robust=robust, logx=logx
)
layers = []
grid, yhat, err_bands = p.fit_regression(x_range=x_range)
layers.append(plot(grid, yhat, color=color, **line_kws))
if err_bands is not None:
area = fill_between(grid, *err_bands, color=color)
area.encoding.opacity = alt.value(0.15)
layers.append(area)
return layers
def plot_scatter(data, color):
p = sns.regression._RegressionPlotter(
data[x], data[y], x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
n_boot=n_boot, units=units, ci=ci, truncate=truncate,
order=order, logistic=logistic, lowess=lowess, robust=robust, logx=logx
)
layers = []
if p.x_estimator is None:
layers.append(pscatter(x, y, data=data, color=color, **scatter_kws))
else:
xs, ys, cis = p.estimate_data
if [ci for ci in cis if ci is not None]:
for xval, cci in zip(xs, cis):
                ci_df = pd.DataFrame({x: [xval, xval], y: cci})
import imgaug.augmenters as iaa
import numpy as np
import torch
from pose_est_nets.utils.io import (
check_if_semi_supervised,
set_or_open_folder,
get_latest_version,
)
from pose_est_nets.models.heatmap_tracker import (
HeatmapTracker,
SemiSupervisedHeatmapTracker,
)
from pose_est_nets.models.regression_tracker import (
RegressionTracker,
SemiSupervisedRegressionTracker,
)
import matplotlib.pyplot as plt
import os
from typing import Callable, Optional, Tuple, List, Union, Literal
from typeguard import typechecked
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf
from pathlib import Path
def get_videos_in_dir(video_dir: str) -> List[str]:
# gather videos to process
    # TODO: check if you're given a path to a single video?
assert os.path.isdir(video_dir)
all_files = [video_dir + "/" + f for f in os.listdir(video_dir)]
video_files = []
for f in all_files:
if f.endswith(".mp4"):
video_files.append(f)
if len(video_files) == 0:
raise IOError("Did not find any video files (.mp4) in %s" % video_dir)
return video_files
def get_model_class(map_type: str, semi_supervised: bool):
    """Return the model class matching the given head type and supervision mode.
Args:
map_type (str): "regression" | "heatmap"
semi_supervised (bool): True if you want to use unlabeled videos
Returns:
a ptl model class to be initialized outside of this function.
"""
if not (semi_supervised):
if map_type == "regression":
return RegressionTracker
elif map_type == "heatmap":
return HeatmapTracker
else:
raise NotImplementedError(
"%s is an invalid map_type for a fully supervised model" % map_type
)
else:
if map_type == "regression":
return SemiSupervisedRegressionTracker
elif map_type == "heatmap":
return SemiSupervisedHeatmapTracker
else:
raise NotImplementedError(
"%s is an invalid map_type for a semi-supervised model" % map_type
)
def load_model_from_checkpoint(cfg: DictConfig, ckpt_file: str, eval: bool = False):
    """Load a trained model from a specific .ckpt file, using the standard hydra
    config to pick the right model class and loss settings."""
from pose_est_nets.utils.io import check_if_semi_supervised
semi_supervised = check_if_semi_supervised(cfg.model.losses_to_use)
# pick the right model class
ModelClass = get_model_class(
map_type=cfg.model.model_type,
semi_supervised=semi_supervised,
)
# initialize a model instance, with weights loaded from .ckpt file
if semi_supervised:
model = ModelClass.load_from_checkpoint(
ckpt_file,
semi_super_losses_to_use=OmegaConf.to_object(cfg.model.losses_to_use),
loss_params=OmegaConf.to_object(cfg.losses),
)
else:
model = ModelClass.load_from_checkpoint(ckpt_file)
if eval:
model.eval()
return model
def saveNumericalPredictions(model, datamod, threshold):
i = 0
# hardcoded for mouse data
rev_augmenter = []
rev_augmenter.append(
iaa.Resize({"height": 406, "width": 396})
) # get rid of this for the fish
rev_augmenter = iaa.Sequential(rev_augmenter)
model.eval()
full_dl = datamod.full_dataloader()
test_dl = datamod.test_dataloader()
final_gt_keypoints = np.empty(shape=(len(test_dl), model.num_keypoints, 2))
final_imgs = np.empty(
shape=(len(test_dl), 406, 396, 1)
) # TODO: specific to Rick data
final_preds = np.empty(shape=(len(test_dl), model.num_keypoints, 2))
# dpk_final_preds = np.empty(shape = (len(test_dl), model.num_keypoints, 2))
for idx, batch in enumerate(test_dl):
x, y = batch
heatmap_pred = model.forward(x)
if torch.cuda.is_available():
heatmap_pred = heatmap_pred.cuda()
y = y.cuda()
pred_keypoints, y_keypoints = model.computeSubPixMax(heatmap_pred, y, threshold)
# dpk_final_preds[i] = pred_keypoints
pred_keypoints = pred_keypoints.cpu()
y_keypoints = y_keypoints.cpu()
x = x[:, 0, :, :] # only taking one image dimension
x = np.expand_dims(x, axis=3)
final_imgs[i], final_gt_keypoints[i] = rev_augmenter(
images=x, keypoints=np.expand_dims(y_keypoints, axis=0)
)
final_imgs[i], final_preds[i] = rev_augmenter(
images=x, keypoints=np.expand_dims(pred_keypoints, axis=0)
)
# final_gt_keypoints[i] = y_keypoints
# final_preds[i] = pred_keypoints
i += 1
final_gt_keypoints = np.reshape(
final_gt_keypoints, newshape=(len(test_dl), model.num_targets)
)
final_preds = np.reshape(final_preds, newshape=(len(test_dl), model.num_targets))
# dpk_final_preds = np.reshape(dpk_final_preds, newshape = (len(test_dl), model.num_targets))
# np.savetxt('../../preds/mouse_gt.csv', final_gt_keypoints, delimiter = ',', newline = '\n')
folder_name = get_latest_version("lightning_logs")
csv_folder = set_or_open_folder(os.path.join("preds", folder_name))
np.savetxt(
os.path.join(csv_folder, "preds.csv"), final_preds, delimiter=",", newline="\n"
)
# np.savetxt('../preds/dpk_fish_predictions.csv', dpk_final_preds, delimiter = ',', newline = '\n')
return
def plotPredictions(model, datamod, save_heatmaps, threshold, mode):
folder_name = get_latest_version("lightning_logs")
img_folder = set_or_open_folder(os.path.join("preds", folder_name, "images"))
if save_heatmaps:
heatmap_folder = set_or_open_folder(
os.path.join("preds", folder_name, "heatmaps")
)
model.eval()
if mode == "train":
dl = datamod.train_dataloader()
else:
dl = datamod.test_dataloader()
i = 0
for idx, batch in enumerate(dl):
x, y = batch
heatmap_pred = model.forward(x)
if save_heatmaps:
plt.imshow(heatmap_pred[0, 4].detach().cpu().numpy())
plt.savefig(os.path.join(heatmap_folder, "pred_map_%i" % i + ".png"))
plt.clf()
plt.imshow(y[0, 4].detach().cpu().numpy())
plt.savefig(os.path.join(heatmap_folder, "gt_map_%i" % i + ".png"))
plt.clf()
# if torch.cuda.is_available():
# heatmap_pred = heatmap_pred.cuda()
# y = y.cuda()
# TODO: that works, but remove cuda calls! threshold is on model which is on cuda, heatmap_pred and y are on CPU after saving heatmaps
pred_keypoints, y_keypoints = model.computeSubPixMax(
heatmap_pred.cuda(), y.cuda(), threshold
)
plt.imshow(x[0][0])
pred_keypoints = pred_keypoints.cpu()
y_keypoints = y_keypoints.cpu()
plt.scatter(pred_keypoints[:, 0], pred_keypoints[:, 1], c="blue")
plt.scatter(y_keypoints[:, 0], y_keypoints[:, 1], c="orange")
plt.savefig(os.path.join(img_folder, "pred_%i" % i + ".png"))
plt.clf()
i += 1
def predict_videos(
video_dir: str,
ckpt_file: str,
cfg_file: Union[str, DictConfig],
save_dir: Optional[str] = None,
sequence_length: int = 16,
device: Literal["gpu", "cuda", "cpu"] = "gpu",
video_pipe_kwargs={},
):
"""Loop over a list of videos and process with tracker using DALI for fast inference.
Args:
video_dir (str): process all videos located in this directory
ckpt_file (str): .ckpt file for model
cfg_file (str): yaml file saved by hydra; must contain
- cfg_file.losses
- cfg_file.data.image_orig_dims
- cfg_file.data.image_resize_dims
- cfg_file.model.losses_to_use
- cfg_file.model.model_type
        save_dir (str): directory for the tracked-point output (hdf5/csv); if
            NoneType, the output will be saved in the video path
sequence_length (int)
        device (str): "gpu" | "cuda" | "cpu"
video_pipe_kwargs (dict): extra keyword-value argument pairs for
`pose_est_nets.datasets.DALI.video_pipe` function
TODO: support different video formats
"""
import csv
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
from nvidia.dali.plugin.pytorch import LastBatchPolicy
import nvidia.dali.types as types
from omegaconf import OmegaConf
import pandas as pd
import time
from pose_est_nets.datasets.dali import video_pipe, LightningWrapper, count_frames
from pose_est_nets.datasets.datasets import BaseTrackingDataset, HeatmapDataset
from pose_est_nets.models.regression_tracker import (
RegressionTracker,
SemiSupervisedRegressionTracker,
)
from pose_est_nets.models.heatmap_tracker import (
HeatmapTracker,
SemiSupervisedHeatmapTracker,
)
from pose_est_nets.utils.io import (
set_or_open_folder,
get_latest_version,
)
if device == "gpu" or device == "cuda":
device_pt = "cuda"
device_dali = "gpu"
elif device == "cpu":
device_pt = "cpu"
device_dali = "cpu"
else:
raise NotImplementedError("must choose 'gpu' or 'cpu' for `device` argument")
# gather videos to process
assert os.path.exists(video_dir)
all_files = [video_dir + "/" + f for f in os.listdir(video_dir)]
video_files = []
for f in all_files:
if f.endswith(".mp4"):
video_files.append(f)
if len(video_files) == 0:
raise IOError("Did not find any video files (.mp4) in %s" % video_dir)
if isinstance(cfg_file, str):
# load configuration file
with open(cfg_file, "r") as f:
cfg = OmegaConf.load(f)
elif isinstance(cfg_file, DictConfig):
cfg = cfg_file
else:
raise ValueError("cfg_file must be str or DictConfig, not %s!" % type(cfg_file))
model = load_model_from_checkpoint(cfg=cfg, ckpt_file=ckpt_file, eval=True)
model.to(device_pt)
# set some defaults
batch_size = (
1 # don't change this, change sequence length (exposed to user) instead
)
video_pipe_kwargs_defaults = {"num_threads": 2, "device_id": 0}
for key, val in video_pipe_kwargs_defaults.items():
if key not in video_pipe_kwargs.keys():
video_pipe_kwargs[key] = val
# loop over videos
for video_file in video_files:
print("Processing video at %s" % video_file)
# build video loader/pipeline
pipe = video_pipe(
resize_dims=(
cfg.data.image_resize_dims.height,
cfg.data.image_resize_dims.width,
),
batch_size=batch_size,
sequence_length=sequence_length,
filenames=[video_file],
random_shuffle=False,
device=device_dali,
name="reader",
pad_sequences=True,
**video_pipe_kwargs
)
predict_loader = LightningWrapper(
pipe,
output_map=["x"],
last_batch_policy=LastBatchPolicy.FILL,
last_batch_padded=False,
auto_reset=False,
reader_name="reader",
)
# iterate through video
n_frames_ = count_frames(video_file) # total frames in video
n_frames = 0 # total frames processed
keypoints_np = np.zeros((n_frames_, model.num_keypoints * 2))
confidence_np = np.zeros((n_frames_, model.num_keypoints))
t_beg = time.time()
n = -1
        # TODO: separate it out from the function
with torch.no_grad():
for n, batch in enumerate(tqdm(predict_loader)):
outputs = model.forward(batch)
if cfg.model.model_type == "heatmap":
pred_keypoints, confidence = model.run_subpixelmaxima(outputs)
# send to cpu
pred_keypoints = pred_keypoints.detach().cpu().numpy()
confidence = confidence.detach().cpu().numpy()
else:
pred_keypoints = outputs.detach().cpu().numpy()
confidence = np.zeros((outputs.shape[0], outputs.shape[1] // 2))
n_frames_curr = pred_keypoints.shape[0]
if n_frames + n_frames_curr > n_frames_:
# final sequence
final_batch_size = n_frames_ - n_frames
keypoints_np[n_frames:] = pred_keypoints[:final_batch_size]
confidence_np[n_frames:] = confidence[:final_batch_size]
n_frames_curr = final_batch_size
else: # at every sequence except the final
keypoints_np[n_frames : n_frames + n_frames_curr] = pred_keypoints
confidence_np[n_frames : n_frames + n_frames_curr] = confidence
n_frames += n_frames_curr
t_end = time.time()
if n == -1:
print(
"WARNING: issue processing %s" % video_file
) # TODO: what can go wrong here?
continue
else:
print(
"inference speed: %1.2f fr/sec"
% ((n * sequence_length) / (t_end - t_beg))
)
# save csv file of predictions in DeepLabCut format
num_joints = model.num_keypoints
predictions = np.zeros((keypoints_np.shape[0], num_joints * 3))
predictions[:, 0] = np.arange(keypoints_np.shape[0])
# put x vals back in original pixel space
x_resize = cfg.data.image_resize_dims.width
x_og = cfg.data.image_orig_dims.width
predictions[:, 0::3] = keypoints_np[:, 0::2] / x_resize * x_og
# put y vals back in original pixel space
y_resize = cfg.data.image_resize_dims.height
y_og = cfg.data.image_orig_dims.height
predictions[:, 1::3] = keypoints_np[:, 1::2] / y_resize * y_og
predictions[:, 2::3] = confidence_np
# ugly but what can you do sometimes
import sys
sys.path.append('/home/eivinas/dev/dlc-frame-selection/scripts/')
from config import BODYPARTS
joint_labels = BODYPARTS[cfg.data.dataset]
# build data frame
xyl_labels = ["x", "y", "likelihood"]
pdindex = pd.MultiIndex.from_product(
[["%s_tracker" % cfg.model.model_type], joint_labels, xyl_labels],
names=["scorer", "bodyparts", "coords"],
)
        df = pd.DataFrame(predictions, columns=pdindex)
# coding: utf-8
# # Visualize E-GEOD-33245 patterns
# This notebook will examine patterns of generic and experiment-specific genes using E-GEOD-33245 as the template experiment
#
# This experiment contains multiple comparisons/conditions:
#
# * grp_1v2 compares WT vs *crc* mutants
# * grp_1v3 compares WT vs *cbrB* mutants
# * grp_1v4 compares WT vs *crcZ* mutant
# * grp_1v5 compares WT in LB vs WT in BSM
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import pandas as pd
import seaborn as sns
import numpy as np
from generic_expression_patterns_modules import process
# In[2]:
# Load data
# Summary data using abs value of test statistic
grp_1v2_file = "generic_gene_summary_E-GEOD-33245_1v2.tsv"
grp_1v3_file = "generic_gene_summary_E-GEOD-33245_1v3.tsv"
grp_1v4_file = "generic_gene_summary_E-GEOD-33245_1v4.tsv"
grp_1v5_file = "generic_gene_summary_E-GEOD-33245_1v5.tsv"
# Summary data using raw value of test statistic to get directionality
grp_1v2_raw_file = "generic_gene_summary_E-GEOD-33245_1v2_raw.tsv"
grp_1v3_raw_file = "generic_gene_summary_E-GEOD-33245_1v3_raw.tsv"
grp_1v4_raw_file = "generic_gene_summary_E-GEOD-33245_1v4_raw.tsv"
grp_1v5_raw_file = "generic_gene_summary_E-GEOD-33245_1v5_raw.tsv"
# In[3]:
# User parameters
# FDR adjusted p-value cutoff to use to define DEGs
pvalue_threshold = 0.05
# Get predicted generic DEGs using z-score cutoff
# The z-score cutoff is the inverse normal quantile corresponding to
# a p-value of 0.05/5549. Here we are using a p-value = 0.05
# with a Bonferroni correction for 5549 tests, which are
# the number of P. aeruginosa genes
zscore_threshold = 4.44
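# A quick sanity check of where this cutoff comes from (assuming the Bonferroni
# correction is meant two-sided, which lands close to the 4.44 used above):
# In[ ]:
from scipy.stats import norm
# inverse normal quantile for a two-sided p = 0.05, Bonferroni-corrected over 5549 genes
print(norm.ppf(1 - pvalue_threshold / (2 * 5549)))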
# ## Create dataframe to compare trends
# We are going to merge data across different conditions. For example, we will merge `grp_1v2` and `grp_1v3` to use for plotting later in this notebook. The Hogan lab can look at these tables to find *things of interest* as we start looking into how to use our computational predictions of generic and specific genes.
# In[4]:
# Read data
grp_1v2 = pd.read_csv(grp_1v2_file, sep="\t", header=0, index_col=0)
grp_1v3 = pd.read_csv(grp_1v3_file, sep="\t", header=0, index_col=0)
grp_1v4 = pd.read_csv(grp_1v4_file, sep="\t", header=0, index_col=0)
grp_1v5 = pd.read_csv(grp_1v5_file, sep="\t", header=0, index_col=0)
grp_1v2_raw = pd.read_csv(grp_1v2_raw_file, sep="\t", header=0, index_col=0)
grp_1v3_raw = pd.read_csv(grp_1v3_raw_file, sep="\t", header=0, index_col=0)
grp_1v4_raw = pd.read_csv(grp_1v4_raw_file, sep="\t", header=0, index_col=0)
grp_1v5_raw = pd.read_csv(grp_1v5_raw_file, sep="\t", header=0, index_col=0)
# In[5]:
# Merge summary dfs using abs log2 FC and using raw values
merged_1v2s_df = process.merge_abs_raw_dfs(grp_1v2, grp_1v2_raw, '1v2')
merged_1v3s_df = process.merge_abs_raw_dfs(grp_1v3, grp_1v3_raw, '1v3')
merged_1v4s_df = process.merge_abs_raw_dfs(grp_1v4, grp_1v4_raw, '1v4')
merged_1v5s_df = process.merge_abs_raw_dfs(grp_1v5, grp_1v5_raw, '1v5')
# In[6]:
# Merge 1v2 and 1v3 summary dfs
merged_1v2_1v3_df = process.merge_two_conditions_df(merged_1v2s_df, merged_1v3s_df, '1v2', '1v3')
merged_1v2_1v3_df.head()
# In[7]:
# Merge 1v3 and 1v4 summaries
merged_1v3_1v4_df = process.merge_two_conditions_df(merged_1v3s_df, merged_1v4s_df, '1v3', '1v4')
merged_1v3_1v4_df.head()
# In[8]:
# Merge 1v2 and 1v4 summaries
merged_1v2_1v4_df = process.merge_two_conditions_df(merged_1v2s_df, merged_1v4s_df, '1v2', '1v4')
merged_1v2_1v4_df.head()
# In[9]:
# Save
merged_1v2_1v3_df.to_csv("merged_E-GEOD_1v2_1v3_directionality.tsv", sep="\t")
merged_1v2_1v4_df.to_csv("merged_E-GEOD_1v2_1v4_directionality.tsv", sep="\t")
merged_1v3_1v4_df.to_csv("merged_E-GEOD_1v3_1v4_directionality.tsv", sep="\t")
# ## Compare trends across different conditions
#
# We want to compare across different conditions. For example, given:
# * Group 1v2: WT vs *crc* mutant
# * Group 1v3: WT vs *cbr* mutant
#
# We can then compare the test statistic between these two groups above. We hope to see that,
# * Genes 1v3 > 1v2 are genes that change more in 1v3 than 1v2 and we guess are specific to 1v3 (high z-score)
# * Genes 1v3 < 1v2 are genes that change more in 1v2 than 1v3 and we guess are specific to 1v2 (high z-score)
# * Genes on the 1v3 = 1v2 line are those genes that change equally in both and we guess are generic genes (low z-score)
# ### 1v2 compared with 1v3
# In[10]:
process.plot_two_conditions(merged_1v2_1v3_df, "1v2", "1v3", "WT vs crc mutant", "WT vs cbrB mutant")
# ### 1v2 compared with 1v4
# In[11]:
process.plot_two_conditions(merged_1v2_1v4_df, "1v2", "1v4", "WT vs crc mutant", "WT vs crcZ mutant")
# ### 1v3 compared with 1v4
# In[12]:
process.plot_two_conditions(merged_1v3_1v4_df, "1v3", "1v4", "WT vs cbrB mutant", "WT vs crcZ mutant")
# **Takeaway:**
# Genes with a high specificity score (i.e. a high absolute z-score) lie in the off-diagonal regions, as expected, since those regions contain genes that are more differentially expressed in one condition than in the other. This shows some promise for using the z-score to distinguish generic from specific genes, and we can start looking further into these trends.
# ## DEGs found using traditional criteria and using z-score
#
# DE analysis can return hundreds of genes when using the traditional criteria alone (FDR-adjusted p-value < 0.05 and |log2 fold change| > 2). We want to see which genes are selected when we add the z-score as an additional criterion to indicate genes that are specific to the perturbagen in question.
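# A minimal sketch of that combined filter (illustrative only: the real selection happens
# inside process.get_and_save_DEG_lists, and the summary-table column names are passed in
# as parameters here because the exact names are not assumed):
def select_specific_degs_sketch(summary_df, padj_col, log2fc_col, zscore_col):
return summary_df[
(summary_df[padj_col] < pvalue_threshold)
& (summary_df[log2fc_col].abs() > 2)
& (summary_df[zscore_col].abs() > zscore_threshold)
]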
# ### 1v2
# In[13]:
(DEGs_1v2_df,
degs_1v2_traditional,
degs_1v2_specific,
degs_1v2_generic,
degs_1v2_intersect,
degs_1v2_intersect_generic,
degs_1v2_diff) = process.get_and_save_DEG_lists(merged_1v2s_df, '1v2', pvalue_threshold, zscore_threshold)
# In[14]:
process.plot_venn(degs_1v2_traditional, degs_1v2_specific, degs_1v2_generic)
# In[15]:
process.plot_volcanos(degs_1v2_intersect, degs_1v2_diff, merged_1v2s_df, "1v2", "WT vs crc mutant")
# ### 1v3
# In[16]:
(DEGs_1v3_df,
degs_1v3_traditional,
degs_1v3_specific,
degs_1v3_generic,
degs_1v3_intersect,
degs_1v3_intersect_generic,
degs_1v3_diff) = process.get_and_save_DEG_lists(merged_1v3s_df, '1v3', pvalue_threshold, zscore_threshold)
# In[17]:
process.plot_venn(degs_1v3_traditional, degs_1v3_specific, degs_1v3_generic)
# In[18]:
process.plot_volcanos(degs_1v3_intersect, degs_1v3_diff, merged_1v3s_df, "1v3", "WT vs cbrB mutant")
# ### 1v4
# In[19]:
(DEGs_1v4_df,
degs_1v4_traditional,
degs_1v4_specific,
degs_1v4_generic,
degs_1v4_intersect,
degs_1v4_intersect_generic,
degs_1v4_diff) = process.get_and_save_DEG_lists(merged_1v4s_df, '1v4', pvalue_threshold, zscore_threshold)
# In[20]:
process.plot_venn(degs_1v4_traditional, degs_1v4_specific, degs_1v4_generic)
# In[21]:
process.plot_volcanos(degs_1v4_intersect, degs_1v4_diff, merged_1v4s_df, "1v4", "WT vs crcZ mutant")
# ### 1v5
# In[22]:
(DEGs_1v5_df,
degs_1v5_traditional,
degs_1v5_specific,
degs_1v5_generic,
degs_1v5_intersect,
degs_1v5_intersect_generic,
degs_1v5_diff) = process.get_and_save_DEG_lists(merged_1v5s_df, '1v5', pvalue_threshold, zscore_threshold)
# In[23]:
process.plot_venn(degs_1v5_traditional, degs_1v5_specific, degs_1v5_generic)
# In[24]:
process.plot_volcanos(degs_1v5_intersect, degs_1v5_diff, merged_1v5s_df, "1v5", "WT LB vs WT BSM")
# In[25]:
# Save DEGs to file to share with Hogan lab
degs_all_df = pd.DataFrame({'1v2 traditional + specific': pd.Series(degs_1v2_intersect),
'1v2 specific only': pd.Series(degs_1v2_diff),
'1v2 traditional + generic': pd.Series(degs_1v2_intersect_generic),
'1v3 traditional + specific': pd.Series(degs_1v3_intersect),
'1v3 specific only': | pd.Series(degs_1v3_diff) | pandas.Series |
import os
import pandas as pd
from typing import Any
from django.contrib.gis.geos import LineString, MultiLineString
def mission_planner_convert_log(url: str) -> list:
""" This function takes in a string url of the .waypoints, .txt or .json
file exported from the mission planner flight plan
It returns an array of coordinates for each point
Returns:
[array] -- [
[long, lat],
[long, lat],
...
]
"""
data = pd.read_table(str(url), delim_whitespace=True)
df = pd.DataFrame(data)
df.to_csv("me.csv",)
datatest = pd.DataFrame(( | pd.read_csv("me.csv", index_col=0) | pandas.read_csv |
import os
import random
import re
import sys
from shutil import copyfile
import cv2
import numpy as np
import pandas as pd
import pydicom as dicom
import torch
from PIL import Image
import cn.protect.quality as quality
from cn.protect.hierarchy import OrderHierarchy
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from cn.protect import Protect
from cn.protect.privacy import KAnonymity
from functools import reduce
from logger import logPrint
class DatasetInterface(Dataset):
def __init__(self, labels):
self.labels = torch.tensor(labels, dtype=torch.long)
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
raise Exception("Method should be implemented in subclass.")
def getInputSize(self):
raise Exception("Method should be implemented by subclasses where "
"models requires input size update (based on dataset).")
def zeroLabels(self):
self.labels = torch.zeros(len(self.labels), dtype=torch.long)
class DatasetLoader:
"""Parent class used for specifying the data loading workflow """
def getDatasets(self, percUsers, labels, size=(None, None)):
raise Exception("LoadData method should be override by child class, "
"specific to the loaded dataset strategy.")
@staticmethod
def _filterDataByLabel(labels, trainDataframe, testDataframe):
trainDataframe = trainDataframe[trainDataframe['labels'].isin(labels)]
testDataframe = testDataframe[testDataframe['labels'].isin(labels)]
return trainDataframe, testDataframe
@staticmethod
def _splitTrainDataIntoClientDatasets(percUsers, trainDataframe, DatasetType):
DatasetLoader._setRandomSeeds()
percUsers = percUsers / percUsers.sum()
dataSplitCount = (percUsers * len(trainDataframe)).floor().numpy()
_, *dataSplitIndex = [int(sum(dataSplitCount[range(i)])) for i in range(len(dataSplitCount))]
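# e.g. percUsers = [0.5, 0.3, 0.2] on 100 training rows (illustrative numbers) gives
# dataSplitCount = [50, 30, 20] and dataSplitIndex = [50, 80], so np.split below
# yields client chunks of 50/30/20 rows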
trainDataframes = np.split(trainDataframe, indices_or_sections=dataSplitIndex)
clientDatasets = [DatasetType(clientDataframe.reset_index(drop=True))
for clientDataframe in trainDataframes]
return clientDatasets
@staticmethod
def _setRandomSeeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When anonymizing the clients' datasets using _anonymizeClientDatasets the function passed as
# parameter should take as parameter the cn.protect object and set ds specific generalisations
@staticmethod
def _anonymizeClientDatasets(clientDatasets, columns, k, quasiIds, setHierarchiesMethod):
datasetClass = clientDatasets[0].__class__
resultDataframes = []
clientSyntacticMappings = []
dataframes = [pd.DataFrame(list(ds.dataframe['data']), columns=columns) for ds in clientDatasets]
for dataframe in dataframes:
anonIndex = dataframe.groupby(quasiIds)[dataframe.columns[0]].transform('size') >= k
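# transform('size') attaches to every row the number of rows that share its quasi-identifier
# combination; rows whose combination already occurs >= k times satisfy k-anonymity as-is,
# and only the remaining rows are generalised through cn.protect below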
anonDataframe = dataframe[anonIndex]
needProtectDataframe = dataframe[~anonIndex]
# Might want to screenshot these for the report:
# print(anonDataframe)
# print(needProtectDataframe)
protect = Protect(needProtectDataframe, KAnonymity(k))
protect.quality_model = quality.Loss()
# protect.quality_model = quality.Classification()
protect.suppression = 0
for qid in quasiIds:
protect.itypes[qid] = 'quasi'
setHierarchiesMethod(protect)
protectedDataframe = protect.protect()
mappings = protectedDataframe[quasiIds].drop_duplicates().to_dict('records')
clientSyntacticMappings.append(mappings)
protectedDataframe = pd.get_dummies(protectedDataframe)
resultDataframe = pd.concat([anonDataframe, protectedDataframe]).fillna(0).sort_index()
resultDataframes.append(resultDataframe)
# All clients datasets should have same columns
allColumns = set().union(*[df.columns.values for df in resultDataframes])
for resultDataframe in resultDataframes:
for col in allColumns - set(resultDataframe.columns.values):
resultDataframe[col] = 0
# Create new datasets by adding the labels to
anonClientDatasets = []
for resultDataframe, initialDataset in zip(resultDataframes, clientDatasets):
labels = initialDataset.dataframe['labels'].values
labeledDataframe = pd.DataFrame(zip(resultDataframe.values, labels))
labeledDataframe.columns = ['data', 'labels']
anonClientDatasets.append(datasetClass(labeledDataframe))
return anonClientDatasets, clientSyntacticMappings, allColumns
def _anonymizeTestDataset(self, testDataset, clientSyntacticMappings, columns, generalizedColumns):
datasetClass = testDataset.__class__
dataframe = pd.DataFrame(list(testDataset.dataframe['data']), columns=columns)
domainsSize = dict()
quasiIds = clientSyntacticMappings[0][0].keys()
for quasiId in quasiIds:
domainsSize[quasiId] = dataframe[quasiId].max() - dataframe[quasiId].min()
generalisedDataframe = pd.DataFrame(dataframe)
ungeneralisedIndex = []
for i in range(len(dataframe)):
legitMappings = []
for clientMappings in clientSyntacticMappings:
legitMappings += [mapping for mapping in clientMappings
if self.__legitMapping(dataframe.iloc[i], mapping)]
if legitMappings:
# leastGeneralMapping = reduce(self.__leastGeneral, legitMappings)
leastGeneralMapping = legitMappings[0]
for legitMapping in legitMappings[1:]:
leastGeneralMapping = self.__leastGeneral(leastGeneralMapping, legitMapping, domainsSize)
for col in leastGeneralMapping:
generalisedDataframe[col][i] = leastGeneralMapping[col]
else:
ungeneralisedIndex.append(i)
generalisedDataframe = generalisedDataframe.drop(i)
generalisedDataframe = pd.get_dummies(generalisedDataframe)
ungeneralisedDataframe = dataframe.iloc[ungeneralisedIndex]
resultDataframe = pd.concat([ungeneralisedDataframe, generalisedDataframe]).fillna(0).sort_index()
for col in generalizedColumns - set(resultDataframe.columns.values):
resultDataframe[col] = 0
labels = testDataset.dataframe['labels'].values
labeledDataframe = pd.DataFrame(zip(resultDataframe.values, labels))
labeledDataframe.columns = ['data', 'labels']
return datasetClass(labeledDataframe)
@staticmethod
def __leastGeneral(map1, map2, domainSize):
map1Generality = map2Generality = 0
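# A mapping's generality is the sum over its quasi-identifiers of (interval width / domain size);
# e.g. an Age interval string such as "[20.0, 40.0)" (exact format depends on cn.protect)
# contributes (40.0 - 20.0) / domainSize['Age'], while an exact value contributes nothing.
# The mapping with the smaller total is returned as the less general one.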
for col in map1:
if isinstance(map1[col], str):
interval = np.array(re.findall(r'\d+.\d+', map1[col]), dtype=np.float)
map1Generality += (interval[1] - interval[0]) / domainSize[col]
for col in map2:
if isinstance(map1[col], str):
interval = np.array(re.findall(r'\d+.\d+', map2[col]), dtype=np.float)
map2Generality += (interval[1] - interval[0]) / domainSize[col]
return map1 if map1Generality <= map2Generality else map2
@staticmethod
def __legitMapping(entry, mapping):
for col in mapping:
if not isinstance(mapping[col], str):
if entry[col] != mapping[col]:
return False
else:
interval = np.array(re.findall(r'\d+.\d+', mapping[col]), dtype=np.float)
if entry[col] < interval[0] or entry[col] >= interval[1]:
return False
return True
class DatasetLoaderMNIST(DatasetLoader):
def getDatasets(self, percUsers, labels, size=None):
logPrint("Loading MNIST...")
self._setRandomSeeds()
data = self.__loadMNISTData()
trainDataframe, testDataframe = self._filterDataByLabel(labels, *data)
clientDatasets = self._splitTrainDataIntoClientDatasets(percUsers, trainDataframe, self.MNISTDataset)
testDataset = self.MNISTDataset(testDataframe)
return clientDatasets, testDataset
@staticmethod
def __loadMNISTData():
trans = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
# if not exist, download mnist dataset
trainSet = datasets.MNIST('data', train=True, transform=trans, download=True)
testSet = datasets.MNIST('data', train=False, transform=trans, download=True)
# Scale pixel intensities to [-1, 1]
xTrain = trainSet.train_data
xTrain = 2 * (xTrain.float() / 255.0) - 1
# list of 2D images to 1D pixel intensities
xTrain = xTrain.flatten(1, 2).numpy()
yTrain = trainSet.train_labels.numpy()
# Scale pixel intensities to [-1, 1]
xTest = testSet.test_data.clone().detach()
xTest = 2 * (xTest.float() / 255.0) - 1
# list of 2D images to 1D pixel intensities
xTest = xTest.flatten(1, 2).numpy()
yTest = testSet.test_labels.numpy()
trainDataframe = pd.DataFrame(zip(xTrain, yTrain))
testDataframe = pd.DataFrame(zip(xTest, yTest))
trainDataframe.columns = testDataframe.columns = ['data', 'labels']
return trainDataframe, testDataframe
class MNISTDataset(DatasetInterface):
def __init__(self, dataframe):
self.data = torch.stack([torch.from_numpy(data) for data in dataframe['data'].values], dim=0)
super().__init__(dataframe['labels'].values)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.labels[index]
class DatasetLoaderCOVIDx(DatasetLoader):
def __init__(self, dim=(224, 224), assembleDatasets=True):
self.assembleDatasets = assembleDatasets
self.dim = dim
self.dataPath = './data/COVIDx'
self.testCSV = self.dataPath + '/test_split_v2.txt'
self.trainCSV = self.dataPath + '/train_split_v2.txt'
self.COVIDxLabelsDict = {'pneumonia': 0, 'normal': 1, 'COVID-19': 2}
def getDatasets(self, percUsers, labels, size=None):
logPrint("Loading COVIDx...")
self._setRandomSeeds()
data = self.__loadCOVIDxData(*size)
trainDataframe, testDataframe = self._filterDataByLabel(labels, *data)
clientDatasets = self._splitTrainDataIntoClientDatasets(percUsers, trainDataframe, self.COVIDxDataset)
testDataset = self.COVIDxDataset(testDataframe, isTestDataset=True)
return clientDatasets, testDataset
def __loadCOVIDxData(self, trainSize, testSize):
if self.__datasetNotFound():
logPrint("Can't find train|test split .txt files or "
"/train, /test files not populated accordingly.")
if not self.assembleDatasets:
sys.exit(0)
logPrint("Proceeding to assemble dataset from downloaded resources.")
self.__joinDatasets()
trainDataframe = self.__readDataframe(self.trainCSV, trainSize)
testDataframe = self.__readDataframe(self.testCSV, testSize)
return trainDataframe, testDataframe
def __datasetNotFound(self):
if not os.path.exists(self.dataPath + "/test_split_v2.txt") or \
not os.path.exists(self.dataPath + "/train_split_v2.txt") or \
not os.path.exists(self.dataPath + "/test") or \
not os.path.exists(self.dataPath + "/train") or \
not len(os.listdir(self.dataPath + "/test")) or \
not len(os.listdir(self.dataPath + "/train")):
# Might also want to check that the file counts in the
# /test and /train folders match the .txt split files
return True
return False
def __readDataframe(self, file, size):
dataFrame = pd.read_csv(file, names=['id', 'fileNames', 'labels'],
sep=' ', header=None, usecols=[1, 2])
dataFrame['labels'] = dataFrame['labels'].map(lambda label: self.COVIDxLabelsDict[label])
return dataFrame.head(size)
def __joinDatasets(self):
dataSources = ['/covid-chestxray-dataset',
'/rsna-kaggle-dataset',
'/Figure1-covid-chestxray-dataset']
if not len(os.listdir(self.dataPath + dataSources[0])):
logPrint("You need to clone https://github.com/ieee8023/covid-chestxray-dataset to {}."
"".format(self.dataPath + dataSources[0]))
exit(0)
if not len(os.listdir(self.dataPath + dataSources[1])):
logPrint("You need to unzip (https://www.kaggle.com/c/rsna-pneumonia-detection-challenge) dataset to {}."
"".format(self.dataPath + dataSources[1]))
exit(0)
COPY_FILE = True
if COPY_FILE:
if not os.path.exists(self.dataPath + '/train'):
os.makedirs(self.dataPath + '/train')
if not os.path.exists(self.dataPath + '/test'):
os.makedirs(self.dataPath + '/test')
# path to covid-19 dataset from https://github.com/ieee8023/covid-chestxray-dataset
imgPath = self.dataPath + dataSources[0] + '/images'
csvPath = self.dataPath + dataSources[0] + '/metadata.csv'
# Path to https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
kaggle_dataPath = self.dataPath + '/rsna-kaggle-dataset'
kaggle_csvname = 'stage_2_detailed_class_info.csv' # get all the normal from here
kaggle_csvname2 = 'stage_2_train_labels.csv' # get all the 1s from here since 1 indicate pneumonia
kaggle_imgPath = 'stage_2_train_images'
# parameters for COVIDx dataset
train = []
test = []
test_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
train_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
mapping = dict()
mapping['COVID-19'] = 'COVID-19'
mapping['SARS'] = 'pneumonia'
mapping['MERS'] = 'pneumonia'
mapping['Streptococcus'] = 'pneumonia'
mapping['Normal'] = 'normal'
mapping['Lung Opacity'] = 'pneumonia'
mapping['1'] = 'pneumonia'
# train/test split
split = 0.1
# adapted from https://github.com/mlmed/torchxrayvision/blob/master/torchxrayvision/datasets.py#L814
csv = pd.read_csv(csvPath, nrows=None)
idx_pa = csv["view"] == "PA" # Keep only the PA view
csv = csv[idx_pa]
pneumonias = ["COVID-19", "SARS", "MERS", "ARDS", "Streptococcus"]
pathologies = ["Pneumonia", "Viral Pneumonia", "Bacterial Pneumonia", "No Finding"] + pneumonias
pathologies = sorted(pathologies)
# get non-COVID19 viral, bacteria, and COVID-19 infections from covid-chestxray-dataset
# stored as patient id, image filename and label
filename_label = {'normal': [], 'pneumonia': [], 'COVID-19': []}
count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
print(csv.keys())
for index, row in csv.iterrows():
f = row['finding']
if f in mapping:
count[mapping[f]] += 1
entry = [int(row['patientid']), row['filename'], mapping[f]]
filename_label[mapping[f]].append(entry)
print('Data distribution from covid-chestxray-dataset:')
print(count)
# add covid-chestxray-dataset into COVIDx dataset
# since covid-chestxray-dataset doesn't have test dataset
# split into train/test by patientid
# for COVIDx:
# patient 8 is used as non-COVID19 viral test
# patient 31 is used as bacterial test
# patients 19, 20, 36, 42, 86 are used as COVID-19 viral test
for key in filename_label.keys():
arr = np.array(filename_label[key])
if arr.size == 0:
continue
# split by patients
# num_diff_patients = len(np.unique(arr[:,0]))
# num_test = max(1, round(split*num_diff_patients))
# select num_test number of random patients
if key == 'pneumonia':
test_patients = ['8', '31']
elif key == 'COVID-19':
test_patients = ['19', '20', '36', '42', '86'] # random.sample(list(arr[:,0]), num_test)
else:
test_patients = []
print('Key: ', key)
print('Test patients: ', test_patients)
# go through all the patients
for patient in arr:
if patient[0] in test_patients:
if COPY_FILE:
copyfile(os.path.join(imgPath, patient[1]),
os.path.join(self.dataPath, 'test', patient[1]))
test.append(patient)
test_count[patient[2]] += 1
else:
print("WARNING: passing copy file.")
break
else:
if COPY_FILE:
copyfile(os.path.join(imgPath, patient[1]),
os.path.join(self.dataPath, 'train', patient[1]))
train.append(patient)
train_count[patient[2]] += 1
else:
print("WARNING: passing copy file.")
break
print('test count: ', test_count)
print('train count: ', train_count)
# add normal and rest of pneumonia cases from https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
print(kaggle_dataPath)
csv_normal = pd.read_csv(os.path.join(kaggle_dataPath, kaggle_csvname), nrows=None)
csv_pneu = pd.read_csv(os.path.join(kaggle_dataPath, kaggle_csvname2), nrows=None)
patients = {'normal': [], 'pneumonia': []}
for index, row in csv_normal.iterrows():
if row['class'] == 'Normal':
patients['normal'].append(row['patientId'])
for index, row in csv_pneu.iterrows():
if int(row['Target']) == 1:
patients['pneumonia'].append(row['patientId'])
for key in patients.keys():
arr = np.array(patients[key])
if arr.size == 0:
continue
# split by patients
# num_diff_patients = len(np.unique(arr))
# num_test = max(1, round(split*num_diff_patients))
# '/content/COVID-Net/'
test_patients = np.load(self.dataPath + '/COVID-Net/rsna_test_patients_{}.npy'
''.format(key)) # random.sample(list(arr), num_test)
# np.save('rsna_test_patients_{}.npy'.format(key), np.array(test_patients))
for patient in arr:
ds = dicom.dcmread(os.path.join(kaggle_dataPath, kaggle_imgPath, patient + '.dcm'))
pixel_array_numpy = ds.pixel_array
imgname = patient + '.png'
if patient in test_patients:
if COPY_FILE:
cv2.imwrite(os.path.join(self.dataPath, 'test', imgname), pixel_array_numpy)
test.append([patient, imgname, key])
test_count[key] += 1
else:
print("WARNING: passing copy file.")
break
else:
if COPY_FILE:
cv2.imwrite(os.path.join(self.dataPath, 'train', imgname), pixel_array_numpy)
train.append([patient, imgname, key])
train_count[key] += 1
else:
print("WARNING: passing copy file.")
break
print('test count: ', test_count)
print('train count: ', train_count)
# final stats
print('Final stats')
print('Train count: ', train_count)
print('Test count: ', test_count)
print('Total length of train: ', len(train))
print('Total length of test: ', len(test))
# export to train and test csv
# format as patientid, filename, label - separated by a space
train_file = open(self.dataPath + "/train_split_v2.txt", "w")
for sample in train:
info = str(sample[0]) + ' ' + sample[1] + ' ' + sample[2] + '\n'
train_file.write(info)
train_file.close()
test_file = open(self.dataPath + "/test_split_v2.txt", "w")
for sample in test:
info = str(sample[0]) + ' ' + sample[1] + ' ' + sample[2] + '\n'
test_file.write(info)
test_file.close()
class COVIDxDataset(DatasetInterface):
def __init__(self, dataframe, isTestDataset=False):
self.root = './data/COVIDx/' + ('test/' if isTestDataset else 'train/')
self.paths = dataframe['fileNames']
super().__init__(dataframe['labels'].values)
def __getitem__(self, index):
imageTensor = self.__load_image(self.root + self.paths[index])
labelTensor = self.labels[index]
return imageTensor, labelTensor
@staticmethod
def __load_image(img_path):
if not os.path.exists(img_path):
print("IMAGE DOES NOT EXIST {}".format(img_path))
image = Image.open(img_path).convert('RGB')
image = image.resize((224, 224)).convert('RGB')
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
# if(imageTensor.size(0)>1):
# #print(img_path," > 1 channels")
# imageTensor = imageTensor.mean(dim=0,keepdim=True)
imageTensor = transform(image)
return imageTensor
class DatasetLoaderDiabetes(DatasetLoader):
def __init__(self, requiresAnonymization=False):
self.requireDatasetAnonymization = requiresAnonymization
# Parameters required by k-anonymity enforcement
self.k = 4
self.quasiIds = ['Pregnancies', 'Age']
def getDatasets(self, percUsers, labels, size=None):
logPrint("Loading Diabetes data...")
self._setRandomSeeds()
trainDataframe, testDataframe, columns = self.__loadDiabetesData()
trainDataframe, testDataframe = self._filterDataByLabel(labels, trainDataframe, testDataframe)
clientDatasets = self._splitTrainDataIntoClientDatasets(percUsers, trainDataframe, self.DiabetesDataset)
testDataset = self.DiabetesDataset(testDataframe)
if self.requireDatasetAnonymization:
clientAnonymizationResults = self._anonymizeClientDatasets(clientDatasets, columns, 4,
self.quasiIds, self.__setHierarchies)
clientDatasets, syntacticMappings, generalizedColumns = clientAnonymizationResults
testDataset = self._anonymizeTestDataset(testDataset, syntacticMappings, columns, generalizedColumns)
return clientDatasets, testDataset
@staticmethod
def __loadDiabetesData(dataBinning=False):
data = pd.read_csv('data/Diabetes/diabetes.csv')
# Shuffle
data = data.sample(frac=1).reset_index(drop=True)
# Handling missing data
data['BMI'] = data.BMI.mask(data.BMI == 0, (data['BMI'].mean(skipna=True)))
data['BloodPressure'] = data.BloodPressure.mask(data.BloodPressure == 0,
(data['BloodPressure'].mean(skipna=True)))
data['Glucose'] = data.Glucose.mask(data.Glucose == 0, (data['Glucose'].mean(skipna=True)))
# data = data.drop(['Insulin'], axis=1)
# data = data.drop(['SkinThickness'], axis=1)
# data = data.drop(['DiabetesPedigreeFunction'], axis=1)
labels = data['Outcome']
data = data.drop(['Outcome'], axis=1)
if dataBinning:
data['Age'] = data['Age'].astype(int)
data.loc[data['Age'] <= 16, 'Age'] = 0
data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age'] = 1
data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age'] = 2
data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'Age'] = 3
data.loc[data['Age'] > 64, 'Age'] = 4
data['Glucose'] = data['Glucose'].astype(int)
data.loc[data['Glucose'] <= 80, 'Glucose'] = 0
data.loc[(data['Glucose'] > 80) & (data['Glucose'] <= 100), 'Glucose'] = 1
data.loc[(data['Glucose'] > 100) & (data['Glucose'] <= 125), 'Glucose'] = 2
data.loc[(data['Glucose'] > 125) & (data['Glucose'] <= 150), 'Glucose'] = 3
data.loc[data['Glucose'] > 150, 'Glucose'] = 4
data['BloodPressure'] = data['BloodPressure'].astype(int)
data.loc[data['BloodPressure'] <= 50, 'BloodPressure'] = 0
data.loc[(data['BloodPressure'] > 50) & (data['BloodPressure'] <= 65), 'BloodPressure'] = 1
data.loc[(data['BloodPressure'] > 65) & (data['BloodPressure'] <= 80), 'BloodPressure'] = 2
data.loc[(data['BloodPressure'] > 80) & (data['BloodPressure'] <= 100), 'BloodPressure'] = 3
data.loc[data['BloodPressure'] > 100, 'BloodPressure'] = 4
xTrain = data.head(int(len(data) * .8)).values
xTest = data.tail(int(len(data) * .2)).values
yTrain = labels.head(int(len(data) * .8)).values
yTest = labels.tail(int(len(data) * .2)).values
trainDataframe = pd.DataFrame(zip(xTrain, yTrain))
testDataframe = pd.DataFrame(zip(xTest, yTest))
trainDataframe.columns = testDataframe.columns = ['data', 'labels']
return trainDataframe, testDataframe, data.columns
@staticmethod
def __setHierarchies(protect):
protect.hierarchies.Age = OrderHierarchy('interval', 1, 5, 2, 2, 2)
protect.hierarchies.Pregnancies = OrderHierarchy('interval', 1, 2, 2, 2, 2)
class DiabetesDataset(DatasetInterface):
def __init__(self, dataframe):
self.dataframe = dataframe
self.data = torch.stack([torch.from_numpy(data) for data in dataframe['data'].values], dim=0).float()
super().__init__(dataframe['labels'].values)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.labels[index]
def getInputSize(self):
return len(self.dataframe['data'][0])
class DatasetLoaderHeartDisease(DatasetLoader):
def __init__(self, requiresAnonymization=False):
self.requireDatasetAnonymization = requiresAnonymization
# Parameters required by k-anonymity enforcement
self.k = 2
self.quasiIds = ['age', 'sex']
def getDatasets(self, percUsers, labels, size=None):
logPrint("Loading Heart Disease data...")
self._setRandomSeeds()
trainDataframe, testDataframe, columns = self.__loadHeartDiseaseData()
trainDataframe, testDataframe = self._filterDataByLabel(labels, trainDataframe, testDataframe)
clientDatasets = self._splitTrainDataIntoClientDatasets(percUsers, trainDataframe, self.HeartDiseaseDataset)
testDataset = self.HeartDiseaseDataset(testDataframe)
if self.requireDatasetAnonymization:
clientAnonymizationResults = self._anonymizeClientDatasets(clientDatasets, columns, 4,
self.quasiIds, self.__setHierarchies)
clientDatasets, syntacticMappings, generalizedColumns = clientAnonymizationResults
testDataset = self._anonymizeTestDataset(testDataset, syntacticMappings, columns, generalizedColumns)
return clientDatasets, testDataset
@staticmethod
def __loadHeartDiseaseData():
trainData = pd.read_csv('data/HeartDisease/train.csv')
testData = | pd.read_csv('data/HeartDisease/test.csv') | pandas.read_csv |
import re
from typing import Optional
import warnings
import numpy as np
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0
from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import (
_flatten,
_get_all_lines,
_get_xlim,
_handle_shared_axes,
_subplots,
format_date_labels,
table,
)
class MPLPlot:
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data : Series or DataFrame
The data to be plotted.
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation: Optional[str] = None
_pop_attributes = [
"label",
"style",
"logy",
"logx",
"loglog",
"mark_right",
"stacked",
]
_attr_defaults = {
"logy": False,
"logx": False,
"loglog": False,
"mark_right": True,
"stacked": False,
}
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormap=None,
table=False,
layout=None,
include_bool=False,
**kwds,
):
import matplotlib.pyplot as plt
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if "cmap" in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif "cmap" in kwds:
self.colormap = kwds.pop("cmap")
else:
self.colormap = colormap
self.table = table
self.include_bool = include_bool
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
import matplotlib.colors
if (
"color" in self.kwds
and self.nseries == 1
and not is_list_like(self.kwds["color"])
):
# support series.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and isinstance(self.kwds["color"], tuple)
and self.nseries == 1
and len(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in series plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
for char in s:
if char in matplotlib.colors.BASE_COLORS:
raise ValueError(
"Cannot pass 'style' string with a color symbol and "
"'color' keyword argument. Please use one or the other or "
"pass 'style' without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(
naxes=self.nseries,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter((input_log - valid_log)))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (
is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries
)
if sec_true or all_sec:
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
include_type = [np.number, "datetime", "datetimetz", "timedelta"]
# GH23719, allow plotting boolean
if self.include_bool is True:
include_type.append(np.bool_)
# GH22799, exclude datatime-like type for boxplot
exclude_type = None
if self._kind == "box":
# TODO: change after solving issue 27881
include_type = [np.number]
exclude_type = ["timedelta"]
# GH 18755, include object and category type for scatter plot
if self._kind == "scatter":
include_type.extend(["object", "category"])
numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)
try:
is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not len(numeric_data)
# no non-numeric frames or series allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
raise ValueError(
"The length of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
f"length of title = {len(self.title)}\n"
f"number of columns = {self.nseries}"
)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if | is_list_like(self.title) | pandas.core.dtypes.common.is_list_like |
# -*- coding: utf-8 -*-
from abc import ABC
from pathlib import Path
import pandas as pd
import scrapy
from src.crawl.utils import cleanup
from settings import YEAR, CRAWLING_OUTPUT_FOLDER
BASE_URl = "https://www.helmo.be/Formations/{}"
PROG_DATA_PATH = Path(__file__).parent.absolute().joinpath(
f'../../../../{CRAWLING_OUTPUT_FOLDER}helmo_programs_{YEAR}.json')
LANGUAGES_DICT = {"Français": 'fr',
"Anglais": 'en'}
class HELMOCourseSpider(scrapy.Spider, ABC):
"""
Course crawler for Haute Ecole Libre Mosane
"""
name = "helmo-courses"
custom_settings = {
'FEED_URI': Path(__file__).parent.absolute().joinpath(
f'../../../../{CRAWLING_OUTPUT_FOLDER}helmo_courses_{YEAR}.json').as_uri()
}
def start_requests(self):
courses_df = pd.read_json(open(PROG_DATA_PATH, "r"))[["courses", "courses_urls"]]
# Combine lists of strings
courses_ids_list = courses_df["courses"].sum()
courses_urls_list = courses_df["courses_urls"].sum()
# Some courses are specified at two different urls which have exactly the same content
courses_ds = | pd.Series(courses_ids_list, courses_urls_list) | pandas.Series |
# ----------------
# IMPORT PACKAGES
# ----------------
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import sklearn.metrics as skm
import numpy as np
import matplotlib.pyplot as plt
# ----------------
# OBTAIN DATA
# ----------------
# Data Source: https://archive.ics.uci.edu/ml/machine-learning-databases/00240/
# ----------------
# PROFILE DATA
# ----------------
# Determine number of observations or data points in the training data set.
subjects = | pd.read_csv("train/subject_train.txt", header=None, delim_whitespace=True, index_col=False) | pandas.read_csv |
import json
import pandas as pd
import random
import os
import pyproj
import numpy as np
import geopandas as gpd
from pathlib import Path
from datetime import datetime
from copy import deepcopy
from shapely.geometry import Point
from shapely.ops import transform
from sklearn.preprocessing import OneHotEncoder
# load config file
with open(Path(os.path.dirname(os.path.realpath(__file__)), '../config.json')) as f:
config = json.load(f)
class DataLoader:
"""
Loads the combined HVP dataset containing POI data and URA land use data and performs data preparation.
"""
def __init__(self):
"""
Initialises the class object by loading the combined HVP dataset containing POI data and URA land use data.
"""
print('Loading batch data...')
batch1 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_1.xlsx'))
batch2 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_2.xlsx'))
batch3 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_3.xlsx'))
batch4 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_4.xlsx'))
batch5 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_5.xlsx'))
batch6 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_6.xlsx'))
batch7 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_7.xlsx'))
batch8 = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'batch_stop_data_8.xlsx'))
self.data = pd.concat([batch1, batch2, batch3, batch4, batch5, batch6, batch7, batch8], ignore_index=True)
def check_stop_order(self, data):
"""
Checks that the stops made by each driver are in chronological order.
Parameters:
data: pd.Dataframe
Contains the combined HVP dataset.
"""
for driver_id in data['DriverID'].unique():
driver_data = deepcopy(data[data['DriverID'] == driver_id].reset_index(drop=True))
unix_time = np.array([datetime.strptime(time_str, '%Y-%m-%d %H-%M-%S').timestamp()
for time_str in driver_data['StartTime'].tolist()])
time_diff = unix_time[1:] - unix_time[:-1]
if len(driver_data) > 1:
assert not np.any(time_diff < 0.0)
def _buffer_in_meters(self, lng, lat, radius):
"""
Converts a latitude, longitude coordinate pair into a buffer with a user-defined radius.
:param lng: float
Contains the longitude information.
:param lat: float
Contains the latitude information.
:param radius: float
Contains the buffer radius in metres.
:return:
buffer_latlng: Polygon
Contains the buffer.
"""
proj_meters = pyproj.CRS('EPSG:3414') # EPSG for Singapore
proj_latlng = pyproj.CRS('EPSG:4326')
project_to_metres = pyproj.Transformer.from_crs(proj_latlng, proj_meters, always_xy=True).transform
project_to_latlng = pyproj.Transformer.from_crs(proj_meters, proj_latlng, always_xy=True).transform
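# the point is projected into the metre-based CRS first so that `radius` is interpreted in
# metres (e.g. radius=50.0 gives a ~50 m circle), then the buffer is projected back to lat/lng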
pt_meters = transform(project_to_metres, Point(lng, lat))
buffer_meters = pt_meters.buffer(radius)
buffer_latlng = transform(project_to_latlng, buffer_meters)
return buffer_latlng
def _extract_other_driver_activities(self, driver_data, other_driver_data):
"""
Extracts the activity information performed by other drivers in the same area.
Parameters:
driver_data: pd.Dataframe
Contains the combined HVP dataset for a particular driver.
other_driver_data: pd.Dataframe
Contains the combined HVP dataset for the other drivers.
Return:
driver: pd.Dataframe
Contains the combined HVP dataset for a particular driver + past activities of other drivers
"""
other_driver_activities = pd.DataFrame()
driver_data = gpd.GeoDataFrame(driver_data,
geometry=gpd.points_from_xy(driver_data['StopLon'],
driver_data['StopLat']))
other_driver_data = gpd.GeoDataFrame(other_driver_data,
geometry=gpd.points_from_xy(other_driver_data['StopLon'],
other_driver_data['StopLat']))
for i in range(len(driver_data)):
# create a 50 m circular buffer around the stop
buffer = self._buffer_in_meters(driver_data.loc[i, 'StopLon'],
driver_data.loc[i, 'StopLat'], 50.0)
nearby_stops = other_driver_data[other_driver_data.intersects(buffer)].reset_index(drop=True)
if len(nearby_stops) == 0:
other_driver_activities = other_driver_activities.append(pd.Series(dtype=object), ignore_index=True)
else:
activity_cols = [col for col in nearby_stops.columns
if ('Activity.' in col)
and ('MappedActivity.' not in col)
and ('Other.' not in col)]
mapped_activity_cols = [col for col in nearby_stops.columns
if ('MappedActivity.' in col) and ('Other.' not in col)]
# calculate distribution of activities conducted near the stop
summed_activity = nearby_stops.sum()[activity_cols]
normalised_activity = (summed_activity) / (summed_activity.sum() + 1e-9)
# calculate distribution of mapped activities conducted near the stop
summed_mapped_activity = nearby_stops.sum()[mapped_activity_cols]
normalised_mapped_activity = (summed_mapped_activity) / (summed_mapped_activity.sum() + 1e-9)
# merge original and mapped activity types conducted by other drivers
other_driver_activities = other_driver_activities.append(pd.concat([normalised_activity,
normalised_mapped_activity]).T,
ignore_index=True)
assert len(driver_data) == len(other_driver_activities)
other_driver_activities_cols = ['Other.{}'.format(column) for column in other_driver_activities.columns]
other_driver_activities.columns = other_driver_activities_cols
driver_data = pd.concat([driver_data, other_driver_activities], axis=1)
driver_data.fillna(0, inplace=True)
return driver_data
def _extract_past_activities(self, data):
"""
Extracts past activities performed by each driver.
Parameters:
data: pd.Dataframe
Contains the combined HVP dataset.
Return:
new_data: pd.DataFrame
Contains the combined HVP dataset with past activities performed by each driver
"""
assert type(data) == gpd.GeoDataFrame
new_data = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import html
from bedrock.doc.relation import Relation
from bedrock.doc.annotation import Annotation
from bedrock.doc.token import Token
from bedrock.doc.layer import Layer
from bedrock.common import uima
import logging
from typing import Any
import warnings
class CAS2DataFrameConverter:
def __init__(self, mapping_fns: dict = None, appending_fns: dict = None):
if mapping_fns is None:
self.__mapping_fns = {}
else:
self.__mapping_fns = mapping_fns
if appending_fns is None:
self.__appending_fns = {}
else:
self.__appending_fns = appending_fns
def register_mapping_fn(self, layer_name: str, fn: Any):
self.__mapping_fns[layer_name] = fn
def register_appending_fn(self, layer_name: str, fn: Any):
self.__appending_fns[layer_name] = fn
def unregister_mapping_fn(self, layer_name: str):
self.__mapping_fns[layer_name] = None
def unregister_appending_fn(self, layer_name: str):
self.__appending_fns[layer_name] = None
# Generates pandas DataFrames from the UIMA CAS: tokens, annotations, relations, uima (combined)
def get_dataframes(self, cas):
annotations = | pd.DataFrame(columns=Annotation.COLS) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import csv
import random
from time import time
from decimal import Decimal
#from faker import Faker
import boto3
import string
import os
import re
import collections
import nltk
import pandas as pd
#from nltk.corpus import stopwords
from io import StringIO # python3; python2: BytesIO
from nltk.corpus import stopwords
import google.cloud
from google.cloud import language_v1
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('words')
#df_sentimentScore = pd.DataFrame(columns=['sentence','sentiment_score','sentiment_magnitude','language'])
df_sentimentScore = pd.DataFrame(columns=['text','label'])
sentiment_score = []
#sentiment_magnitude = []
sentence1 = []
content_list=[]
preprocessed_list=[]
#langue=[]
#stop_words = stopwords.words('english')
# Connect to Boto3
s3 = boto3.resource(
service_name='s3',
region_name='us-east-2')
# bucket_name = ''
# def upload_to_s3(filename):
# s3.Bucket(bucket_name).upload_file(Filename=filename, Key='demo/' + filename)
# print ('Upload Complete')
def upload_to_aws(local_file, bucket, s3_file):
import boto3
from botocore.exceptions import NoCredentialsError
ACCESS_KEY = 'AKIA5CUSOFRV64J75U7W'
SECRET_KEY = '<KEY>'
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY)
try:
s3.upload_file(local_file, bucket, s3_file)
return True
except FileNotFoundError:
print("The file was not found")
return False
except NoCredentialsError:
print("Credentials not available")
return False
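# Example call (bucket and file names below are placeholders, not real resources):
# uploaded = upload_to_aws('processed.csv', 'my-bucket', 'demo/processed.csv')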
#uploading all unstructured files to S3
def upload_files(path):
session = boto3.Session(
aws_access_key_id='AKIA5CUSOFRV64J75U7W',
aws_secret_access_key='<KEY>',
region_name='us-east-2'
)
s3 = session.resource('s3')
bucket = s3.Bucket('edgarteam3')
for subdir, dirs, files in os.walk(path):
for file in files:
full_path = os.path.join(subdir, file)
with open(full_path, 'rb') as data:
bucket.put_object(Key=full_path[len(path)+1:], Body=data)
def processed_file_to_s3():
bucket = 'edgarteam3processedfiles' # already created on S3
csv_buffer = StringIO()
ACCESS_KEY = 'AKIA5CUSOFRV64J75U7W'
SECRET_KEY = '<KEY>'
df = | pd.DataFrame(preprocessed_list) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_evaluation.core.ipynb (unless otherwise specified).
__all__ = ['get_mean_probs', 'find_parens', 'mean_dist_probs', 'token_taxonomy', 'non_wordy', 'get_error_rates',
'get_error_rates_df', 'get_last_token_error_df', 'get_mean_cross_entropy', 'get_mean_probs',
'get_mean_cross_entropy_df', 'save_results', 'evaluate', 'TYPES']
# Cell
import re
import numpy as np
import pandas as pd
import tensorflow as tf
from collections import Counter, defaultdict
from ..data.core import replace_spec_toks_to_original, java_special_tokens
from ..data.transforms import (
code_token_randomizer,
line_randomizer,
java_comment_remover,
transform_df,
)
from ..model.core import Model, RNNModel, TransformerHFModel, WildModel, _loss
from pathlib import Path
from scipy import stats
from transformers import TFGPT2LMHeadModel
from tqdm.auto import tqdm
from typing import Dict, List, Optional
# Cell
def get_mean_probs(df: pd.DataFrame, model: Model, n: Optional[int] = None):
"""
Get the mean probability of each token that the model
should predict for an entire pandas dataframe.
:param df: the pandas dataframe containing each method to have the model predict on
:param model: the model used to generate the predictions
:param n: the number of methods to evaluate. If none, the entire dataframe will be used
:returns: returns a dictionary mapping each token in the model's vocab to its mean probability (NaN for tokens that never occur)
"""
if n is None:
n = len(df)
# setup container lists for the number of occurrences and sum of probabilities for each token
counts = [0] * model.tokenizer.get_vocab_size()
sum_probs = [0.0] * model.tokenizer.get_vocab_size()
# loop through each method
for mthd in df.code.values[:n]:
# tokenize the method and generate the probabilities for the model's predictions
inputs = model.tokenize(mthd)
probs = model.get_probs(inputs)[0].numpy()
# loop through each token and its probability and update the container lists
for idx, p in zip(inputs["input_ids"][0], probs):
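# p is the predicted distribution over the vocab at this position; p[idx] picks out the
# probability assigned to the token that actually appears there (idx is that token's id)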
counts[idx] += 1
sum_probs[idx] += p[idx]
# convert the lists to numpy arrays and perform element-wise division to get the mean probability of each token
counts = np.array(counts)
sum_probs = np.array(sum_probs)
# perform division, but not when denominator is zero. In those cases, just leave value as NAN.
nans = np.empty(counts.shape)
nans.fill(np.nan)
mean_probs = np.divide(sum_probs, counts, out=nans, where=counts != 0)
# TODO: convert to dictionary with keys as tokens
mean_probs = {
model.tokenizer.id_to_token(i): mean_probs[i] for i in range(len(mean_probs))
}
return mean_probs
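# Typical usage (model is one of the Model subclasses imported above; token names depend
# on the tokenizer's vocab):
# mean_probs = get_mean_probs(test_df, model, n=100)
# mean_probs["<if>"]  # mean probability of the "<if>" token, NaN if it never occurred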
# Cell
def find_parens(toks: List[str], opening: str, closing: str) -> Dict[int, int]:
"""
Get the indices for the opening and closing tokens.
From https://stackoverflow.com/a/29992065/5768407
by user Baltasarq (https://stackoverflow.com/users/266978/baltasarq).
:param toks: the tokenized version of a method
:param opening: the opening token that will be matched against the closing token
:param closing: the closing token that will be matched against the opening token
:returns: returns a dictionary with the opening token indices as the keys and the closing token indices as the values
"""
toret = {}
pstack = []
for i, tok in enumerate(toks):
if tok == opening:
pstack.append(i)
elif tok == closing:
if len(pstack) == 0:
raise IndexError("No matching closing parens at: " + str(i))
toret[pstack.pop()] = i
if len(pstack) > 0:
raise IndexError("No matching opening parens at: " + str(pstack.pop()))
return toret
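# Quick worked example:
# find_parens(["<{>", "a", "<{>", "b", "<}>", "<}>"], "<{>", "<}>") == {0: 5, 2: 4}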
def _get_dist_probs(
mthd: str, model: Model, opening: str, closing: str
) -> Dict[int, float]:
"""
Get the distances and mean probabilities between opening and closing tokens in a given method.
:param mthd: the method to get the ranges of the opening and closing tokens and their probabilities
:param model: the model used to generate the predictions
:param opening: the opening token used for calculating the distance between opening and closing tokens
:param closing: the closing token used for calculating the distance between opening and closing tokens as well as the token to get the mean probability of
:returns: returns a dictionary with the distance between the opening and closing tokens as keys and their mean probabilities as values
"""
# WARNING: Be careful when using different tokenizers, since the HF tokenizers library has a different API than the tokenizers in the HF transformers library. You will need to update this when using a custom model and tokenizer.
# get the distances for the opening and closing tokens
toks = model.tokenizer.encode(mthd).tokens
idxs = find_parens(toks, opening, closing)
# get the model probabilities for the given method
inputs = model.tokenize(mthd)
probs = model.get_probs(inputs)[0].numpy()
# sum up the probabilities of the different distances for the closing token
dist_probs = defaultdict(float)
for open_id, close_id in idxs.items():
dist_probs[close_id - open_id] += probs[close_id][
inputs["input_ids"][0][close_id]
]
# get the mean of the summed probabilities
dist_cnts = Counter([close_id - open_id for open_id, close_id in idxs.items()])
dist_probs = {dist: dist_probs[dist] / n for dist, n in dist_cnts.items()}
return dist_probs
def mean_dist_probs(
df: pd.DataFrame,
model: Model,
opening: Optional[str] = "<{>",
closing: Optional[str] = "<}>",
n: Optional[int] = None,
) -> pd.DataFrame:
"""
Get the distance between opening and closing tokens and the mean probability of each closing token that the model should predict for an entire pandas dataframe.
:param df: the pandas dataframe containing each method to have the model predict on
:param model: the model used to generate the predictions
:param opening: the opening token used for calculating the distance between opening and closing tokens
:param closing: the closing token used for calculating the distance between opening and closing tokens as well as the token to get the mean probability of
:param n: the number of methods to evaluate. If none, the entire dataframe will be used
:returns: returns a dataframe with the distances between opening and closing tokens and their mean probabilities
"""
if n is None:
n = len(df)
# get the probabilities for the different distances for an entire dataframe
df = df.iloc[:n].copy()
dist_probs = df.code.apply(
lambda mthd: _get_dist_probs(mthd, model, opening, closing)
).values
# flatten the keys of the different distances into a list
dist_keys = []
for probs in dist_probs:
dist_keys.extend(probs.keys())
# merge dictionaries across methods by taking the mean of probs with the same distance. Modified from https://stackoverflow.com/a/10461916/5768407,
# users georg https://stackoverflow.com/users/989121/georg and <NAME> https://stackoverflow.com/users/12149730/r%c3%a9my-hosseinkhan-boucher
mean_dist_probs = {
k: np.nanmean(np.array([probs.get(k, np.nan) for probs in dist_probs]))
for k in set(dist_keys)
}
std_dist_probs = {
k: np.nanstd(np.array([probs.get(k, np.nan) for probs in dist_probs]))
for k in set(dist_keys)
}
med_dist_probs = {
k: np.nanmedian(np.array([probs.get(k, np.nan) for probs in dist_probs]))
for k in set(dist_keys)
}
mad_dist_probs = {
k: stats.median_abs_deviation(
np.array([probs.get(k, np.nan) for probs in dist_probs]), nan_policy="omit"
)
for k in set(dist_keys)
}
# TODO: convert to dictionary
df_dist = (
pd.DataFrame(
{
"dist": list(mean_dist_probs.keys()),
"mean_prob": list(mean_dist_probs.values()),
"std_prob": list(std_dist_probs.values()),
"med_prob": list(med_dist_probs.values()),
"mad_prob": list(mad_dist_probs.values()),
}
)
.sort_values("dist")
.reset_index(drop=True)
)
return df_dist
# Cell
token_taxonomy = {
"blocks": {
"<{>": "{",
"<}>": "}",
"<[>": "[",
"<]>": "]",
"<(>": "(",
"<)>": ")",
"<;>": ";",
"<return>": "return"
},
"exceptions": {
"<catch>": "catch",
"<try>": "try",
"<finally>": "finally",
"<throw>": "throw",
"<throws>": "throws"
},
"oop": {
"<class>": "class",
"<instanceof>": "instanceof",
"<interface>": "interface",
"<private>": "private",
"<protected>": "protected",
"<public>": "public",
"<abstract>": "abstract",
"<extends>": "extends",
"<package>": "package",
"<this>": "this",
"<implements>": "implements",
"<import>": "import",
"<new>": "new",
"<super>": "super"
},
"tests": {
"<assert>": "assert"
},
"declarations": {
"<native>": "native",
"<static>": "static",
"<synchronized>": "synchronized",
"<transient>": "transient",
"<volatile>": "volatile",
"<void>": "void",
"<final>": "final",
"<enum>": "enum"
},
"conditionals": {
"<else>": "else",
"<if>": "if",
"<switch>": "switch",
"<case>": "case",
"<default>": "default"
},
"loops": {
"<break>": "break",
"<do>": "do",
"<for>": "for",
"<while>": "while",
"<continue>": "continue"
},
"operators": {
"<=>": "=",
"<+>": "+",
"<->": "-",
"<*>": "*",
"</>": "/",
"<%>": "%",
"<++>": "++",
"<-->": "--",
"<!>": "!",
"<==>": "==",
"<!=>": "!=",
"<greater_equal>": ">=",
"<lesser_equal>": "<=",
"<&&>": "&&",
"<||>": "||",
"<?>": "?",
"<:>": ":",
"<~>": "~",
"<double_lesser>": "<<",
"<double_greater>": ">>",
"<triple_greater>": ">>>",
"<&>": "&",
"<^>": "^",
"<|>": "|"
},
"datatypes": {
"<byte>": "byte",
"<char>": "char",
"<float>": "float",
"<boolean>": "boolean",
"<double>": "double",
"<int>": "int",
"<long>": "long",
"<short>": "short",
"<strictfp>": "strictfp"
},
"extra_tokens": {
"<@>": "@",
"<...>": "...",
"<null>": "null",
"<true>": "true",
"<false>": "false",
"<n>": "\n"
}
}
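# Cell
# Illustrative helper (not part of the original notebook): look up which taxonomy
# category a given special token belongs to, mirroring the membership checks that the
# error-rate functions below perform against token_taxonomy.
def token_category(tok: str) -> Optional[str]:
    for cat, tokens in token_taxonomy.items():
        if tok in tokens:
            return cat
    return None
assert token_category("<if>") == "conditionals"
assert token_category("<catch>") == "exceptions"
assert token_category("not_a_special_token") is None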
# Cell
non_wordy = ["<n>", "<...>", "<@>", *token_taxonomy["operators"], *token_taxonomy["blocks"]]
non_wordy.remove("<return>")
# Cell
def get_error_rates(df: pd.DataFrame, model: Model, n: Optional[int] = None):
if n is None:
n = len(df)
    # set up container lists for the number of occurrences and the error counts for each token
cnts = [0] * model.tokenizer.get_vocab_size()
err_cnts = [0] * model.tokenizer.get_vocab_size()
# loop through each method
for mthd in df.code.values[:n]:
        # tokenize the method and generate the probabilities for the model's predictions
inputs = model.tokenize(mthd)
probs = model.get_probs(inputs)[0].numpy()
        # loop through each token and its probability and update the container lists
        for idx, p in zip(inputs["input_ids"][0], probs):
            cnts[idx] += 1
            # count an error when the model's highest-probability token differs from the actual token
            if np.argmax(p) != idx:
                err_cnts[idx] += 1
    # convert the lists to NumPy arrays and perform element-wise division to get the error rate for each token
    cnts = np.array(cnts)
    err_cnts = np.array(err_cnts)
    # perform division, but not when the denominator is zero. In those cases, just leave the value as NaN.
    nans = np.empty(cnts.shape)
    nans.fill(np.nan)
    mean_errs = np.divide(err_cnts, cnts, out=nans, where=cnts != 0)
    # copy the taxonomy, including the nested per-category dicts, so the module-level
    # token_taxonomy is not mutated when the error stats are attached below
    error_taxonomy = {cat: dict(tokens) for cat, tokens in token_taxonomy.items()}
for cat, tokens in error_taxonomy.items():
errs = []
cnt_sum = 0
for token, keyword in tokens.items():
idx = model.tokenizer.token_to_id(token)
error_taxonomy[cat][token] = {"error_rate": mean_errs[idx], "count": cnts[idx]}
errs.append(mean_errs[idx])
cnt_sum += cnts[idx]
errs = np.array(errs)
error_taxonomy[cat]["stats"] = {
"mean_error_rate": np.nanmean(errs),
"stdev_error_rate": np.nanstd(errs),
"median_error_rate": np.nanmedian(errs),
"mad_error_rate": stats.median_abs_deviation(errs, nan_policy="omit"),
}
return error_taxonomy
# Cell
def get_error_rates_df(df: pd.DataFrame, model: Model, bs: int = 16, n: Optional[int] = None):
if n is None:
n = len(df)
    # container for the per-method error-rate rows
rows = []
# loop through each method
for i in tqdm(range(0, n, bs), desc="Error Rates", total = (n // bs) + 1):
batch = ["<sos>" + mthd for mthd in df.code.values[i:i + bs]]
        # tokenize the batch of methods and get the probabilities for each token from the model
inputs = np.stack([x.ids for x in model.tokenizer.encode_batch(batch)], axis = 0)
probs = model.get_probs(inputs)
for i in range(len(batch)):
row = {
"y_" + k: np.array([0.] * model.tokenizer.get_vocab_size())
for k in token_taxonomy.keys()
}
row_cnt = {
"y_" + k: np.array([0] * model.tokenizer.get_vocab_size())
for k in token_taxonomy.keys()
}
# loop through each token and its probability and update the container lists
for j, (idx, p) in enumerate(zip(inputs[i], probs[i])):
tok = model.tokenizer.id_to_token(idx)
for k in token_taxonomy:
if tok in token_taxonomy[k]:
# Check if token is wordy and could be part of variable or method
if tok not in non_wordy:
# Get the token version of the token behind the token under study
# and check if the last character in the token contains a letter
inp_tok_prev = model.tokenizer.id_to_token(inputs[i][j - 1])
if re.search('[a-zA-Z]', inp_tok_prev[-1]):
break
# Check if there is a token infront of the token under study
# if there is, get the token version of it
# and check if the first character in the token contains a letter
if j + 1 < len(inputs[i]):
inp_tok_next = model.tokenizer.id_to_token(inputs[i][j + 1])
if re.search('[a-zA-Z]', inp_tok_next[0]):
break
row["y_" + k][idx] += p[idx]
row_cnt["y_" + k][idx] += 1
for k in row:
# Check if there were no tokens found in this method for this particular taxonomy category
if not row_cnt[k].any():
row[k] = np.nan
else:
sum_cnt = np.sum(row_cnt[k])
row[k] = np.sum(row[k]) / sum_cnt
rows.append(row)
error_df = pd.DataFrame(rows)
error_df["original_code"] = replace_spec_toks_to_original(df, java_special_tokens, n).code.values
error_df["transformed_code"] = df.code.values[:n]
return error_df
# Cell
def get_last_token_error_df(df: pd.DataFrame, model: Model, bs: int = 16, n: Optional[int] = None):
if n is None:
n = len(df)
# setup container lists for probabilities of the last token
ending = []
row = defaultdict(list)
# loop through each batch of methods
for i in tqdm(range(0, n, bs), desc="Error Rates", total = (n // bs) + 1):
# get the batch of methods
batch = [mthd for mthd in df.code.values[i:i + bs]]
# tokenize the batch and get the probabilities for each token from the model
inputs = model.batch_tokenize(batch)
probs = model.get_probs(inputs)
# turn off padding so that we can get the idx of the last token
model.tokenizer.backend_tokenizer.no_padding()
idx_last_ids = [len(x.ids) - 1 for x in model.tokenizer.backend_tokenizer.encode_batch(batch)]
# loop through the ids of the last token
for bs_idx, tok_idx in enumerate(idx_last_ids):
# append the probability of the last token to the list
prob = probs[bs_idx][tok_idx][inputs["input_ids"][bs_idx][tok_idx]].item()
ending.append(prob)
# turn the list into a dataframe in the correct format
last_error_df = pd.DataFrame(ending, columns=["y_ending_error"])
last_error_df["original_code"] = replace_spec_toks_to_original(df, java_special_tokens, n).code.values
last_error_df["transformed_code"] = df.code.values[:n]
return last_error_df
# Cell
def get_mean_cross_entropy(df: pd.DataFrame, model: Model, n: Optional[int] = None):
"""
Get the mean cross entropy for a model on an entire pandas dataframe
:param df: the pandas dataframe containing each method to have the model predict on
:param model: the model used to generate the predictions
:param n: the number of methods to evaluate. If none, the entire dataframe will be used
:returns: returns the mean cross entropy of the models predictions compared to true labels
"""
if n is None:
n = len(df)
cross_entropy_losses = []
    # sparse_categorical_crossentropy is used so the integer token ids can be scored against the predicted distributions
for mthd in df.code.values[:n]:
        # tokenize the method and get the probabilities for each token from the model
inputs = model.tokenize(mthd)
probs = model.get_probs(inputs)[0].numpy()
# calculate the cross entropy between the labels and probabilities
losses = tf.keras.losses.sparse_categorical_crossentropy(
inputs["input_ids"], probs
).numpy()
cross_entropy_losses.append(losses)
# flatten list of cross entropies and calculate the mean, median, std, and mad
cross_entropy_losses = np.concatenate(cross_entropy_losses)
return {
"mean": np.mean(cross_entropy_losses),
"median": np.median(cross_entropy_losses),
"std": np.std(cross_entropy_losses),
"mad": stats.median_abs_deviation(cross_entropy_losses),
}
# Cell
def get_mean_probs(df: pd.DataFrame, model: Model, n: Optional[int] = None):
"""
Get the mean probability of each token that the model
should predict for an entire pandas dataframe.
:param df: the pandas dataframe containing each method to have the model predict on
:param model: the model used to generate the predictions
:param n: the number of methods to evaluate. If none, the entire dataframe will be used
:returns: returns a numpy array of the mean probability for each token in the model's vocab
"""
if n is None:
n = len(df)
# setup container lists for the number of occurrences and sum of probabilities for each token
counts = [0] * model.tokenizer.get_vocab_size()
sum_probs = [0.0] * model.tokenizer.get_vocab_size()
# loop through each method
for mthd in df.code.values[:n]:
        # tokenize the method and generate the probabilities for the model's predictions
inputs = model.tokenize(mthd)
probs = model.get_probs(inputs)[0].numpy()
# loop through each token and its probability and update the container lists
for idx, p in zip(inputs["input_ids"][0], probs):
counts[idx] += 1
sum_probs[idx] += p[idx]
    # convert the lists to NumPy arrays and perform element-wise division to get the mean probabilities for each token
counts = np.array(counts)
sum_probs = np.array(sum_probs)
    # perform division, but not when the denominator is zero. In those cases, just leave the value as NaN.
nans = np.empty(counts.shape)
nans.fill(np.nan)
mean_probs = np.divide(sum_probs, counts, out=nans, where=counts != 0)
    # map each token id to its token string so the result is keyed by token
mean_probs = {
model.tokenizer.id_to_token(i): mean_probs[i] for i in range(len(mean_probs))
}
return mean_probs
# Cell
def get_mean_cross_entropy_df(df: pd.DataFrame, model: Model, bs = 16, n: Optional[int] = None):
"""
Get the mean cross entropy for a model on an entire pandas dataframe
:param df: the pandas dataframe containing each method to have the model predict on
:param model: the model used to generate the predictions
:param n: the number of methods to evaluate. If none, the entire dataframe will be used
:returns: returns the mean cross entropy of the models predictions compared to true labels
"""
if n is None:
n = len(df)
cross_entropy_losses = []
for i in tqdm(range(0, n, bs), desc="Cross Entropies", total = (n // bs) + 1):
batch = ["<sos>" + mthd for mthd in df.code.values[i:i + bs]]
        # tokenize the batch of methods and get the probabilities for each token from the model
inputs = np.stack([x.ids for x in model.tokenizer.encode_batch(batch)], axis = 0)
probs = model.get_probs(inputs)
# calculate the cross entropy between the labels and probabilities
losses = tf.keras.losses.sparse_categorical_crossentropy(
inputs, probs
).numpy()
cross_entropy_losses.extend(np.mean(losses, axis = 1))
new_df = pd.DataFrame(
zip(replace_spec_toks_to_original(df, java_special_tokens, n).code.values, df.code.values[:n], cross_entropy_losses),
columns=["original_code", "transformed_code", "y_cross_entropy"]
)
return new_df
# Cell
TYPES = 3
def _get_metrics(df, model):
error_taxonomy_df = get_error_rates_df(df, model, bs = 192)
# mean_cross_entropy_df = get_mean_cross_entropy_df(df, model, bs = 192)
return {
"error_taxonomy": error_taxonomy_df,
# "mean_cross_entropy": mean_cross_entropy_df,
}
def save_results(control_df, treatment_df, model, err_path, cross_path):
control_metrics = _get_metrics(control_df, model)
treatment_metrics = _get_metrics(treatment_df, model)
err_df = pd.concat(
[control_metrics["error_taxonomy"], treatment_metrics["error_taxonomy"]]
).sort_index().reset_index(drop=True)
err_df["x_treatment"] = [False, True] * len(control_metrics["error_taxonomy"])
err_df.to_json(err_path, orient="records", lines=True)
# cross_df = pd.concat(
# [control_metrics["mean_cross_entropy"], treatment_metrics["mean_cross_entropy"]]
# ).sort_index().reset_index(drop=True)
# cross_df["x_treatment"] = [False, True] * len(control_metrics["mean_cross_entropy"])
# cross_df.to_json(cross_path, orient="records", lines=True)
def _long_range(bigclone_path, bugfix_path, cmt_path, model, out_path, n=None):
out_path.mkdir(parents=True, exist_ok=True)
for i in range(1, TYPES + 1):
        df = pd.read_json(bigclone_path / f"bigclone-type-{i}.jsonl", orient="records", lines=True)
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.spatial import distance
from scipy.optimize import differential_evolution
class IntracellAnalysisV2:
# IA constants
FC_UPPER_VOLTAGE = 4.20
FC_LOWER_VOLTAGE = 2.70
NE_UPPER_VOLTAGE = 0.01
NE_LOWER_VOLTAGE = 1.50
PE_UPPER_VOLTAGE = 4.30
PE_LOWER_VOLTAGE = 2.86
THRESHOLD = 4.84 * 0.0
def __init__(self,
pe_pristine_file,
ne_pristine_file,
cycle_type='rpt_0.2C',
step_type=0,
error_type='V-Q',
ne_2pos_file=None,
ne_2neg_file=None
):
"""
Invokes the cell electrode analysis class. This is a class designed to fit the cell and electrode
parameters in order to determine changes of electrodes within the full cell from only full cell cycling data.
Args:
pe_pristine_file (str): file name for the half cell data of the pristine (uncycled) positive
electrode
ne_pristine_file (str): file name for the half cell data of the pristine (uncycled) negative
electrode
cycle_type (str): type of diagnostic cycle for the fitting
step_type (int): charge or discharge (0 for charge, 1 for discharge)
error_type (str): defines which error metric is to be used
ne_2neg_file (str): file name of the data for the negative component of the anode
ne_2pos_file (str): file name of the data for the positive component of the anode
"""
self.pe_pristine = pd.read_csv(pe_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
self.ne_1_pristine = pd.read_csv(ne_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
if ne_2neg_file and ne_2pos_file:
self.ne_2_pristine_pos = pd.read_csv(ne_2pos_file)
self.ne_2_pristine_neg = pd.read_csv(ne_2neg_file)
else:
self.ne_2_pristine_pos = pd.DataFrame()
self.ne_2_pristine_neg = pd.DataFrame()
if step_type == 0:
self.capacity_col = 'charge_capacity'
else:
self.capacity_col = 'discharge_capacity'
self.cycle_type = cycle_type
self.step_type = step_type
self.error_type = error_type
def process_beep_cycle_data_for_candidate_halfcell_analysis_ah(self,
cell_struct,
cycle_index):
"""
Ingests BEEP structured cycling data and cycle_index and returns
a Dataframe of evenly spaced capacity with corresponding voltage.
Inputs:
cell_struct (MaccorDatapath): BEEP structured cycling data
cycle_index (int): cycle number at which to evaluate
Outputs:
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned (evenly spaced)
and Voltage_aligned
"""
# filter the data down to the diagnostic type of interest
diag_type_cycles = cell_struct.diagnostic_data.loc[cell_struct.diagnostic_data['cycle_type'] == self.cycle_type]
real_cell_candidate_charge_profile = diag_type_cycles.loc[
(diag_type_cycles.cycle_index == cycle_index)
& (diag_type_cycles.step_type == 0) # step_type = 0 is charge, 1 is discharge
& (diag_type_cycles.voltage < self.FC_UPPER_VOLTAGE)
& (diag_type_cycles[self.capacity_col] > 0)][['voltage', 'charge_capacity']]
# renaming capacity,voltage column
real_cell_candidate_charge_profile['Q'] = real_cell_candidate_charge_profile['charge_capacity']
real_cell_candidate_charge_profile['Voltage'] = real_cell_candidate_charge_profile['voltage']
real_cell_candidate_charge_profile.drop('voltage', axis=1, inplace=True)
# interpolate voltage along evenly spaced capacity axis
q_vec = np.linspace(0, np.max(real_cell_candidate_charge_profile['Q']), 1001)
real_cell_candidate_charge_profile_aligned = pd.DataFrame()
real_cell_candidate_charge_profile_interper = interp1d(real_cell_candidate_charge_profile['Q'],
real_cell_candidate_charge_profile['Voltage'],
bounds_error=False,
fill_value=(
self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE))
real_cell_candidate_charge_profile_aligned['Voltage_aligned'] = real_cell_candidate_charge_profile_interper(
q_vec)
real_cell_candidate_charge_profile_aligned['Q_aligned'] = q_vec
return real_cell_candidate_charge_profile_aligned
def _impose_electrode_scale(self,
pe_pristine=pd.DataFrame(),
ne_1_pristine=pd.DataFrame(),
ne_2_pristine_pos=pd.DataFrame(),
ne_2_pristine_neg=pd.DataFrame(),
lli=0.0, q_pe=0.0, q_ne=0.0, x_ne_2=0.0):
"""
Scales the reference electrodes according to specified capacities and
offsets their capacities according to lli. Blends negative electrode materials.
Inputs:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
lli (float): Loss of Lithium Inventory - capacity of the misalignment between
cathode and anode zero-capacity
q_pe (float): capacity of the positive electrode (cathode)
q_ne (float): capacity of the negative electrode (anode)
x_ne_2 (float): fraction of ne_2_pristine_pos or ne_2_pristine_neg
(positive or negative value, respectively) to ne_1_pristine
Outputs:
pe_degraded (Dataframe): positive electrode with imposed capacity
scale to emulate degradation
ne_degraded (Dataframe): negative electrode with imposed capacity
scale and capacity offset to emulate degradation
"""
# Blend negative electrodes
ne_pristine = blend_electrodes(ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg, x_ne_2)
# rescaling pristine electrodes to q_pe and q_ne
pe_q_scaled = pe_pristine.copy()
pe_q_scaled['Q_aligned'] = (pe_q_scaled['SOC_aligned'] / 100) * q_pe
ne_q_scaled = ne_pristine.copy()
ne_q_scaled['Q_aligned'] = (ne_q_scaled['SOC_aligned'] / 100) * q_ne
# translate pristine ne electrode with lli
ne_q_scaled['Q_aligned'] = ne_q_scaled['Q_aligned'] + lli
# Re-interpolate to align dataframes for differencing
lower_q = np.min((np.min(pe_q_scaled['Q_aligned']),
np.min(ne_q_scaled['Q_aligned'])))
upper_q = np.max((np.max(pe_q_scaled['Q_aligned']),
np.max(ne_q_scaled['Q_aligned'])))
q_vec = np.linspace(lower_q, upper_q, 1001)
# Actually aligning the electrode Q's
pe_pristine_interper = interp1d(pe_q_scaled['Q_aligned'],
pe_q_scaled['Voltage_aligned'], bounds_error=False)
pe_degraded = pe_q_scaled.copy()
pe_degraded['Q_aligned'] = q_vec
pe_degraded['Voltage_aligned'] = pe_pristine_interper(q_vec)
ne_pristine_interper = interp1d(ne_q_scaled['Q_aligned'],
ne_q_scaled['Voltage_aligned'], bounds_error=False)
ne_degraded = ne_q_scaled.copy()
ne_degraded['Q_aligned'] = q_vec
ne_degraded['Voltage_aligned'] = ne_pristine_interper(q_vec)
# Returning pe and ne degraded on an Ah basis
return pe_degraded, ne_degraded
def halfcell_degradation_matching_ah(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage. Interpolates real and emulated data onto
a common capacity axis.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_aligned (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_aligned (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
(pe_pristine,
ne_1_pristine,
ne_2_pristine_pos,
ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned) = params
        # output degraded ne and pe (on an Ah basis, with electrode alignment
# (NaNs for voltage, when no capacity actually at the corresponding capacity index))
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne, x_ne_2)
# PE - NE = full cell voltage
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned'].copy()
zeroing_value = emulated_full_cell_with_degradation['Q_aligned'].loc[
np.nanargmin(emulated_full_cell_with_degradation['Voltage_aligned'])
]
emulated_full_cell_with_degradation_zeroed['Q_aligned'] = \
(emulated_full_cell_with_degradation['Q_aligned'].copy() - zeroing_value)
pe_out_zeroed = pe_out.copy()
pe_out_zeroed['Q_aligned'] = pe_out['Q_aligned'] - zeroing_value
ne_out_zeroed = ne_out.copy()
ne_out_zeroed['Q_aligned'] = ne_out['Q_aligned'] - zeroing_value
# Interpolate full cell profiles across same Q range
min_q = np.min(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
max_q = np.max(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
bounds_error=False)
real_interper = interp1d(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
real_cell_candidate_charge_profile_aligned['Voltage_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
bounds_error=False)
q_vec = np.linspace(min_q, max_q, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['Q_aligned'] = q_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(q_vec)
real_aligned = pd.DataFrame()
real_aligned['Q_aligned'] = q_vec
real_aligned['Voltage_aligned'] = real_interper(q_vec)
return pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned
def get_dqdv_over_v_from_degradation_matching_ah(self, x, *params):
"""
        This function imposes degradation scaling, then outputs the dQdV representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dq_dv_over_v_real (Dataframe): dqdv across voltage for the real cell data
dq_dv_over_v_emulated (Dataframe): dqdv across voltage for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
# Calculate dqdv from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['Q_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Q_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
# Include original data
dq_dv_real['Q_aligned'] = df_real_interped['Q_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate dQdV and Q over V, aligns real and emulated over V
voltage_vec = np.linspace(self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE, 1001)
v_dq_dv_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['dQdV'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['Q_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
v_dq_dv_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['dQdV'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['Q_aligned'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
dq_dv_over_v_real = pd.DataFrame(v_dq_dv_interper_real(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_real['Q_aligned'] = v_q_interper_real(voltage_vec)
dq_dv_over_v_real['Voltage_aligned'] = voltage_vec
dq_dv_over_v_emulated = pd.DataFrame(v_dq_dv_interper_emulated(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_emulated['Q_aligned'] = v_q_interper_emulated(voltage_vec)
dq_dv_over_v_emulated['Voltage_aligned'] = voltage_vec
return (pe_out_zeroed,
ne_out_zeroed,
dq_dv_over_v_real,
dq_dv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_dvdq_over_q_from_degradation_matching_ah(self, x, *params):
"""
        This function imposes degradation scaling, then outputs the dVdQ representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dv_dq_real (Dataframe): dVdQ across capacity for the real cell data
dv_dq_emulated (Dataframe): dVdQ across capacity for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
        # Calculate dVdQ from full cell profiles
dv_dq_real = pd.DataFrame(np.gradient(df_real_interped['Voltage_aligned'], df_real_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
dv_dq_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Voltage_aligned'], emulated_full_cell_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
# Include original data
dv_dq_real['Q_aligned'] = df_real_interped['Q_aligned']
dv_dq_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dv_dq_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Q interpolation not needed, as interpolated over Q by default
return (pe_out_zeroed,
ne_out_zeroed,
dv_dq_real,
dv_dq_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_v_over_q_from_degradation_matching_ah(self, x, *params):
"""
        This function imposes degradation scaling, then outputs the V-Q representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned) = \
self.halfcell_degradation_matching_ah(x, *params)
min_soc_full_cell = np.min(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
max_soc_full_cell = np.max(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
soc_vec_full_cell = np.linspace(min_soc_full_cell, max_soc_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
real_full_cell_interper = interp1d(real_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
real_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = soc_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(soc_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['Q_aligned'] = soc_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(soc_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped
def get_v_over_q_from_degradation_matching_ah_no_real(self, x, *params):
"""
        This function imposes degradation scaling, then outputs the V-Q representation of the
        emulated cell data, in the absence of real cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, emulated_aligned) = \
self.halfcell_degradation_matching_ah_no_real(x, *params)
min_q_full_cell = np.min(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
max_q_full_cell = np.max(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
q_vec_full_cell = np.linspace(min_q_full_cell, max_q_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = q_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(q_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, emulated_full_cell_interped
def halfcell_degradation_matching_ah_no_real(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_aligned (Dataframe): full cell data corresponding to the imposed degradation
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
pe_pristine, ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg = params
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne,
x_ne_2)
        # outputs degraded ne and pe (on an Ah basis, with electrode alignment (NaNs for voltage, when no overlap))
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
        emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatibility
import pandas as pd
import pytest
from eland.dataframe import DEFAULT_NUM_ROWS_DISPLAYED
from eland.tests.common import TestData, assert_pandas_eland_series_equal
class TestDataFrameRepr(TestData):
@classmethod
def setup_class(cls):
# conftest.py changes this default - restore to original setting
pd.set_option("display.max_rows", 60)
"""
to_string
"""
def test_simple_lat_lon(self):
"""
        Note on nested object order - this can change when the document is returned by
        Elasticsearch; note this could be a bug in ES...
PUT my_index/doc/1
{
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
"_source": {
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
{
"_source": "location"
}
"_source": {
"location": {
"lon": "8.570556",
"lat": "50.033333"
}
}
Hence we store the pandas df source json as 'lon', 'lat'
"""
pd_dest_location = self.pd_flights()["DestLocation"].head(1)
ed_dest_location = self.ed_flights()["DestLocation"].head(1)
assert_pandas_eland_series_equal(pd_dest_location, ed_dest_location)
def test_num_rows_to_string(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here (compare to pandas with max_rows set)
self.num_rows_to_string(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_string(10, 5, 5)
self.num_rows_to_string(100, 200, 200)
def num_rows_to_string(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()[["DestLocation", "OriginLocation"]]
pd_flights = self.pd_flights()[["DestLocation", "OriginLocation"]]
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_string(max_rows=max_rows_eland)
pd_head_str = pd_head.to_string(max_rows=max_rows_pandas)
# print("\n", ed_head_str)
# print("\n", pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_string(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_s = ed_ecom[ed_ecom["currency"] == "USD"].to_string()
pd_ecom_s = pd_ecom[pd_ecom["currency"] == "USD"].to_string()
assert ed_ecom_s == pd_ecom_s
"""
repr
"""
def test_num_rows_repr(self):
self.num_rows_repr(
pd.get_option("display.max_rows") - 1, pd.get_option("display.max_rows") - 1
)
self.num_rows_repr(
pd.get_option("display.max_rows"), pd.get_option("display.max_rows")
)
self.num_rows_repr(
pd.get_option("display.max_rows") + 1, pd.get_option("display.min_rows")
)
def num_rows_repr(self, rows, num_rows_printed):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = repr(ed_head)
pd_head_str = repr(pd_head)
if num_rows_printed < rows:
# add 1 for ellipsis
num_rows_printed = num_rows_printed + 1
# number of rows is num_rows_printed + 3 (header, summary)
assert (num_rows_printed + 3) == len(ed_head_str.splitlines())
assert pd_head_str == ed_head_str
def test_empty_dataframe_repr(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_r = repr(ed_ecom[ed_ecom["currency"] == "USD"])
pd_ecom_r = repr(pd_ecom[pd_ecom["currency"] == "USD"])
assert ed_ecom_r == pd_ecom_r
"""
to_html
"""
def test_num_rows_to_html(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here
self.num_rows_to_html(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_html(10, 5, 5)
self.num_rows_to_html(100, 200, 200)
def num_rows_to_html(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_html(max_rows=max_rows_eland)
pd_head_str = pd_head.to_html(max_rows=max_rows_pandas)
# print(ed_head_str)
# print(pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_to_html(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_h = ed_ecom[ed_ecom["currency"] == "USD"].to_html()
pd_ecom_h = pd_ecom[pd_ecom["currency"] == "USD"].to_html()
assert ed_ecom_h == pd_ecom_h
"""
_repr_html_
"""
def test_num_rows_repr_html(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
show_dimensions = pd.get_option("display.show_dimensions")
# TODO - there is a bug in 'show_dimensions' as it gets added after the last </div>
# For now test without this
pd.set_option("display.show_dimensions", False)
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_repr_html(pd.get_option("display.max_rows") - 1)
self.num_rows_repr_html(pd.get_option("display.max_rows"))
self.num_rows_repr_html(
            pd.get_option("display.max_rows") + 1
        )
import os
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
import geopandas as gpd
from shapely.geometry import mapping, Point, Polygon
mpl.rcParams['pdf.fonttype'] = 42
fsm_path = '/Volumes/fsmresfiles/PrevMed/Covid-19-Modeling'
idph_data_path = os.path.join(fsm_path, 'IDPH line list',)
ltcf_data_path = os.path.join(fsm_path, 'IDPH LTCF')
cleaned_line_list_fname = os.path.join(idph_data_path,
'LL_200701_JGcleaned_no_race.csv')
ltcf_fname = os.path.join(ltcf_data_path, 'Modelors LTC Report_200703.xlsx')
cleaned_ltcf_fname = os.path.join(ltcf_data_path, 'Modelors LTC Report_200703_first_specimen.csv')
box_data_path = '/Users/jlg1657/Box/NU-malaria-team/data/covid_IDPH'
project_path = '/Users/jlg1657/Box/NU-malaria-team/projects/covid_chicago'
plot_path = os.path.join(project_path, 'Plots + Graphs')
shp_path = os.path.join(box_data_path, 'shapefiles')
def load_daily_ll_deaths() :
df = pd.read_csv(cleaned_line_list_fname)
df = df.dropna(subset=['deceased_date'])
df = df.groupby('deceased_date')['id'].agg(len).reset_index()
df = df.rename(columns={'id' : 'daily_deaths_line_list',
'deceased_date' : 'Deceased Date'})
df['Deceased Date'] = pd.to_datetime(df['Deceased Date'])
return df
def clean_ltcf() :
df = pd.read_excel(ltcf_fname, skiprows=3)
gdf = df.groupby('State Case Number')['Outbreak ID'].agg(len).reset_index()
gdf = gdf.rename(columns={'Outbreak ID': 'num'})
gdf = gdf[gdf['num'] > 1]
adf = pd.DataFrame()
for d, ddf in df.groupby('State Case Number') :
sdf = ddf.head(1)
adf = pd.concat([adf, sdf])
adf.to_csv(cleaned_ltcf_fname, index=False)
def load_daily_ltcf_deaths() :
df = pd.read_csv(cleaned_ltcf_fname)
df = df.dropna(subset=['Deceased Date'])
df = df.groupby('Deceased Date')['State Case Number'].agg(len).reset_index()
df = df.rename(columns={'State Case Number' : 'daily_deaths_LTCF'})
df['Deceased Date'] = pd.to_datetime(df['Deceased Date'])
return df
def merge_LL_LTCF_deaths() :
ll_df = load_daily_ll_deaths()
ltcf_df = load_daily_ltcf_deaths()
    df = pd.merge(left=ll_df, right=ltcf_df, on='Deceased Date', how='outer')
import torch, os
import numpy as np
from MiniImagenet_memorization import MiniImagenet as MiniImagenet_fix
from torch.utils.data import DataLoader
import random, argparse
from meta import Meta_mini
from utils import get_config, save_model, name_path, load_model
import time
import pandas as pd
# get arguments
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int)
parser.add_argument('--s', type=int)
parser.add_argument('--q', type=int)
parser.add_argument('--zero', type=int)
parser.add_argument('--zero_interval', type=int, default=1)
parser.add_argument('--order', type=str)
parser.add_argument('--inivar', type=float)
parser.add_argument('--seed', type=int)
parser.add_argument('--device', type=int)
parser.add_argument('--epoch', type=int, default=12)
parser.add_argument('--resume_epoch', type=int, default=-1)
args = parser.parse_args()
n_way = args.n
k_shot = args.s
k_qry = args.q
if args.zero == 1:
apply_zero_trick = True
elif args.zero == 0:
apply_zero_trick = False
maml_order = args.order
init_var = args.inivar
seed = args.seed
device = torch.device('cuda:{}'.format(args.device))
num_epoch = args.epoch
if args.s == 1: task_num, batchsz = 4, 10000
if args.s == 5: task_num, batchsz = 2, 5000
outer_lr, inner_lr = 0.001, 0.01
train_update_steps, test_update_steps = 5, 10
# Set seeds
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
# Directories
root = "./results_memo/" # The directory for saving files
data_root = "../data_miniImagenet/" # The directory for data
save_path = name_path(root, n_way, k_shot, k_qry,
maml_order, args.zero, args.zero_interval, init_var,
seed)
print("Save the results at: ", save_path)
mini = MiniImagenet_fix(data_root, mode='train', n_way=n_way, k_shot=k_shot, k_query=k_qry, batchsz=batchsz, resize=84)
mini_test = MiniImagenet_fix(data_root, mode='test', n_way=n_way, k_shot=k_shot, k_query=k_qry, batchsz=400, resize=84)
maml = Meta_mini(n_way, k_shot, k_qry, task_num,
train_update_steps, test_update_steps,
inner_lr, outer_lr, get_config(n_way), device).to(device)
# Set initial norm
maml.set_last_layer_variance(init_var)
if init_var == 0:
maml.set_last_layer_to_zero()
test_text = []
train_text = []
start = time.time()
count_steps = 0
test_zero_text = []
test_text = []
for epoch in range(num_epoch):
db = DataLoader(mini, task_num, shuffle=True, num_workers=8, pin_memory=True)
# Set resume training
if args.resume_epoch>epoch:
continue
elif args.resume_epoch==epoch:
maml.load_model(save_path, epoch)
for step, (x_spt, y_spt, x_qry, y_qry) in enumerate(db):
x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
# Apply zero trick or not
if apply_zero_trick and (count_steps%args.zero_interval==0):
maml.set_last_layer_to_zero()
# Choose first order or second order MAML for training
if maml_order == "first":
accs = maml.forward_FOMAML(x_spt, y_spt, x_qry, y_qry)
elif maml_order == "second":
accs = maml.forward_SOMAML(x_spt, y_spt, x_qry, y_qry)
# Finetuning
if step % 200 == 0:
train_text.append(accs)
print(time.time()-start)
db_test = DataLoader(mini_test, 1, shuffle=True, num_workers=8, pin_memory=True)
accs_all_test = []
accs_all_test_zero = []
for x_spt, y_spt, x_qry, y_qry in db_test:
x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
# Original finetune method
accs = maml.finetunning(x_spt, y_spt, x_qry, y_qry)
accs_all_test.append(accs)
# finetune with applying zeroing trick
accs = maml.finetunning_zero(x_spt, y_spt, x_qry, y_qry)
accs_all_test_zero.append(accs)
accs = np.array(accs_all_test).mean(axis=0).astype(np.float16)
test_text.append(accs)
accs = np.array(accs_all_test_zero).mean(axis=0).astype(np.float16)
test_zero_text.append(accs)
print(time.time()-start)
if (count_steps) % 200 == 0:
maml.save_model(save_path, epoch, step)
txt_path = os.path.join(save_path, "test_E{}S{}.csv".format(epoch, step))
df = pd.DataFrame(test_text)
df.to_csv(txt_path,index=False)
txt_path = os.path.join(save_path, "test_zero_E{}S{}.csv".format(epoch, step))
            df = pd.DataFrame(test_zero_text)
            df.to_csv(txt_path, index=False)
import pandas as pd
import random
from collections import deque
from .Broker import Broker
from .Order import Order
class BacktestBroker(Broker):
def __init__(self, balance, maxLeverage=1, interest=0, commission=0.001,
liveTrading=False, symbol='BTC'):
self._balance = balance
self._cashBalance = balance
self._borrowed = 0
self._borrowedShares = 0
self.maxLeverage = maxLeverage
self.interest = interest
self.commission = commission
self.liveTrading = liveTrading
self.symbol = symbol
self.price = None
self.transactions = pd.DataFrame(columns=[
'amount',
'price',
'symbol',
], index=pd.to_datetime([]))
self.book = pd.DataFrame(columns=[
'balance',
'cashBalance',
'borrowed',
'borrowedShares',
symbol,
'returns',
        ], index=pd.to_datetime([]))
"""
Estimators for systems of equations
References
----------
Greene, <NAME>. "Econometric analysis 4th edition." International edition,
New Jersey: Prentice Hall (2000).
StataCorp. 2013. Stata 13 Base Reference Manual. College Station, TX: Stata
Press.
<NAME>., & <NAME>. (2007). systemfit: A Package for Estimating
Systems of Simultaneous Equations in R. Journal of Statistical Software,
23(4), 1 - 40. doi:http://dx.doi.org/10.18637/jss.v023.i04
"""
from __future__ import annotations
from collections import abc
from functools import reduce
import textwrap
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union, cast
import warnings
import numpy as np
from numpy.linalg import inv, lstsq, matrix_rank, solve
from pandas import DataFrame, Index, Series, concat
from linearmodels.iv._utility import IVFormulaParser
from linearmodels.iv.data import IVData
from linearmodels.shared.exceptions import missing_warning
from linearmodels.shared.hypotheses import InvalidTestStatistic, WaldTestStatistic
from linearmodels.shared.linalg import has_constant
from linearmodels.shared.utility import AttrDict
from linearmodels.system._utility import (
LinearConstraint,
blocked_column_product,
blocked_cross_prod,
blocked_diag_product,
blocked_inner_prod,
inv_matrix_sqrt,
)
from linearmodels.system.covariance import (
ClusteredCovariance,
GMMHeteroskedasticCovariance,
GMMHomoskedasticCovariance,
GMMKernelCovariance,
HeteroskedasticCovariance,
HomoskedasticCovariance,
KernelCovariance,
)
from linearmodels.system.gmm import (
HeteroskedasticWeightMatrix,
HomoskedasticWeightMatrix,
KernelWeightMatrix,
)
from linearmodels.system.results import GMMSystemResults, SystemResults
from linearmodels.typing import (
ArrayLike,
ArraySequence,
Float64Array,
Literal,
OptionalArrayLike,
)
__all__ = ["SUR", "IV3SLS", "IVSystemGMM", "LinearConstraint"]
UNKNOWN_EQ_TYPE = """
Contents of each equation must be either a dictionary with keys 'dependent'
and 'exog' or a 2-element tuple of the form (dependent, exog).
equations[{key}] was {type}
"""
COV_TYPES = {
"unadjusted": "unadjusted",
"homoskedastic": "unadjusted",
"robust": "robust",
"heteroskedastic": "robust",
"kernel": "kernel",
"hac": "kernel",
"clustered": "clustered",
}
COV_EST = {
"unadjusted": HomoskedasticCovariance,
"robust": HeteroskedasticCovariance,
"kernel": KernelCovariance,
"clustered": ClusteredCovariance,
}
GMM_W_EST = {
"unadjusted": HomoskedasticWeightMatrix,
"robust": HeteroskedasticWeightMatrix,
"kernel": KernelWeightMatrix,
}
GMM_COV_EST = {
"unadjusted": GMMHomoskedasticCovariance,
"robust": GMMHeteroskedasticCovariance,
"kernel": GMMKernelCovariance,
}
def _missing_weights(weights: Dict[str, Optional[ArrayLike]]) -> None:
"""Raise warning if missing weighs found"""
missing = [key for key in weights if weights[key] is None]
if missing:
msg = "Weights not found for equation labels:\n{0}".format(", ".join(missing))
warnings.warn(msg, UserWarning)
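def _example_missing_weights() -> None:
    # Illustrative sketch, not part of the library: an equation whose weights entry is
    # None is reported in a single UserWarning that lists the missing labels.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _missing_weights({"eq1": None, "eq2": np.ones(10)})
    assert any("eq1" in str(w.message) for w in caught)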
def _parameters_from_xprod(
xpx: Float64Array, xpy: Float64Array, constraints: Optional[LinearConstraint] = None
) -> Float64Array:
r"""
Estimate regression parameters from cross produces
Parameters
----------
xpx : ndarray
Cross product measuring variation in x (nvar by nvar)
xpy : ndarray
Cross produce measuring covariation between x and y (nvar by 1)
constraints : LinearConstraint
Constraints to use in estimation
Returns
-------
params : ndarray
Estimated parameters (nvar by 1)
Notes
-----
xpx and xpy can be any form similar to the two inputs into the usual
parameter estimator for a linear regression. In particular, many
estimators can be written as
.. math::
(x^\prime w x)^{-1}(x^\prime w y)
for some weight matrix :math:`w`.
"""
if constraints is not None:
cons = constraints
xpy = cons.t.T @ xpy - cons.t.T @ xpx @ cons.a.T
xpx = cons.t.T @ xpx @ cons.t
params_c = solve(xpx, xpy)
params = cons.t @ params_c + cons.a.T
else:
params = solve(xpx, xpy)
return params
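def _example_parameters_from_xprod() -> None:
    # Illustrative sketch, not part of the library: without constraints the estimator
    # solves the normal equations, so it should agree with an ordinary least-squares
    # fit computed directly from x and y.
    rs = np.random.RandomState(0)
    x = rs.standard_normal((100, 3))
    y = x @ np.array([[1.0], [-2.0], [0.5]]) + rs.standard_normal((100, 1))
    direct = lstsq(x, y, rcond=None)[0]
    via_xprod = _parameters_from_xprod(x.T @ x, x.T @ y)
    assert np.allclose(direct, via_xprod)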
class SystemFormulaParser(object):
def __init__(
self,
formula: Union[Mapping[str, str], str],
data: DataFrame,
weights: Optional[Mapping[str, ArrayLike]] = None,
eval_env: int = 6,
) -> None:
if not isinstance(formula, (Mapping, str)):
raise TypeError("formula must be a string or dictionary-like")
self._formula: Union[Mapping[str, str], str] = formula
self._data = data
self._weights = weights
self._parsers: Dict[str, IVFormulaParser] = {}
self._weight_dict: Dict[str, Optional[Series]] = {}
self._eval_env = eval_env
self._clean_formula: Dict[str, str] = {}
self._parse()
@staticmethod
def _prevent_autoconst(formula: str) -> str:
if not (" 0+" in formula or " 0 +" in formula):
formula = "~ 0 +".join(formula.split("~"))
return formula
def _parse(self) -> None:
formula = self._formula
data = self._data
weights = self._weights
parsers = self._parsers
weight_dict = self._weight_dict
cln_formula = self._clean_formula
if isinstance(formula, Mapping):
for formula_key in formula:
f = formula[formula_key]
f = self._prevent_autoconst(f)
parsers[formula_key] = IVFormulaParser(f, data, eval_env=self._eval_env)
if weights is not None:
if formula_key in weights:
weight_dict[formula_key] = weights[formula_key]
else:
weight_dict[formula_key] = None
cln_formula[formula_key] = f
else:
formula = formula.replace("\n", " ").strip()
parts = formula.split("}")
for part in parts:
key = base_key = None
part = part.strip()
if part == "":
continue
part = part.replace("{", "")
if ":" in part.split("~")[0]:
base_key, part = part.split(":")
key = base_key = base_key.strip()
part = part.strip()
f = self._prevent_autoconst(part)
if base_key is None:
base_key = key = f.split("~")[0].strip()
count = 0
while key in parsers:
key = base_key + ".{0}".format(count)
count += 1
assert isinstance(key, str)
parsers[key] = IVFormulaParser(f, data, eval_env=self._eval_env)
cln_formula[key] = f
if weights is not None:
if key in weights:
weight_dict[key] = weights[key]
else:
weight_dict[key] = None
_missing_weights(weight_dict)
self._weight_dict = weight_dict
def _get_variable(self, variable: str) -> Dict[str, Optional[DataFrame]]:
return dict(
[(key, getattr(self._parsers[key], variable)) for key in self._parsers]
)
@property
def formula(self) -> Dict[str, str]:
"""Cleaned version of formula"""
return self._clean_formula
@property
def eval_env(self) -> int:
"""Set or get the eval env depth"""
return self._eval_env
@eval_env.setter
def eval_env(self, value: int) -> None:
self._eval_env = value
# Update parsers for new level
parsers = self._parsers
new_parsers = {}
for key in parsers:
parser = parsers[key]
new_parsers[key] = IVFormulaParser(
parser._formula, parser._data, self._eval_env
)
self._parsers = new_parsers
@property
def equation_labels(self) -> List[str]:
return list(self._parsers.keys())
@property
def data(self) -> Dict[str, Dict[str, ArrayLike]]:
out = {}
dep = self.dependent
for key in dep:
out[key] = {"dependent": dep[key]}
exog = self.exog
for key in exog:
out[key]["exog"] = exog[key]
endog = self.endog
for key in endog:
out[key]["endog"] = endog[key]
instr = self.instruments
for key in instr:
out[key]["instruments"] = instr[key]
for key in self._weight_dict:
if self._weight_dict[key] is not None:
out[key]["weights"] = self._weight_dict[key]
return out
@property
def dependent(self) -> Dict[str, Optional[DataFrame]]:
return self._get_variable("dependent")
@property
def exog(self) -> Dict[str, Optional[DataFrame]]:
return self._get_variable("exog")
@property
def endog(self) -> Dict[str, Optional[DataFrame]]:
return self._get_variable("endog")
@property
def instruments(self) -> Dict[str, Optional[DataFrame]]:
return self._get_variable("instruments")
class _SystemModelBase(object):
r"""
Base class for system estimators
Parameters
----------
equations : dict
Dictionary-like structure containing dependent, exogenous, endogenous
and instrumental variables. Each key is an equations label and must
be a string. Each value must be either a tuple of the form (dependent,
exog, endog, instrument[, weights]) or a dictionary with keys 'dependent',
and at least one of 'exog' or 'endog' and 'instruments'. When using a
tuple, values must be provided for all 4 variables, although either
empty arrays or `None` can be passed if a category of variable is not
included in a model. The dictionary may contain optional keys for
'exog', 'endog', 'instruments', and 'weights'. 'exog' can be omitted
if all variables in an equation are endogenous. Alternatively, 'exog'
can contain either an empty array or `None` to indicate that an
equation contains no exogenous regressors. Similarly 'endog' and
'instruments' can either be omitted or may contain an empty array (or
`None`) if all variables in an equation are exogenous.
sigma : array_like
Prespecified residual covariance to use in GLS estimation. If not
provided, FGLS is implemented based on an estimate of sigma.
"""
def __init__(
self,
equations: Mapping[str, Union[Mapping[str, ArrayLike], Sequence[ArrayLike]]],
*,
sigma: Optional[ArrayLike] = None,
) -> None:
if not isinstance(equations, Mapping):
raise TypeError("equations must be a dictionary-like")
for key in equations:
if not isinstance(key, str):
raise ValueError("Equation labels (keys) must be strings")
self._equations = equations
self._sigma = None
if sigma is not None:
self._sigma = np.asarray(sigma)
k = len(self._equations)
if self._sigma.shape != (k, k):
raise ValueError(
"sigma must be a square matrix with dimensions "
"equal to the number of equations"
)
self._param_names: List[str] = []
self._eq_labels: List[str] = []
self._dependent: List[IVData] = []
self._exog: List[IVData] = []
self._instr: List[IVData] = []
self._endog: List[IVData] = []
self._y: List[Float64Array] = []
self._x: List[Float64Array] = []
self._wy: List[Float64Array] = []
self._wx: List[Float64Array] = []
self._w: List[Float64Array] = []
self._z: List[Float64Array] = []
self._wz: List[Float64Array] = []
self._weights: List[IVData] = []
self._formula: Optional[Union[str, Dict[str, str]]] = None
self._constraints: Optional[LinearConstraint] = None
self._constant_loc: Optional[Series] = None
self._has_constant = None
self._common_exog = False
self._original_index: Optional[Index] = None
self._model_name = "Three Stage Least Squares (3SLS)"
self._validate_data()
@property
def formula(self) -> Optional[Union[str, Dict[str, str]]]:
"""Set or get the formula used to construct the model"""
return self._formula
@formula.setter
def formula(self, value: Optional[Union[str, Dict[str, str]]]) -> None:
self._formula = value
def _validate_data(self) -> None:
ids = []
for i, key in enumerate(self._equations):
self._eq_labels.append(key)
eq_data = self._equations[key]
dep_name = "dependent_" + str(i)
exog_name = "exog_" + str(i)
endog_name = "endog_" + str(i)
instr_name = "instr_" + str(i)
if isinstance(eq_data, (tuple, list)):
dep = IVData(eq_data[0], var_name=dep_name)
self._dependent.append(dep)
current_id: Tuple[int, ...] = (id(eq_data[1]),)
self._exog.append(
IVData(eq_data[1], var_name=exog_name, nobs=dep.shape[0])
)
endog = IVData(eq_data[2], var_name=endog_name, nobs=dep.shape[0])
if endog.shape[1] > 0:
current_id += (id(eq_data[2]),)
ids.append(current_id)
self._endog.append(endog)
self._instr.append(
IVData(eq_data[3], var_name=instr_name, nobs=dep.shape[0])
)
if len(eq_data) == 5:
self._weights.append(IVData(eq_data[4]))
else:
dep_shape = self._dependent[-1].shape
self._weights.append(IVData(np.ones(dep_shape)))
elif isinstance(eq_data, (dict, Mapping)):
dep = IVData(eq_data["dependent"], var_name=dep_name)
self._dependent.append(dep)
exog = eq_data.get("exog", None)
self._exog.append(IVData(exog, var_name=exog_name, nobs=dep.shape[0]))
current_id = (id(exog),)
endog_values = eq_data.get("endog", None)
endog = IVData(endog_values, var_name=endog_name, nobs=dep.shape[0])
self._endog.append(endog)
if "endog" in eq_data:
current_id += (id(eq_data["endog"]),)
ids.append(current_id)
instr_values = eq_data.get("instruments", None)
instr = IVData(instr_values, var_name=instr_name, nobs=dep.shape[0])
self._instr.append(instr)
if "weights" in eq_data:
self._weights.append(IVData(eq_data["weights"]))
else:
self._weights.append(IVData(np.ones(dep.shape)))
else:
                msg = UNKNOWN_EQ_TYPE.format(key=key, type=type(eq_data))
raise TypeError(msg)
self._has_instruments = False
for instr in self._instr:
self._has_instruments = self._has_instruments or (instr.shape[1] > 1)
for i, comps in enumerate(
zip(self._dependent, self._exog, self._endog, self._instr, self._weights)
):
shapes = [a.shape[0] for a in comps]
if min(shapes) != max(shapes):
raise ValueError(
"Dependent, exogenous, endogenous and "
"instruments, and weights, if provided, do "
"not have the same number of observations in "
"{eq}".format(eq=self._eq_labels[i])
)
self._drop_missing()
self._common_exog = len(set(ids)) == 1
if self._common_exog:
# Common exog requires weights are also equal
w0 = self._weights[0].ndarray
for w in self._weights:
self._common_exog = self._common_exog and bool(np.all(w.ndarray == w0))
constant = []
constant_loc = []
for dep, exog, endog, instr, w, label in zip(
self._dependent,
self._exog,
self._endog,
self._instr,
self._weights,
self._eq_labels,
):
y = cast(Float64Array, dep.ndarray)
x = np.concatenate([exog.ndarray, endog.ndarray], 1)
z = np.concatenate([exog.ndarray, instr.ndarray], 1)
w_arr = cast(Float64Array, w.ndarray)
w_arr = w_arr / np.nanmean(w_arr)
w_sqrt = np.sqrt(w_arr)
self._w.append(w_arr)
self._y.append(y)
self._x.append(x)
self._z.append(z)
self._wy.append(y * w_sqrt)
self._wx.append(x * w_sqrt)
self._wz.append(z * w_sqrt)
cols = list(exog.cols) + list(endog.cols)
self._param_names.extend([label + "_" + col for col in cols])
if y.shape[0] <= x.shape[1]:
raise ValueError(
"Fewer observations than variables in "
"equation {eq}".format(eq=label)
)
if matrix_rank(x) < x.shape[1]:
raise ValueError(
"Equation {eq} regressor array is not full " "rank".format(eq=label)
)
if x.shape[1] > z.shape[1]:
raise ValueError(
"Equation {eq} has fewer instruments than "
"endogenous variables.".format(eq=label)
)
if z.shape[1] > z.shape[0]:
raise ValueError(
"Fewer observations than instruments in "
"equation {eq}".format(eq=label)
)
if matrix_rank(z) < z.shape[1]:
raise ValueError(
"Equation {eq} instrument array is full " "rank".format(eq=label)
)
for rhs in self._x:
const, const_loc = has_constant(rhs)
constant.append(const)
constant_loc.append(const_loc)
self._has_constant = Series(
constant, index=[d.cols[0] for d in self._dependent]
)
self._constant_loc = constant_loc
def _drop_missing(self) -> None:
k = len(self._dependent)
nobs = self._dependent[0].shape[0]
self._original_index = self._dependent[0].rows.copy()
missing = np.zeros(nobs, dtype=bool)
values = [self._dependent, self._exog, self._endog, self._instr, self._weights]
for i in range(k):
for value in values:
nulls = value[i].isnull
if nulls.any():
missing |= np.asarray(nulls)
missing_warning(missing, stacklevel=4)
if np.any(missing):
for i in range(k):
self._dependent[i].drop(missing)
self._exog[i].drop(missing)
self._endog[i].drop(missing)
self._instr[i].drop(missing)
self._weights[i].drop(missing)
def __repr__(self) -> str:
return self.__str__() + "\nid: {0}".format(hex(id(self)))
def __str__(self) -> str:
out = self._model_name + ", "
out += "{0} Equations:\n".format(len(self._y))
eqns = ", ".join(self._equations.keys())
out += "\n".join(textwrap.wrap(eqns, 70))
if self._common_exog:
out += "\nCommon Exogenous Variables"
return out
def predict(
self,
params: ArrayLike,
*,
equations: Optional[Mapping[str, Mapping[str, ArrayLike]]] = None,
data: Optional[DataFrame] = None,
eval_env: int = 8,
) -> DataFrame:
"""
Predict values for additional data
Parameters
----------
params : array_like
Model parameters (nvar by 1)
equations : dict
Dictionary-like structure containing exogenous and endogenous
variables. Each key is an equations label and must
            match the labels used to fit the model. Each value must be a
dictionary with keys 'exog' and 'endog'. If predictions are not
            required for one or more of the model equations, these keys can
be omitted.
data : DataFrame
Values to use when making predictions from a model constructed
from a formula
eval_env : int
Depth to use when evaluating formulas.
Returns
-------
predictions : DataFrame
Fitted values from supplied data and parameters
Notes
-----
        If `data` is not None, then `equations` must be None.
Predictions from models constructed using formulas can
        be computed using either `equations`, which will treat these as
        arrays of values corresponding to the formula-processed data, or using
`data` which will be processed using the formula used to construct the
values corresponding to the original model specification.
When using `exog` and `endog`, the regressor array for a particular
equation is assembled as
`[equations[eqn]['exog'], equations[eqn]['endog']]` where `eqn` is
an equation label. These must correspond to the columns in the
estimated model.
"""
if data is not None:
assert self.formula is not None
parser = SystemFormulaParser(self.formula, data=data, eval_env=eval_env)
equations = parser.data
params = np.atleast_2d(np.asarray(params))
if params.shape[0] == 1:
params = params.T
nx = int(sum([_x.shape[1] for _x in self._x]))
if params.shape[0] != nx:
raise ValueError(
f"Parameters must have {nx} elements; found {params.shape[0]}."
)
loc = 0
out = AttrDict()
for i, label in enumerate(self._eq_labels):
kx = self._x[i].shape[1]
assert isinstance(equations, abc.Mapping)
if label in equations:
b = params[loc : loc + kx]
eqn = equations[label]
exog = eqn.get("exog", None)
endog = eqn.get("endog", None)
if exog is None and endog is None:
loc += kx
continue
if exog is not None:
exog_endog = IVData(exog).pandas
if endog is not None:
endog = IVData(endog)
exog_endog = concat([exog_endog, endog.pandas], axis=1)
else:
exog_endog = IVData(endog).pandas
fitted = np.asarray(exog_endog) @ b
fitted = DataFrame(fitted, index=exog_endog.index, columns=[label])
out[label] = fitted
loc += kx
out = reduce(
lambda left, right: left.merge(
right, how="outer", left_index=True, right_index=True
),
[out[key] for key in out],
)
return out
def _multivariate_ls_fit(self) -> Tuple[Float64Array, Float64Array]:
wy, wx, wxhat = self._wy, self._wx, self._wxhat
k = len(wxhat)
xpx = blocked_inner_prod(wxhat, np.eye(len(wxhat)))
_xpy = []
for i in range(k):
_xpy.append(wxhat[i].T @ wy[i])
xpy = np.vstack(_xpy)
beta = _parameters_from_xprod(xpx, xpy, constraints=self.constraints)
loc = 0
eps = []
for i in range(k):
nb = wx[i].shape[1]
b = beta[loc : loc + nb]
eps.append(wy[i] - wx[i] @ b)
loc += nb
eps_arr = np.hstack(eps)
return beta, eps_arr
def _construct_xhat(self) -> None:
k = len(self._x)
self._xhat = []
self._wxhat = []
for i in range(k):
x, z = self._x[i], self._z[i]
if z.shape == x.shape and np.all(z == x):
# OLS, no instruments
self._xhat.append(x)
self._wxhat.append(self._wx[i])
else:
delta = lstsq(z, x, rcond=None)[0]
xhat = z @ delta
self._xhat.append(xhat)
w = self._w[i]
self._wxhat.append(xhat * np.sqrt(w))
def _gls_estimate(
self,
eps: Float64Array,
nobs: int,
total_cols: int,
ci: Sequence[int],
full_cov: bool,
debiased: bool,
) -> Tuple[Float64Array, Float64Array, Float64Array, Float64Array]:
"""Core estimation routine for iterative GLS"""
wy, wx, wxhat = self._wy, self._wx, self._wxhat
if self._sigma is None:
sigma = eps.T @ eps / nobs
sigma *= self._sigma_scale(debiased)
else:
sigma = self._sigma
est_sigma = sigma
if not full_cov:
sigma = np.diag(np.diag(sigma))
sigma_inv = inv(sigma)
k = len(wy)
xpx = blocked_inner_prod(wxhat, sigma_inv)
xpy = np.zeros((total_cols, 1))
for i in range(k):
sy = np.zeros((nobs, 1))
for j in range(k):
sy += sigma_inv[i, j] * wy[j]
xpy[ci[i] : ci[i + 1]] = wxhat[i].T @ sy
beta = _parameters_from_xprod(xpx, xpy, constraints=self.constraints)
loc = 0
for j in range(k):
_wx = wx[j]
_wy = wy[j]
kx = _wx.shape[1]
eps[:, [j]] = _wy - _wx @ beta[loc : loc + kx]
loc += kx
return beta, eps, sigma, est_sigma
def _multivariate_ls_finalize(
self,
beta: Float64Array,
eps: Float64Array,
sigma: Float64Array,
cov_type: str,
**cov_config: bool,
) -> SystemResults:
k = len(self._wx)
# Covariance estimation
cov_estimator = COV_EST[cov_type]
cov_est = cov_estimator(
self._wxhat,
eps,
sigma,
sigma,
gls=False,
constraints=self._constraints,
**cov_config,
)
cov = cov_est.cov
individual = AttrDict()
debiased = cov_config.get("debiased", False)
for i in range(k):
wy = wye = self._wy[i]
w = self._w[i]
cons = bool(self.has_constant.iloc[i])
if cons:
wc = np.ones_like(wy) * np.sqrt(w)
wye = wy - wc @ lstsq(wc, wy, rcond=None)[0]
total_ss = float(wye.T @ wye)
stats = self._common_indiv_results(
i,
beta,
cov,
eps,
eps,
"OLS",
cov_type,
cov_est,
0,
debiased,
cons,
total_ss,
)
key = self._eq_labels[i]
individual[key] = stats
nobs = eps.size
results = self._common_results(
beta, cov, "OLS", 0, nobs, cov_type, sigma, individual, debiased
)
results["wresid"] = results.resid
results["cov_estimator"] = cov_est
results["cov_config"] = cov_est.cov_config
individual = results["individual"]
r2s = [individual[eq].r2 for eq in individual]
results["system_r2"] = self._system_r2(eps, sigma, "ols", False, debiased, r2s)
return SystemResults(results)
@property
def has_constant(self) -> Series:
"""Vector indicating which equations contain constants"""
return self._has_constant
def _f_stat(
self, stats: AttrDict, debiased: bool
) -> Union[WaldTestStatistic, InvalidTestStatistic]:
cov = stats.cov
k = cov.shape[0]
sel = list(range(k))
if stats.has_constant:
sel.pop(stats.constant_loc)
cov = cov[sel][:, sel]
params = stats.params[sel]
df = params.shape[0]
nobs = stats.nobs
null = "All parameters ex. constant are zero"
name = "Equation F-statistic"
try:
stat = float(params.T @ inv(cov) @ params)
except np.linalg.LinAlgError:
return InvalidTestStatistic(
"Covariance is singular, possibly due " "to constraints.", name=name
)
if debiased:
total_reg = np.sum([s.shape[1] for s in self._wx])
df_denom = len(self._wx) * nobs - total_reg
wald = WaldTestStatistic(stat / df, null, df, df_denom=df_denom, name=name)
else:
return WaldTestStatistic(stat, null=null, df=df, name=name)
return wald
def _common_indiv_results(
self,
index: int,
beta: Float64Array,
cov: Float64Array,
wresid: Float64Array,
resid: Float64Array,
method: str,
cov_type: str,
cov_est: Union[
HomoskedasticCovariance,
HeteroskedasticCovariance,
KernelCovariance,
ClusteredCovariance,
GMMHeteroskedasticCovariance,
GMMHomoskedasticCovariance,
],
iter_count: int,
debiased: bool,
constant: bool,
total_ss: float,
*,
weight_est: Optional[
Union[
HomoskedasticWeightMatrix,
HeteroskedasticWeightMatrix,
KernelWeightMatrix,
]
] = None,
) -> AttrDict:
loc = 0
for i in range(index):
loc += self._wx[i].shape[1]
i = index
stats = AttrDict()
# Static properties
stats["eq_label"] = self._eq_labels[i]
stats["dependent"] = self._dependent[i].cols[0]
stats["instruments"] = (
self._instr[i].cols if self._instr[i].shape[1] > 0 else None
)
stats["endog"] = self._endog[i].cols if self._endog[i].shape[1] > 0 else None
stats["method"] = method
stats["cov_type"] = cov_type
stats["cov_estimator"] = cov_est
stats["cov_config"] = cov_est.cov_config
stats["weight_estimator"] = weight_est
stats["index"] = self._dependent[i].rows
stats["original_index"] = self._original_index
stats["iter"] = iter_count
stats["debiased"] = debiased
stats["has_constant"] = constant
assert self._constant_loc is not None
stats["constant_loc"] = self._constant_loc[i]
# Parameters, errors and measures of fit
wxi = self._wx[i]
nobs, df = wxi.shape
b = beta[loc : loc + df]
e = wresid[:, [i]]
nobs = e.shape[0]
df_c = nobs - int(constant)
df_r = nobs - df
stats["params"] = b
stats["cov"] = cov[loc : loc + df, loc : loc + df]
stats["wresid"] = e
stats["nobs"] = nobs
stats["df_model"] = df
stats["resid"] = resid[:, [i]]
stats["fitted"] = self._x[i] @ b
stats["resid_ss"] = float(resid[:, [i]].T @ resid[:, [i]])
stats["total_ss"] = total_ss
stats["r2"] = 1.0 - stats.resid_ss / stats.total_ss
stats["r2a"] = 1.0 - (stats.resid_ss / df_r) / (stats.total_ss / df_c)
names = self._param_names[loc : loc + df]
offset = len(stats.eq_label) + 1
stats["param_names"] = [n[offset:] for n in names]
# F-statistic
stats["f_stat"] = self._f_stat(stats, debiased)
return stats
def _common_results(
self,
beta: Float64Array,
cov: Float64Array,
method: str,
iter_count: int,
nobs: int,
cov_type: str,
sigma: Float64Array,
individual: AttrDict,
debiased: bool,
) -> AttrDict:
results = AttrDict()
results["method"] = method
results["iter"] = iter_count
results["nobs"] = nobs
results["cov_type"] = cov_type
results["index"] = self._dependent[0].rows
results["original_index"] = self._original_index
names = list(individual.keys())
results["sigma"] = | DataFrame(sigma, columns=names, index=names) | pandas.DataFrame |
import os, sys
import numpy as np
import pandas as pd
from datetime import datetime, date
import csv
import ast
import shutil
import requests
import json
from zipfile import ZipFile
from bs4 import BeautifulSoup
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
# DOWNLOAD
def get_facebook_data():
url = 'https://data.humdata.org/dataset/movement-range-maps'
content = requests.get(url).content
soup = BeautifulSoup(content, 'html.parser')
download_url = soup.find_all('a',\
{"class": "btn btn-empty btn-empty-blue hdx-btn resource-url-analytics ga-download"},\
href=True)[1]['href']
download_url = 'https://data.humdata.org' + download_url
zip = requests.get(download_url)
os.mkdir('temp')
temp_path = ('temp/data.zip')
with open(temp_path, 'wb') as temp:
temp.write(zip.content)
with ZipFile(temp_path, 'r') as unzip:
unzip.extractall('temp/')
files = os.listdir('temp')
return files
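# Hedged sketch (URL and paths are hypothetical): the download-and-unzip pattern
# used in get_facebook_data, without the BeautifulSoup link-scraping step.
def _demo_download_zip(url='https://example.org/movement-range.zip'):
    os.makedirs('temp', exist_ok=True)
    temp_path = 'temp/data.zip'
    with open(temp_path, 'wb') as temp:
        temp.write(requests.get(url).content)
    with ZipFile(temp_path, 'r') as unzip:
        unzip.extractall('temp/')
    return os.listdir('temp')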
# PROCESS
def facebook_mobility_to_pd(files):
for filename in files:
if filename.startswith('movement-range'):
path = 'temp/' + filename
# Read and compress data file
df_load = pd.read_csv(path, sep='\t', dtype={'polygon_id':str})
df_load = df_load[df_load['country'] == 'USA']
df = df_load[['ds', 'polygon_id', 'all_day_bing_tiles_visited_relative_change', 'all_day_ratio_single_tile_users']]
df = df.rename({'polygon_id':'FIPS','ds':'date','all_day_bing_tiles_visited_relative_change':'fb_movement_change', 'all_day_ratio_single_tile_users':'fb_stationary'}, axis=1)
df = df.reset_index(drop=True)
            # Compute 7-day rolling averages for movement data
df['fb_movement_change'] = pd.Series(df.groupby("FIPS")['fb_movement_change'].rolling(7).mean()).reset_index(drop=True)
df['fb_stationary'] = pd.Series(df.groupby("FIPS")['fb_stationary'].rolling(7).mean()).reset_index(drop=True)
# Move dates forward by 1 day so that movement averages represent data from past week
df['date'] = pd.to_datetime(df['date'])
            df['date'] = df['date'] + pd.Timedelta(value=1, unit='day')
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import os
import pickle
import re
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, roc_auc_score, roc_curve
from nltk.stem import SnowballStemmer
import matplotlib.pyplot as plt
class AmazonReviews():
''' Class for reading Amazon review data and building a ML model to predict whether or not a product
will trend based on a customer review. The review data is sourced from (https://s3.amazonaws.com/amazon-reviews-pds/readme.html).
    date_filter: datetime
        Date used to filter the raw Amazon review data
reviews_df: DataFrame
Filtered data frame of the Amazon review data
reviews_selected_df: DataFrame
Filtered reviews_df for the time window to calculate the trend score
product_trend_df: DataFrame
        Output of the trend calculation process; used to check whether the trend score is calculated correctly
obs: DataFrame
Entire set of observations the model will be trained and tested upon.
X: np.array
Array for sklearn interface representing the feature space.
y: np.array
Array for sklearn interface representing the target.
X_train: np.array
Array for the sklearn interface representing the training feature space.
X_test: np.array
Array for the sklearn interface representing the testing feature space.
y_train: np.array
Array for the sklearn interface representing the training target.
y_test: np.array
Array for the sklearn interface representing the testing target.
results: DataFrame
Stores the results of each model. DataFrame consists of accuracy, precision, recall, F1, and AUC.
y_scores: defaultdict
Dictionary storing the target probabilities for each model.
'''
data_path = '../data/'
RANDOM_STATE = 42
def __init__(self, date_filter=datetime(2014,1,1)):# should add a flag to force to read from file
        ''' Initializes an AmazonReviews instance
date_filter: (optional)
If None, then date_filter will be set to 2014-01-01
'''
self.date_filter = date_filter
self.results = pd.DataFrame(columns=['Precision', 'Recall', 'F1', 'Accuracy','AUC'])
self.y_scores = defaultdict(np.ndarray)
def load_data(self, path):
''' Loads the AmazonReview data
path:
File path to the tab separated Amazon Review data (https://s3.amazonaws.com/amazon-reviews-pds/readme.html)
'''
# only load from file if pickle does not exist
i = path.rfind('/')
f = self.data_path + path[i+1:] + '.pkl'
if os.path.isfile(f):
            self.reviews_df = pd.read_pickle(f)
import numpy as np
import pandas as pd
import seaborn as sns
import estimagic.differentiation.finite_differences as fd
from multiprocessing import Pool
from matplotlib import pyplot as plt
from estimagic.differentiation.generate_steps import generate_steps
from estimagic.optimization.utilities import namedtuple_from_kwargs
from estimagic.differentiation.numdiff_np import first_derivative
from src.model_code.rastrigin import grad_rastrigin
from src.model_code.rastrigin import rastrigin
from src.model_code.levy import func_levy
from src.model_code.levy import grad_levy
from src.model_code.ackley import ackley
from src.model_code.ackley import grad_ackley
from scipy.optimize._numdiff import approx_derivative
from bld.project_paths import project_paths_join as ppj
if __name__ == "__main__":
def calculate_error(params, diff, func, grad):
"""Difference between numerical and analytical derivatives, devided by
analytial derivative.
Args:
params(np.array): 1d numpy array of function arguments
diff(np.array): difficulty parameter, controls wiggliness of the function
func(np.array): functions for which derivatives are calculated
grad(np.array): gradients of the functions
Returns:
error(np.array): numpy array of relative errors, calculated for different
methods
"""
method = ["center", "forward", "backward"]
error = {}
for i,m in enumerate(method):
diff_dict = {'diff': diff}
num_der = first_derivative(func, params,func_kwargs=diff_dict,
method=m)
analytical_der = grad(params, diff)
error[m] = (num_der - analytical_der )/np.abs(analytical_der).clip(1e-8, np.inf)
return error
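    # Hedged sketch (plain numpy, no estimagic): a forward-difference gradient of
    # f(x) = sum(x**2) compared against the analytic gradient 2x, mirroring the
    # relative-error clipping used in calculate_error above.
    def _demo_forward_difference_error(x=np.array([5.0, 8.5]), step=1e-6):
        f = lambda p: np.sum(p ** 2)
        grad_true = 2 * x
        grad_num = np.array([(f(x + step * e) - f(x)) / step for e in np.eye(x.size)])
        return (grad_num - grad_true) / np.abs(grad_true).clip(1e-8, np.inf)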
diff_vec = np.linspace(0, 40, 100)
functions = [rastrigin, func_levy]
gradients = [grad_rastrigin, grad_levy]
function_names = ["Rastrigin", "Levy"]
for f,g, func_name in zip(functions, gradients, function_names):
error_vec = [calculate_error(np.array([5.0, 8.5]),diff_i, f, g) for
diff_i in diff_vec]
to_concat = []
for err, d in zip(error_vec, diff_vec):
            df = pd.DataFrame(err)
#encoding=utf-8
import pandas as pd
from Data import load_file
from sklearn.preprocessing import Imputer
dir='D:/kesci/data/part_data'
test_master_numeric='/test_master_numeric.csv'
test_master_category='/test_master_category.csv'
test_UserUpdate='/test_UserUpdate.csv'
test_LogInfo='/test_LogInfo.csv'
train_master_numeric='/train_master_numeric.csv'
train_master_category='/train_master_category.csv'
train_UserUpdate='/train_UserUpdate.csv'
train_LogInfo='/train_LogInfo.csv'
test_master_1=load_file(dir,test_master_numeric)
test_master_2=load_file(dir,test_master_category)
train_master_1=load_file(dir,train_master_numeric)
train_master_2=load_file(dir,train_master_category)
test_UserUpdate_3=load_file(dir,test_UserUpdate)
test_LogInfo_4=load_file(dir,test_LogInfo)
train_UserUpdate_3=load_file(dir,train_UserUpdate)
train_LogInfo_4=load_file(dir,train_LogInfo)
train=pd.concat([train_master_1,train_master_2],axis=1)
test=pd.concat([test_master_1,test_master_2],axis=1)
print(train.shape)
print(test.shape)
train=pd.merge(train,train_UserUpdate_3,how='left',on='Idx')
train=pd.merge(train,train_LogInfo_4,how='left',on='Idx')
print(train.shape)
test=pd.merge(test,test_UserUpdate_3,how='left',on='Idx')
test=pd.merge(test,test_LogInfo_4,how='left',on='Idx')
print(test.shape)
train=Imputer().fit_transform(train)
test=Imputer().fit_transform(test)
print(train.shape)
print(test.shape)
train=pd.DataFrame(train)
test=pd.DataFrame(test)
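# Hedged sketch with toy data: how the left merges above align the auxiliary
# tables on the 'Idx' key before imputation.
def _demo_merge_on_idx():
    master = pd.DataFrame({'Idx': [1, 2, 3], 'x': [0.1, 0.2, 0.3]})
    logs = pd.DataFrame({'Idx': [1, 3], 'n_logs': [5, 7]})
    return pd.merge(master, logs, how='left', on='Idx')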
import glob
import numpy as np
import scipy
import os
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from joblib import dump
import pandas as pd
from multiprocessing.pool import ThreadPool
from pyhydra.utils import check_symmetric, launch_svc
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["<NAME>, <NAME>"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def consensus_clustering(clustering_results, k):
"""
This function performs consensus clustering on a co-occurence matrix
    :param clustering_results: an array containing all the clustering results across different iterations,
        used to build the subject-by-subject co-occurrence matrix for consensus clustering
    :param k: the desired number of clusters
    :return: the consensus cluster assignment for each subject
"""
num_pt = clustering_results.shape[0]
cooccurence_matrix = np.zeros((num_pt, num_pt))
for i in range(num_pt - 1):
for j in range(i + 1, num_pt):
cooccurence_matrix[i, j] = sum(clustering_results[i, :] == clustering_results[j, :])
cooccurence_matrix = np.add(cooccurence_matrix, cooccurence_matrix.transpose())
## here is to compute the Laplacian matrix
Laplacian = np.subtract(np.diag(np.sum(cooccurence_matrix, axis=1)), cooccurence_matrix)
Laplacian_norm = np.subtract(np.eye(num_pt), np.matmul(np.matmul(np.diag(1 / np.sqrt(np.sum(cooccurence_matrix, axis=1))), cooccurence_matrix), np.diag(1 / np.sqrt(np.sum(cooccurence_matrix, axis=1)))))
## replace the nan with 0
Laplacian_norm = np.nan_to_num(Laplacian_norm)
## check if the Laplacian norm is symmetric or not, because matlab eig function will automatically check this, but not in numpy or scipy
if check_symmetric(Laplacian_norm):
## extract the eigen value and vector
## matlab eig equivalence is eigh, not eig from numpy or scipy, see this post: https://stackoverflow.com/questions/8765310/scipy-linalg-eig-return-complex-eigenvalues-for-covariance-matrix
## Note, the eigenvector is not unique, thus the matlab and python eigenvector may be different, but this will not affect the results.
evalue, evector = scipy.linalg.eigh(Laplacian_norm)
else:
# evalue, evector = np.linalg.eig(Laplacian_norm)
raise Exception("The Laplacian matrix should be symmetric here...")
## check if the eigen vector is complex
if np.any(np.iscomplex(evector)):
evalue, evector = scipy.linalg.eigh(Laplacian)
## create the kmean algorithm with sklearn
kmeans = KMeans(n_clusters=k, n_init=20).fit(evector.real[:, 0: k])
final_predict = kmeans.labels_
return final_predict
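# Hedged sketch with toy labels: the co-occurrence count used above, i.e. for
# every pair of subjects, the number of repetitions in which they were assigned
# to the same cluster.
def _demo_cooccurrence():
    labels = np.array([[0, 0], [0, 1], [1, 1]])  # 3 subjects, 2 repetitions
    n = labels.shape[0]
    cooc = np.zeros((n, n))
    for i in range(n - 1):
        for j in range(i + 1, n):
            cooc[i, j] = np.sum(labels[i, :] == labels[j, :])
    return cooc + cooc.T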
def cv_cluster_stability(result, k):
"""
    To compute the adjusted Rand index across the different pairs of folds of the cross-validation
    :param result: array of cluster assignments with one column per fold/repetition
    :return: the mean adjusted Rand index over all pairs of folds
"""
num_pair = 0
aris = []
if k == 1:
adjusted_rand_index = 0 ## note, here, we manually set it to be 0, because it does not make sense when k==1. TODO, need to clarify if there is really heterogeneity in the data, i.e., k == 1 or k>1
else:
for i in range(result.shape[1] - 1):
for j in range(i+1, result.shape[1]):
num_pair += 1
non_zero_index = np.all(result[:, [i, j]], axis=1)
pair_result = result[:, [i, j]][non_zero_index]
ari = adjusted_rand_score(pair_result[:, 0], pair_result[:, 1])
aris.append(ari)
adjusted_rand_index = np.mean(np.asarray(aris))
return adjusted_rand_index
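# Hedged sketch: the pairwise adjusted Rand index underlying cv_cluster_stability,
# computed for two hand-made fold assignments that agree up to a label permutation
# (the expected value is 1.0).
def _demo_adjusted_rand_index():
    fold_a = np.array([1, 1, 2, 2, 3, 3])
    fold_b = np.array([2, 2, 3, 3, 1, 1])
    return adjusted_rand_score(fold_a, fold_b)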
def hydra_solver_svm_tl(num_component, num_component_former, num_repetition, X, y, k, output_dir, num_iteration, tol, balanced, predefined_c, n_threads, num_run):
"""
This is the main function of HYDRA, which find the convex polytope using a supervised classification fashion.
:param num_repetition: the number of iteration of CV currently. This is helpful to reconstruct the model and also moniter the processing
:param X: corrected training data feature
:param y: traing data label
:param k: hyperparameter for desired number of clusters in patients
:param options: commandline parameters
:return: the optimal model
"""
index_pt = np.where(y == 1)[0] # index for PTs
index_cn = np.where(y == -1)[0] # index for CNs
### initialize the final weight for the polytope from the former C
weight_file = os.path.join(output_dir, 'clustering_run' + str(num_run-1), 'component_' + str(num_component_former), str(k) + '_clusters', 'tsv', 'weight_sample_cv_' + str(num_repetition) + '.tsv')
weight_sample = pd.read_csv(weight_file, sep='\t').to_numpy()
## cluster assignment is based on this svm scores across different SVM/hyperplanes
svm_scores = np.zeros((weight_sample.shape[0], weight_sample.shape[1]))
update_weights_pool = ThreadPool(n_threads)
for j in range(num_iteration):
for m in range(k):
sample_weight = np.ascontiguousarray(weight_sample[:, m])
if np.count_nonzero(sample_weight[index_pt]) == 0:
print("Cluster dropped, meaning that all PT has been assigned to one single hyperplane in iteration: %d" % (j-1))
svm_scores[:, m] = np.asarray([np.NINF] * (y.shape[0]))
else:
results = update_weights_pool.apply_async(launch_svc, args=(X, y, predefined_c, sample_weight, balanced))
weight_coef = results.get()[0]
intesept = results.get()[1]
## Apply the data again the trained model to get the final SVM scores
svm_scores[:, m] = (np.matmul(weight_coef, X.transpose()) + intesept).transpose().squeeze()
final_predict = np.argmax(svm_scores[index_pt], axis=1)
        ## decide the convergence of the polytope based on the tolerance
weight_sample_hold = weight_sample.copy()
# after each iteration, first set the weight of patient rows to be 0
weight_sample[index_pt, :] = 0
# then set the pt's weight to be 1 for the assigned hyperplane
for n in range(len(index_pt)):
weight_sample[index_pt[n], final_predict[n]] = 1
        ## check the loss compared to the tolerance for the stopping criterion
loss = np.linalg.norm(np.subtract(weight_sample, weight_sample_hold), ord='fro')
print("The loss is: %f" % loss)
if loss < tol:
print("The polytope has been converged for iteration %d in finding %d clusters" % (j, k))
break
update_weights_pool.close()
update_weights_pool.join()
## after deciding the final convex polytope, we refit the training data once to save the best model
weight_sample_final = np.zeros((y.shape[0], k))
## change the weight of PTs to be 1, CNs to be 1/k
# then set the pt's weight to be 1 for the assigned hyperplane
for n in range(len(index_pt)):
weight_sample_final[index_pt[n], final_predict[n]] = 1
weight_sample_final[index_cn] = 1 / k
update_weights_pool_final = ThreadPool(n_threads)
for o in range(k):
sample_weight = np.ascontiguousarray(weight_sample_final[:, o])
if np.count_nonzero(sample_weight[index_pt]) == 0:
print("Cluster dropped, meaning that the %d th hyperplane is useless!" % (o))
else:
results = update_weights_pool_final.apply_async(launch_svc, args=(X, y, predefined_c, sample_weight, balanced))
## save the final model for the k SVMs/hyperplanes
if not os.path.exists(
os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models')):
os.makedirs(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models'))
dump(results.get()[2],
os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models',
'svm-' + str(o) + '_last_repetition.joblib'))
update_weights_pool_final.close()
update_weights_pool_final.join()
y[index_pt] = final_predict + 1
if not os.path.exists(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv')):
os.makedirs(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv'))
## save the assigned weight for each subject across k-fold
columns = ['hyperplane' + str(i) for i in range(k)]
weight_sample_df = pd.DataFrame(weight_sample_final, columns=columns)
weight_sample_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'weight_sample_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
## save the final_predict_all
columns = ['y_hat']
y_hat_df = pd.DataFrame(y, columns=columns)
y_hat_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'y_hat_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
## save the pt index
columns = ['pt_index']
pt_df = pd.DataFrame(index_pt, columns=columns)
pt_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'pt_index_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
return y
def cluster_stability_across_resolution(c, c_former, output_dir, k_continuing, num_run, stop_tol=0.98):
"""
To evaluate the stability of clustering across two different C for stopping criterion.
Args:
c:
c_former:
output_dir:
k_continuing:
num_run:
stop_tol:
max_num_iter:
Returns:
"""
## read the output of current C and former Cs
cluster_ass1 = os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(c), 'clustering_assignment.tsv')
ass1_df = pd.read_csv(cluster_ass1, sep='\t')
ass1_df = ass1_df.loc[ass1_df['diagnosis'] == 1]
cluster_ass2 = os.path.join(output_dir, 'clustering_run' + str(num_run-1), 'component_' + str(c_former), 'clustering_assignment.tsv')
    ass2_df = pd.read_csv(cluster_ass2, sep='\t')
# pip3 install apyori
# import the libraries
import pandas as pd
import numpy as np
from apyori import apriori
from matplotlib import pyplot as plt
# Configure printing of columns and rows
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def apriori_alg(observations):
    # Apply the apriori algorithm
association = apriori(observations, min_support=0.1)# , min_lift=1.2, min_length=2)
association_results = list(association)
    # Create a dataframe to view the results
df = pd.DataFrame(columns=('Names','Support','Confidence','Lift'))
Support =[]
Confidence = []
Lift = []
Items = []
for RelationRecord in association_results:
for ordered_stat in RelationRecord.ordered_statistics:
Support.append(RelationRecord.support)
Items.append(RelationRecord.items)
Confidence.append(ordered_stat.confidence)
Lift.append(ordered_stat.lift)
df['Names'] = list(map(set, Items))
df['Support'] = Support
df['Confidence'] = Confidence
df['Lift']= Lift
return df.copy()
# Select data from the csv file
df = pd.read_csv('_ASSOC_VoleiStars.csv', index_col=None, encoding='iso-8859-1')
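# Hedged sketch with a toy transaction list: apyori expects an iterable of item
# lists, so apriori_alg above can be exercised without the CSV file.
def _demo_apriori_alg():
    transactions = [
        ['milk', 'bread'],
        ['milk', 'bread', 'butter'],
        ['bread', 'butter'],
        ['milk', 'butter'],
    ]
    return apriori_alg(transactions)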
#libraries
import numpy as np
import pandas as pd
from datetime import datetime as dt
import time
import datetime
import os
import warnings
warnings.filterwarnings("ignore")
import logging
logging.basicConfig(filename='log.txt',level=logging.DEBUG, format='%(asctime)s %(message)s')
pd.set_option('max_colwidth', 500)
from functools import partial
from pathlib import Path
from pprint import pprint
from typing import Any
import ujson
import numpy as np
import pandas as pd
from tensorflow import config as tfc
from tensorflow.keras.models import load_model
from tqdm import trange, tqdm
from tifffile import imsave
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.ticker as mtick
from matplotlib.ticker import FormatStrFormatter
import matplotlib.colors as mcolors
import utils
import vis
import backend
import preprocessing
from synthetic import SyntheticPSF
from wavefront import Wavefront
from zernike import Zernike
import logging
logger = logging.getLogger('')
def zernikies_to_actuators(coefficients: np.array, dm_pattern: Path, dm_state: np.array, scalar: float = 1):
    dm_pattern = pd.read_csv(dm_pattern, header=None)
import numpy as np
import pandas as pd
class LinearRegression:
def __init__(self, reg: str = None, pen: float = None):
# Sanity check
if reg not in (None, 'l1', 'l2'):
raise ValueError('Regularization not supported')
if reg in ('l1', 'l2'):
try:
assert pen > 0
except (AssertionError, TypeError):
raise ValueError('Penalty must be positive when regularization is specified')
self.reg = reg # type of regularization, can be None, 'l1' or 'l2'
self.pen = pen # penalty (lambda), must be positive for L1 or L2 regularization
self.features = pd.Index([]) # name of the features
self.target = pd.Index([]) # name of the target
self.w = pd.Series([]) # weights
self.b = 0.0 # bias
self.lr = 0.0 # learning rate, must be positive
self.n_epoch = 0 # number of epochs, must be a positive integer
def train(self, X: pd.DataFrame, y: pd.Series,
lr: float = 0.01, n_epoch: int = 50) -> pd.Series:
r"""Train the model to data
Args:
X: pd.DataFrame
Feature matrix
y: pd.Series
Target vector
lr: float
Learning rate, must be positive
n_epoch: int
Number of epochs, must be a positive integer
Returns:
losses: pd.Series
Training loss per data point
"""
# Sanity check
try:
assert lr > 0
except (AssertionError, TypeError):
raise ValueError('Learning rate must be positive')
try:
assert isinstance(n_epoch, int)
assert n_epoch > 0
except AssertionError:
raise ValueError('Number of epochs must be a positive integer')
# Initialize weights and bias
self.features = X.columns
self.target = y.name
self.w = pd.Series(np.zeros(len(self.features)), name='w', index=self.features)
self.b = 0.0
self.lr = lr
self.n_epoch = n_epoch
# Train model with SGD
losses = []
for i_epoch in range(n_epoch):
for (i_data, xi), (_, yi) in zip(X.iterrows(), y.iteritems()):
_X = xi.to_frame().T
                _y = pd.Series(yi, index=[i_data], name=self.target)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy.optimize as optimize
from scipy.special import betaln
from pandas.stats.moments import rolling_mean as rolling_m
from pandas.stats.moments import rolling_corr
from warnings import warn
import matplotlib.pyplot as plt
from time import time
from datetime import datetime
from pandas.io.data import DataReader
import sys
import os
import logging
import itertools
def dataset(name):
""" Return sample dataset from /data directory. """
mod = sys.modules[__name__]
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', name + '.pkl')
return pd.read_pickle(filename)
def profile(algo, data=None, to_profile=[]):
""" Profile algorithm using line_profiler.
:param algo: Algorithm instance.
:param data: Stock prices, default is random portfolio.
:param to_profile: List of methods to profile, default is `step` method.
Example of use:
tools.profile(Anticor(window=30, c_version=False), to_profile=[Anticor.weights])
"""
from line_profiler import LineProfiler
if data is None:
data = random_portfolio(n=1000, k=10, mu=0.)
to_profile = to_profile or [algo.step]
profile = LineProfiler(*to_profile)
profile.runcall(algo.run, data)
profile.print_stats()
def load_ticker(ticker, start=datetime(2000,1,1), end=None):
    return DataReader(ticker, "yahoo", start=start, end=end)
def quickrun(algo, data=None, **kwargs):
""" Run algorithm and print its running time and some statistics. """
if data is None:
data = random_portfolio(n=1000, k=3, mu=0.0001)
t = time()
result = algo.run(data)
logging.debug('Time: {:.2f}s'.format(time() - t))
print(result.summary())
result.plot(**kwargs)
plt.show()
return result
def random_portfolio(n, k, mu=0., sd=0.01, corr=None, dt=1., nan_pct=0.):
""" Generate asset prices assuming multivariate geometric Brownian motion.
:param n: Number of time steps.
:param k: Number of assets.
:param mu: Drift parameter. Can be scalar or vector. Default is 0.
:param sd: Volatility of single assets. Default is 0.01.
:param corr: Correlation matrix of assets. Default is identity.
:param dt: Time step.
:param nan_pct: Add given percentage of NaN values. Useful for testing
"""
# default values
corr = corr if corr is not None else np.eye(k)
sd = sd * np.ones(k)
mu = mu * np.ones(k)
# drift
nu = mu - sd**2 / 2.
# do a Cholesky factorization on the correlation matrix
R = np.linalg.cholesky(corr).T
# generate uncorrelated random sequence
x = np.matrix(np.random.normal(size=(n - 1,k)))
# correlate the sequences
ep = x * R
# multivariate brownian
W = nu * dt + ep * np.diag(sd) * np.sqrt(dt)
# generate potential path
S = np.vstack([np.ones((1, k)), np.cumprod(np.exp(W), 0)])
# add nan values
if nan_pct > 0:
r = S * 0 + np.random.random(S.shape)
S[r < nan_pct] = np.nan
return pd.DataFrame(S)
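# Hedged sketch: the multivariate GBM above in its simplest form -- two
# uncorrelated assets with zero drift; prices start at 1 and stay positive.
def _demo_random_portfolio():
    S = random_portfolio(n=250, k=2, mu=0.0, sd=0.01)
    assert (S.iloc[0] == 1).all() and (S > 0).all().all()
    return S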
def bcrp_weights(X):
""" Find best constant rebalanced portfolio.
:param X: Prices in ratios.
"""
x_0 = np.ones(X.shape[1]) / float(X.shape[1])
fun = lambda b: -np.prod(np.dot(X, b))
cons = ({'type': 'eq', 'fun': lambda b: sum(b) - 1.},)
res = optimize.minimize(fun, x_0, bounds=[(0.,1.)]*len(x_0), constraints=cons, method='slsqp')
if not res.success:
warn('BCRP not found', RuntimeWarning)
return res.x
def rolling_cov_pairwise(df, *args, **kwargs):
d = {}
for c in df.columns:
d[c] = pd.rolling_cov(df[c], df, *args, **kwargs)
p = pd.Panel(d)
return p.transpose(1,0,2)
def rolling_corr(x, y, **kwargs):
""" Rolling correlation between columns from x and y. """
def rolling(dataframe, *args, **kwargs):
ret = dataframe.copy()
for col in ret:
ret[col] = rolling_m(ret[col], *args, **kwargs)
return ret
n, k = x.shape
EX = rolling(x, **kwargs)
EY = rolling(y, **kwargs)
EX2 = rolling(x ** 2, **kwargs)
EY2 = rolling(y ** 2, **kwargs)
RXY = np.zeros((n, k, k))
for i, col_x in enumerate(x):
for j, col_y in enumerate(y):
DX = EX2[col_x] - EX[col_x] ** 2
DY = EY2[col_y] - EY[col_y] ** 2
            RXY[:, i, j] = rolling_m(x[col_x] * y[col_y], **kwargs)
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import pyplot
from pandas.api.types import is_string_dtype
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from dataset_creation import read_data
# Options for pandas -----
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
accuracy = []
precision = []
recall = []
f1 = []
model_names = []
def read_data_from_questionnaire():
    # reads the questionnaire responses from final_questionnaire.csv
dataset = pd.read_csv("final_questionnaire.csv",
sep=',',
header=0,
skiprows=0)
print('Dataset size is: ', len(dataset))
print(dataset.head(5))
#Convert "Ναι / Οχι" responses in 0-1
dataset['follow'] = dataset['follow'].apply(lambda x: 0 if x=='Όχι' else 1)
# Convert "Άντρας / Γυναίκα" responses in m-f
dataset['gender'] = dataset['gender'].apply(lambda x: 0 if x == 'Άντρας' else 1)
print(dataset.head(5))
print(dataset.columns)
#Drop name, reasons & category columns
dataset.drop(['name', 'reasons', 'category'], axis='columns', inplace=True)
print(dataset.shape)
#Convert all string values to numeric
for x in dataset.columns:
dataset[x] = pd.to_numeric(dataset[x], errors='coerce')
# Drop rows with age <0 and >100
dataset.drop(dataset[dataset['age'] < 0].index, inplace=True)
dataset.drop(dataset[dataset['age'] > 100].index, inplace=True)
print(dataset.shape)
# Drop examples (if any) that may contain NaN features
# ---------------------------------------------------------------
dataset.dropna(inplace=True)
print(dataset.shape)
#dataset.drop(['age'])
return dataset;
def preprocess_data_from_mongo(df):
for i in df.columns:
if is_string_dtype(df[i]):
df[i] = df[i].map(lambda x: str(x).replace('%',''))
df[i] = df[i].map(lambda x: str(x).replace('--', '0'))
df[i] = df[i].map(lambda x: str(x).replace(',', ''))
# Convert all string values to numeric, except the category column
if i != 'category':
df[i] = pd.to_numeric(df[i], errors='coerce')
# Convert continuous follow_probability to 0-1
df['follow_probability'] = df['follow_probability'].apply(lambda x: 1 if x >= 0.5 else 0)
# Drop examples (if any) that may contain NaN features
# ---------------------------------------------------------------
df.dropna(inplace=True)
#print(df.shape)
#print(df.head(5))
return df
def print_scores(y_true, y_pred, model_name):
print("Results with ",model_name)
print("Accuracy: {:.5f}".format(metrics.accuracy_score(y_true, y_pred)))
print("Precision: {:.5f}".format(metrics.precision_score(y_true, y_pred)))
print("Recall: {:.5f}".format(metrics.recall_score(y_true, y_pred)))
print("F1: {:.5f}".format(metrics.f1_score(y_true, y_pred)))
accuracy.append(metrics.accuracy_score(y_true, y_pred))
precision.append(metrics.precision_score(y_true, y_pred))
recall.append(metrics.recall_score(y_true, y_pred))
f1.append(metrics.f1_score(y_true, y_pred))
model_names.append(model_name)
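# Hedged sketch: print_scores on hand-made labels, to illustrate the metric
# bookkeeping reused for every model below.
def _demo_print_scores():
    y_true = [0, 1, 1, 0, 1]
    y_pred = [0, 1, 0, 0, 1]
    print_scores(y_true, y_pred, "Toy model")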
def plot_results(name):
x = np.arange(len(model_names))
width = 0.2 # the width of the bars
X_axis = np.arange(len(model_names))
plt.bar(X_axis - 0.3, accuracy, width, label='Accuracy', color='red')
plt.bar(X_axis - 0.1, precision, width, label='Precision', color='purple')
plt.bar(X_axis + 0.1, recall, width, label='Recall')
plt.bar(X_axis + 0.3, f1, width, label='F1')
plt.xticks(X_axis, model_names, rotation=45)
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.xlabel("Models")
plt.ylabel("Scores")
plt.title("Machine Learning Model Scores")
plt.legend()
plt.savefig(name + '.png')
plt.show()
def fit_predict(x_train, x_test, model):
model.fit(x_train)
y_predicted = model.predict(x_test)
return y_predicted
def make_prediction(dataset, prediction_type):
print(dataset.head(2))
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=2)
# Scale all the data with MinMaxScaler
# ---------------------------------------------------------------
scaler = MinMaxScaler()
x_train_s = scaler.fit_transform(x_train)
x_test_s = scaler.transform(x_test)
if prediction_type == 'mongo_info':
pca = PCA(0.95)
pca.fit(x_train_s)
x_train_final = pca.transform(x_train_s)
x_test_final = pca.transform(x_test_s)
else:
x_train_final = x_train_s
x_test_final = x_test_s
if prediction_type == 'questionnaire_info':
# Naive Bayes
# ---------------------------------------------------------------
mnb = MultinomialNB().fit(x_train, y_train)
y_predicted = mnb.predict(x_test_final)
print_scores(y_test, y_predicted, "MultinomialNB")
print("score on train: ", str(mnb.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# Logistic Regression
# ---------------------------------------------------------------
grid = {"C": np.logspace(-3, 3, 7), "tol": [1e-2, 1e-3, 1e-4, 1e-5], "penalty": ["l1", "l2"], "solver": ["saga"], "max_iter": [5000]}
lr = GridSearchCV(LogisticRegression(), param_grid=grid)
lr.fit(x_train_final, y_train)
y_predicted = lr.predict(x_test_final)
print_scores(y_test, y_predicted, "Logistic Regression")
print("score on train: ", str(lr.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# K Neighbors Classifier
# ---------------------------------------------------------------
knn = KNeighborsClassifier(algorithm='brute', n_jobs=-1)
knn.fit(x_train_final, y_train)
y_predicted = knn.predict(x_test_final)
print_scores(y_test, y_predicted, "K Neighbors Classifier")
print("score on train: ", str(knn.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# Support Vector Machine
# ---------------------------------------------------------------
param_grid = {'C': [0.1, 1, 10, 100, 1000],
'gamma': [1, 0.1, 0.01, 0.001, 0.0001],
'kernel': ['rbf']}
svm = GridSearchCV(SVC(), param_grid)
svm.fit(x_train_final, y_train)
y_predicted = svm.predict(x_test_final)
print_scores(y_test, y_predicted, "Support Vector Machine")
print("score on train: " + str(svm.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# Decision Tree Classifier
# ---------------------------------------------------------------
clf = DecisionTreeClassifier()
clf.fit(x_train_final, y_train)
y_predicted = clf.predict(x_test_final)
print_scores(y_test, y_predicted, "Decision Tree Classifier")
print("score on train: " + str(clf.score(x_train_final, y_train)), "\n")
print(clf.feature_importances_)
# ---------------------------------------------------------------
# Bagging Decision Tree
# ---------------------------------------------------------------
# max_samples: maximum size 0.5=50% of each sample taken from the full dataset
# max_features: maximum of features 1=100% taken here all 10K
# n_estimators: number of decision trees
bg = BaggingClassifier(DecisionTreeClassifier(), max_samples=0.5, max_features=1.0, n_estimators=10)
bg.fit(x_train_final, y_train)
y_predicted = bg.predict(x_test_final)
print_scores(y_test, y_predicted, "Bagging Decision Tree")
print("score on train: " + str(bg.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# Boosting Decision Tree
# ---------------------------------------------------------------
adb = AdaBoostClassifier(DecisionTreeClassifier(min_samples_split=10, max_depth=4), n_estimators=10,
learning_rate=0.6)
adb.fit(x_train_final, y_train)
y_predicted = adb.predict(x_test_final)
print_scores(y_test, y_predicted, "Boosting Decision Tree")
print("score on train: " + str(adb.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
# Random Forest Classifier
# ---------------------------------------------------------------
# n_estimators = number of decision trees
rf = RandomForestClassifier(n_estimators=30, max_depth=9)
rf.fit(x_train_final, y_train)
y_predicted = rf.predict(x_test_final)
print_scores(y_test, y_predicted, "Random Forest Classifier")
print("score on train: " + str(rf.score(x_train_final, y_train)), "\n")
# perform Random Forest Build-in importance
importance = rf.feature_importances_
# summarize feature importance
importanceDict = {}
listImp = []
print('Random Forest Build-in importance')
for i, v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i, v))
listImp.append(v)
importanceDict[v] = i
print()
# plot feature importance
pyplot.bar([x for x in range(len(importance))], importance)
pyplot.show()
# ---------------------------------------------------------------
if prediction_type == 'questionnaire_info':
# Voting Classifier
# ---------------------------------------------------------------
# 1) naive bias = mnb
# 2) logistic regression =lr
# 3) random forest =rf
# 4) support vector machine = svm
evc = VotingClassifier(estimators=[('mnb', mnb), ('lr', lr), ('rf', rf), ('svm', svm)], voting='hard')
evc.fit(x_train_final, y_train)
y_predicted = evc.predict(x_test_final)
print_scores(y_test, y_predicted, "Voting Classifier")
print("score on train: " + str(evc.score(x_train_final, y_train)), "\n")
# ---------------------------------------------------------------
if __name__ == '__main__':
#Make prediction on the questionnaire
print("Machine learning methods: Questionnaire")
dataset_quest = read_data_from_questionnaire()
make_prediction(dataset_quest,'questionnaire_info')
plot_results("questionnaire")
accuracy = []
precision = []
recall = []
f1 = []
model_names = []
# Make prediction on the crowdtangle's info
print("Machine learning methods: Crowdtangle")
if os.path.isfile('prediction_info.csv'):
        dataset_mongo_updated = pd.read_csv("prediction_info.csv")
# Load datasets
import os
import re
import shutil
import json
import pickle
import logging as log
from pathlib import Path
from typing import *
from collections import Counter
from itertools import combinations, groupby
import tqdm
import pandas as pd
import numpy as np
import takco
from takco.link.profile import pfd_prob_pervalue
def progress(*args, **kwargs):
disable = log.getLogger().getEffectiveLevel() >= 30
return tqdm.tqdm(*args, disable=disable, **kwargs)
## Dataset
def get_snow_datasets(snow_rootdir: Path):
root = Path(snow_rootdir).joinpath("datasets").expanduser().absolute()
for d in root.iterdir():
if d.is_dir():
union_path = d.joinpath("union_dedup_json")
input_dataset = takco.evaluate.dataset.WebDataCommons(
fnames=sorted(union_path.glob("*.json"))
)
reference_path = d.joinpath("evaluation/normalised_fd_relations")
output_dataset = takco.evaluate.dataset.WebDataCommons(
fnames=sorted(reference_path.glob("*.json"))
)
yield d.name, (input_dataset, output_dataset)
## Generic functions
def aggr_by_val(items):
aggr = {}
for k, v in items:
aggr.setdefault(v, set()).add(k)
return aggr
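# Hedged sketch: aggr_by_val groups keys by their value,
# e.g. [("a", 1), ("b", 1), ("c", 2)] -> {1: {"a", "b"}, 2: {"c"}}.
def _demo_aggr_by_val():
    return aggr_by_val([("a", 1), ("b", 1), ("c", 2)])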
def _make_date_regex():
import calendar
ms = [m.lower() for m in calendar.month_name if m]
monthnames = "(?:%s)" % "|".join([s for m in ms for s in set([m, m[:3]])])
dns = [f for i in range(1, 32) for f in set([str(i), f"{i:02d}"])]
daynrs = "(%s)" % "|".join(dns)
mns = [f for i in range(1, 13) for f in set([str(i), f"{i:02d}"])]
monthnrs = "(%s)" % "|".join(mns)
regexes = [
r"(?:\d{4}[/\-\.\s])?%s[/\-\.\s]%s(?:[/\-\.\s]\d{4})?\b" % x
for m in [monthnrs, monthnames]
for d in [daynrs, daynrs + "(?:th|nd)?"]
for x in [(m, d), (d, m)]
] + [
r"\d{4}\b",
monthnames,
] # year
return "|".join("(?:%s)" % r for r in regexes)
def looks_date(series, threshold=0.75):
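    """Heuristic date detector: True if more than `threshold` of the non-empty cells match a
    date-like pattern (returns False early if they mostly look like plain decimal numbers)."""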
series = series.replace("", np.nan).dropna().astype("str")
# First match simple numbers
    if series.str.match(r"-?\d+[,.]\d+").mean() > threshold:
return False
rex = _make_date_regex()
flags = re.IGNORECASE
return series.str.match(rex, flags=flags).mean() > threshold
def looks_duration(series, threshold=0.75):
series = series.replace("", np.nan).dropna().astype("str")
    return series.str.match(r"(?:\d+:)+\d+").mean() > threshold
def looks_numeric(series, threshold=0.75):
series = series.replace("", np.nan).dropna().astype("str")
if looks_date(series, threshold=threshold) or looks_duration(series):
return False # date
    return (series.str.count(r"[\d\.\-%]") / series.str.len()).mean() > threshold
def looks_longtext(series, threshold=30):
return series.str.len().mean() > threshold
def get_context_headers(headers: Sequence[Sequence[str]]):
p = ["page title", "table heading", "disambiguation of", "uri"]
return [j for j, h in enumerate(headers) for i in p for c in h if c.startswith(i)]
def get_singleton_cols(df):
return list(df.columns[df.describe().T["unique"] == 1])
def get_longtext_cols(df, threshold=30):
return [
n
for n, (_, c) in enumerate(df.iteritems())
if looks_longtext(c, threshold=threshold)
]
def guess_numeric_cols(df, threshold=0.75):
return [
n
for n, (_, c) in enumerate(df.iteritems())
if looks_numeric(c, threshold=threshold)
]
def guess_date_cols(df, threshold=0.75):
return [
n
for n, (_, c) in enumerate(df.iteritems())
if looks_date(c, threshold=threshold)
]
## KB matching
class KB:
def __init__(self, snow_rootdir: Path, **kwargs):
# Make KB features
root = Path(snow_rootdir).expanduser().absolute()
kb_fnames = list(root.joinpath("knowledgebase/tables/").glob("*.csv"))
kb_text = {}
for fname in progress(kb_fnames, desc="Loading KB classes"):
name = fname.name.split(".")[0]
vals = pd.read_csv(fname, usecols=[1], skiprows=4, header=None, nrows=None)
kb_text[name] = vals[1]
from sklearn.feature_extraction.text import TfidfVectorizer
self.kb_vectorizer = TfidfVectorizer(analyzer=self._analyze, **kwargs)
self.K = self.kb_vectorizer.fit_transform(kb_text.values())
log.debug("Made KB feature matrix of shape %s", self.K.shape)
self.classes = list(kb_text)
@staticmethod
def _analyze(xs):
"""Extract full-cell and BoW features"""
return [t for x in xs if x for t in ([x.lower()] + x.lower().split()) if t]
def _get_query(self, df):
ok_cols = [
ci
for ci, (_, col) in enumerate(df.iteritems())
if (
(not looks_numeric(col, 0.5))
and (not looks_longtext(col))
and (not looks_date(col, 0.5))
and col.notna().mean() > 0.5
)
]
if not ok_cols:
return [], None
qtexts = [df.iloc[:, ci] for ci in ok_cols]
Q = self.kb_vectorizer.transform(qtexts)
return ok_cols, Q
def get_sim(self, df):
ok_cols, Q = self._get_query(df)
if not ok_cols:
return None
simmat = self.K.dot(Q.T).todense()
sim = pd.DataFrame(simmat, index=self.classes, columns=ok_cols)
# weight similarities by log-frac of matching cells in column
sim *= np.log1p(np.array((Q > 0).sum(axis=1)).T[0]) / np.log1p(len(df))
# also weight by fraction unique
sim *= df.describe().T.reset_index().unique.astype("float") / len(df)
return sim
def predict_classes(self, df, threshold=0.01):
"""Predict KB classes for short, non-numeric columns"""
sim = self.get_sim(df)
if sim is None:
return {}
preds = pd.DataFrame({"class": sim.idxmax(), "score": sim.max()})
return preds[preds.score > threshold].to_dict("index")
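# Example usage (sketch; assumes a SNOW-style root containing knowledgebase/tables/*.csv):
#   kb = KB("~/snow_root")
#   kb.predict_classes(table_df)  # -> {column_nr: {"class": <kb class>, "score": <float>}}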
## Foreign Keys
class ForeignKeyTracker:
def __init__(self, dataset_name):
self.dataset_name = dataset_name
self.class_value_fk = {}
self.class_nfds = Counter()
def split_fk(self, df, columns, fkcolnr, fkclass):
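        """Replace the values in column `fkcolnr` with synthetic `{fkclass}_{dataset}~RowN`
        foreign keys and relabel that column's header as ("FK",); returns (df, columns)."""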
value_fk = self.class_value_fk.setdefault(fkclass, {})
prefix = f"{fkclass}_{self.dataset_name}"
fks = [
f"{prefix}~Row{value_fk.setdefault(v, len(value_fk))}"
for v in df.iloc[:, fkcolnr]
]
df = df.fillna("")
df[fkcolnr] = fks
columns = list(columns)
columns[fkcolnr] = ("FK",)
return df, columns
def iter_fk_tables(self):
for fkclass, value_fk in self.class_value_fk.items():
body = [
(f"{fkclass}_{self.dataset_name}~Row{i}", val)
for val, i in value_fk.items()
]
head = [("PK", "rdf-schema#label")]
name = f"{fkclass}_{self.dataset_name}.json"
yield takco.Table(head=head, body=body, _id=name)
def decompose_fd_tables(
self, df, keys, fkclass: str, header: pd.Series = None, exclude_cols=None
):
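        """For every non-key, non-excluded column with more than one distinct value, yield a
        deduplicated (column + keys) functional-dependency table named `{fkclass}_{dataset}_fd_{n}.json`."""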
for c in df.columns:
if exclude_cols and (c in exclude_cols):
continue
if (c not in keys) and (len(set(df[c])) > 1):
fd_df = df[[c] + list(keys)]
# get filled unique rows
filled_mask = fd_df[[c]].fillna(False).applymap(bool).any(axis=1)
filled_mask &= fd_df[list(keys)].fillna(False).applymap(bool).all(axis=1)
fd_df = fd_df[filled_mask].drop_duplicates(ignore_index=True)
if header is not None:
head = list(zip(*header[[c] + list(keys)]))
else:
head = list(zip(*fd_df.columns))
# Make name based on fkclass count
nfd = self.class_nfds[fkclass]
_id = f"{fkclass}_{self.dataset_name}_fd_{nfd}.json"
self.class_nfds[fkclass] += 1
yield takco.Table(head=head, body=fd_df.values, _id=_id)
## Matching and stitching
class TfidfMatcher:
curr = r"(?:\$|kr|\€|usd|chf|\£|\¥|\₹|s\$|hk\$|nt\$|tl|р|aed)"
    re_money = re.compile(rf"(?:{curr}[\d\s,\.]+)|(?:[\d\s,\.]+{curr})|free|gratis")
def __init__(self, name=None, num_threshold=0.5, **kwargs):
self.num_threshold = num_threshold
kwargs["analyzer"] = self.__class__._analyzer
self.tfidf_kwargs = kwargs
self.name = name or self.__class__.__name__
@staticmethod
def iter_text(tabid_df, num_threshold=0.5):
for tabid, df in progress(tabid_df.items(), desc="Getting column text"):
columns = list(df.columns)
numeric_cis = set(guess_numeric_cols(df, threshold=num_threshold))
date_cis = set(guess_date_cols(df, threshold=num_threshold))
literal_cis = numeric_cis | date_cis
context_cis = set(get_context_headers(columns))
singleton_cis = set(get_singleton_cols(df))
longtext_cis = set(get_longtext_cols(df))
for colnr, c in enumerate(df):
text = []
# Only cluster non-numeric, short, non-singleton-context columns
bad_colnrs = literal_cis | longtext_cis | (singleton_cis & context_cis)
if colnr not in bad_colnrs:
text = df.iloc[:, colnr]
yield tabid, colnr, text
@classmethod
def _analyzer_tokens(cls, value):
value = cls.re_money.sub(" $MONEY$ ", value)
        value = re.sub(r"\d", "$", value)
yield from value.split()
@classmethod
def _analyzer(cls, values):
return [t for v in values if v for t in cls._analyzer_tokens(v) if t]
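    # Note: _analyzer_tokens masks digits as "$" and collapses currency amounts into a
    # single "$MONEY$" token, so e.g. "costs $12.99 today" tokenizes roughly as
    # ["costs", "$MONEY$", "today"].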
def _get_index_and_features(self, tabid_df, tabid_and_colnr_to_colid):
from sklearn.feature_extraction.text import TfidfVectorizer
tabids, colnrs, texts = zip(
*self.iter_text(tabid_df, num_threshold=self.num_threshold)
)
colids = [tabid_and_colnr_to_colid[tc] for tc in zip(tabids, colnrs)]
index = pd.MultiIndex.from_tuples(zip(colids, tabids))
texts = progress(texts, desc="Extracting features")
D = TfidfVectorizer(**self.tfidf_kwargs).fit_transform(texts)
return index, D
def match(self, tabid_df, tabid_and_colnr_to_colid):
index, D = self._get_index_and_features(tabid_df, tabid_and_colnr_to_colid)
log.debug("Got %s column features. Calculating similarities...", D.shape)
simmat = pd.DataFrame(D.dot(D.T).todense(), index=index, columns=index)
simseries = simmat.stack().stack()
simseries.index.names = ("ci1", "ti1", "ti2", "ci2")
return simseries.rename(self.name)
class ExactHeadMatcher:
def __init__(self, name=None, include_context=True):
self.include_context = include_context
self.name = name or self.__class__.__name__
def match(self, tabid_df, tabid_and_colnr_to_colid):
D = pd.DataFrame(
[
(ti, ci, " ".join(tabid_df[ti].columns[cn]))
for (ti, cn), ci in tabid_and_colnr_to_colid.items()
for context in [set(get_context_headers(tabid_df[ti].columns))]
if self.include_context or (cn not in context)
],
columns=["ti", "ci", "h"],
)
hsim = D.merge(D, on="h", suffixes=("1", "2")).set_index(
["ci1", "ti1", "ti2", "ci2"]
)
return hsim.assign(h=1).h.rename(self.name)
class KBClassMatcher:
def __init__(self, kb, name=None, include_context=True, pred_max_threshold = 0.5):
self.kb = kb
self.include_context = include_context
self.name = name or self.__class__.__name__
self.pred_max_threshold = pred_max_threshold
def match(self, tabid_df, tabid_and_colnr_to_colid):
tabid_fkpreds = {}
        for tabid, df in progress(tabid_df.items(), desc=f'Getting {self.name} candidates'):
            sim = self.kb.get_sim(df)
            if sim is None:
                # no usable (non-numeric, non-date, mostly-filled) columns to match against the KB
                continue
            pred = sim.agg(['idxmax', 'max']).T
            pred['max'] /= pred['max'].max()
            pred = pred[pred['max'] > self.pred_max_threshold]
            tabid_fkpreds[tabid] = pred.values
# tabid_fkpreds[tabid] = pred.to_dict(orient='index')
# fkclass_candidates = []
# for tabid, fkpreds in tabid_fkpreds.items():
# context = set(get_context_headers(df.columns))
# for cn, fkpred in fkpreds.items():
# fkclass, score = fkpred['idxmax'], fkpred['max']
# if self.include_context or (cn not in context):
# ci = tabid_and_colnr_to_colid.get((tabid, cn))
# if ci is not None:
# cands = (tabid, ci, fkclass, score)
# fkclass_candidates.append( cands )
fkclass_candidates = []
for (ti, cn), ci in tabid_and_colnr_to_colid.items():
if ti in tabid_fkpreds:
context = set(get_context_headers(tabid_df[ti].columns))
if self.include_context or (cn not in context):
                    for fkclass, score in tabid_fkpreds[ti]:
cands = (ti, ci, fkclass, score)
fkclass_candidates.append( cands )
D = | pd.DataFrame(fkclass_candidates, columns=["ti", "ci", "fkclass", "score"]) | pandas.DataFrame |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"]))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(["num", "col"], inplace=True)
pmidx.rename(["num", "col"], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename("number"))
self.assertRaises(ValueError, lambda: kmidx.rename(["number"]))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
kidx = ks.from_pandas(pidx)
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegex(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegex(KeyError, "Requested level (hi)*"):
kidx.unique(level="hi")
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_drop_duplicates(self):
pidx = pd.Index([4, 2, 4, 1, 4, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values())
self.assert_eq(
(kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values()
)
def test_dropna(self):
pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.dropna(), pidx.dropna())
self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna())
def test_index_symmetric_difference(self):
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([2, 3, 4, 5])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(
kidx1.symmetric_difference(kidx2).sort_values(),
pidx1.symmetric_difference(pidx2).sort_values(),
)
self.assert_eq(
(kidx1 + 1).symmetric_difference(kidx2).sort_values(),
(pidx1 + 1).symmetric_difference(pidx2).sort_values(),
)
pmidx1 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pmidx2 = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(
kmidx1.symmetric_difference(kmidx2).sort_values(),
pmidx1.symmetric_difference(pmidx2).sort_values(),
)
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
midx_ = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()),
)
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)()
# Index properties
missing_properties = inspect.getmembers(
MissingPandasLikeIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(
MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, "ks", "ks", None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [
[list("abc"), list("edf")],
[list("aac"), list("edf")],
[list("aac"), list("eef")],
[[1, 4, 4], [4, 6, 6]],
]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"):
kdf.set_index(["a", "b"]).index.any()
with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"):
kdf.set_index(["a", "b"]).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(["a", "b", "c"]))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 1)
def test_multiindex_nlevel(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=[list("abc"), list("def")])
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 2)
def test_multiindex_from_arrays(self):
arrays = [["a", "a", "b", "b"], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays)
kidx = ks.MultiIndex.from_arrays(arrays)
self.assert_eq(pidx, kidx)
def test_multiindex_swaplevel(self):
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", "number"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
self.assert_eq(pidx.swaplevel("word", 1), kidx.swaplevel("word", 1))
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(-3, "word")
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, 2)
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, -3)
with self.assertRaisesRegex(KeyError, "Level work not found"):
kidx.swaplevel(0, "work")
def test_multiindex_droplevel(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2)], names=["level1", "level2", "level3"]
)
kidx = ks.from_pandas(pidx)
with self.assertRaisesRegex(IndexError, "Too many levels: Index has only 3 levels, not 5"):
kidx.droplevel(4)
with self.assertRaisesRegex(KeyError, "Level level4 not found"):
kidx.droplevel("level4")
with self.assertRaisesRegex(KeyError, "Level.*level3.*level4.*not found"):
kidx.droplevel([("level3", "level4")])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 4 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 1, 2])
self.assert_eq(pidx.droplevel(0), kidx.droplevel(0))
self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1]))
self.assert_eq(pidx.droplevel([0, "level2"]), kidx.droplevel([0, "level2"]))
def test_index_fillna(self):
pidx = pd.Index([1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.fillna(0), kidx.fillna(0))
self.assert_eq(pidx.rename("name").fillna(0), kidx.rename("name").fillna(0))
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kidx.fillna([1, 2])
def test_index_drop(self):
pidx = pd.Index([1, 2, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop(1), kidx.drop(1))
self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))
def test_multiindex_drop(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z")], names=["level1", "level2"]
)
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop("a"), kidx.drop("a"))
self.assert_eq(pidx.drop(["a", "b"]), kidx.drop(["a", "b"]))
self.assert_eq(pidx.drop(["x", "y"], level=1), kidx.drop(["x", "y"], level=1))
self.assert_eq(pidx.drop(["x", "y"], level="level2"), kidx.drop(["x", "y"], level="level2"))
pidx.names = ["lv1", "lv2"]
kidx.names = ["lv1", "lv2"]
self.assert_eq(pidx.drop(["x", "y"], level="lv2"), kidx.drop(["x", "y"], level="lv2"))
self.assertRaises(IndexError, lambda: kidx.drop(["a", "b"], level=2))
self.assertRaises(KeyError, lambda: kidx.drop(["a", "b"], level="level"))
kidx.names = ["lv", "lv"]
self.assertRaises(ValueError, lambda: kidx.drop(["x", "y"], level="lv"))
def test_sort_values(self):
pidx = pd.Index([-10, -100, 200, 100])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx.name = "koalas"
kidx.name = "koalas"
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx = | pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) | pandas.MultiIndex.from_tuples |
from __future__ import annotations
import numpy as np
import pandas as pd
from lamarck.utils import objective_ascending_map
def rank_formatter(name):
def deco(rank_func):
def wrapper(obj, *a, **kw):
return rank_func(obj, *a, **kw).astype(int).rename(name)
return wrapper
return deco
class RankCalculator:
"""
Fitness calculations based on the simulation results.
"""
results: pd.DataFrame
out: list | str
def __init__(self, results_df: pd.DataFrame = pd.DataFrame(), out: list | str = ''):
self.update(results_df=results_df, out=out)
def update(self, results_df: pd.DataFrame = None, out: list | str = None) -> None:
if results_df is not None:
self.results = results_df.copy()
if out is not None:
self.out = out
@rank_formatter('Rank')
def single(self, objective: str) -> pd.Series:
"""
Ranks one `output` to optimize according to a defined `objective`.
"""
return self.results[self.out]\
.rank(method='min', ascending=objective_ascending_map[objective])
@rank_formatter('Rank')
def ranked(self, objectives: list[str]) -> pd.Series:
"""
Get the Gene Ranks based on a set of `outputs` and `objectives` in order of priority.
"""
ranks = [
self.results[priority].rank(method='min',
ascending=objective_ascending_map[objective])
for priority, objective in zip(self.out, objectives)]
rank = ranks[-1]
for r in ranks[::-1]:
order = int(np.log10(r.max())) + 1
factor = 10**order
rscore = r * factor + rank
rank = rscore.rank(method='min')
return rank
@rank_formatter('Rank')
def pareto(self, objectives: list[str]) -> pd.Series:
"""
Get the Pareto Ranks based on the `pareto fronts` and the `crowds` Series.
"""
fronts = self.pareto_fronts(objectives)
crowds = self.pareto_crowds(fronts)
r1 = fronts.rank(method='dense', ascending=True)
r2 = crowds.rank(method='dense', ascending=False)
order1 = int(np.log10(r2.max())) + 1
factor1 = 10**order1
return (r1 * factor1 + r2).rank(method='min')
def pareto_fronts(self, objectives: list[str]) -> pd.Series:
"""
Get the Pareto Fronts.
"""
norm_df = normalize_df_by_objective(self.results, self.out, objectives)
dominators = get_dominators(norm_df)
return get_fronts(dominators).rename('Front')
def pareto_crowds(self, fronts: pd.Series) -> pd.Series:
"""
Get the Pareto Crowds.
"""
frontvals = sorted(fronts.unique())
crowds = pd.Series(np.zeros(len(self.results[self.out])), index=self.results.index)
for front in frontvals:
f = fronts == front
crowds[f] = get_crowd(self.results[f])
return crowds.rename('Crowd')
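# Example usage (sketch; output/objective names are illustrative):
#   rc = RankCalculator(results_df, out=['cost', 'speed'])
#   ranks = rc.pareto(['min', 'max'])  # pd.Series of integer ranks named 'Rank'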
def normalize_series_by_objective(series, objective):
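    """Min-max scale the series so that 1.0 is always best: ascending for 'max' objectives,
    inverted for 'min' objectives."""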
maxval = series.max()
minval = series.min()
data_range = maxval - minval
abs_series = series - minval
if objective == 'max':
norm_series = abs_series/data_range
elif objective == 'min':
norm_series = 1 - abs_series/data_range
return norm_series
def normalize_df_by_objective(df, outputs, objectives):
data_dict = {
output: normalize_series_by_objective(df[output], objective)
for output, objective in zip(outputs, objectives)
}
return | pd.DataFrame(data_dict, index=df.index) | pandas.DataFrame |
#!/usr/bin/env python
from pandas.io.formats.format import SeriesFormatter
from Bio.SeqUtils import seq1
from Bio import SeqIO
import pandas as pd
import argparse
from pathlib import Path
import numpy as np
from summarise_snpeff import parse_vcf, write_vcf
import csv
import re
from functools import reduce
from bindingcalculator import BindingCalculator
from itertools import takewhile
def get_contextual_bindingcalc_values(residues_list,binding_calculator, option, bindingcalc_data = None):
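    """With option "res_ret_esc", annotate Spike RBD residues (positions 331-531) with the
    per-site retained antibody escape (BEC_RES) and return the annotated DataFrame; otherwise
    return the overall antibody escape fraction (1 - binding retained) for the residue list."""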
if option == "res_ret_esc":
residues_df = residues_list.copy()
res_ret_esc_df = binding_calculator.escape_per_site(residues_df.loc[(residues_df["Gene_Name"] == "S") & (residues_df["respos"] >= 331) & (residues_df["respos"] <= 531) & (residues_df["respos"].isin(bindingcalc_data["site"].unique())), "respos"])
res_ret_esc_df["Gene_Name"] = "S"
res_ret_esc_df.rename(columns = {"retained_escape" : "BEC_RES"}, inplace = True)
residues_df = residues_df.merge(res_ret_esc_df[["site", "BEC_RES", "Gene_Name"]], left_on = ["Gene_Name", "respos"], right_on = ["Gene_Name", "site"],how = "left")
residues_df.drop(axis = 1 , columns = ["site"], inplace = True)
return(residues_df)
else:
ab_escape_fraction = 1 - binding_calculator.binding_retained(residues_list)
return(ab_escape_fraction)
def summarise_score(summary_df, metric):
#assumes grouping by sample_id and summarising for each sample
summary_df_info = summary_df.groupby("sample_id").agg({metric: ['sum', 'min', 'max']})
summary_df_info.columns = summary_df_info.columns.droplevel(0)
summary_df_info = summary_df_info.reset_index()
summary_df_info = summary_df_info.rename_axis(None, axis=1)
summary_df_mins = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "min"]], left_on = ["sample_id", metric], right_on = ["sample_id", "min"])
summary_df_mins[metric + "_min"] = summary_df_mins["residues"] + ":" + summary_df_mins[metric].fillna("").astype(str)
summary_df_mins = summary_df_mins[["sample_id",metric + "_min"]].groupby("sample_id").agg({metric + "_min" : lambda x : list(x)})
summary_df_mins[metric + "_min"] = summary_df_mins[metric + "_min"].str.join(",")
summary_df_max = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "max"]], left_on = ["sample_id", metric], right_on = ["sample_id", "max"])
summary_df_max[metric + "_max"] = summary_df_max["residues"] + ":" + summary_df_max[metric].fillna("").astype(str)
summary_df_max = summary_df_max[["sample_id",metric + "_max"]].groupby("sample_id").agg({metric + "_max" : lambda x : list(x)})
summary_df_max[metric + "_max"] = summary_df_max[metric + "_max"].str.join(",")
summary_df_sum = summary_df.groupby("sample_id").agg({metric: sum})
summary_df_sum.columns = [metric + "_sum"]
summary_df_final = summary_df_sum.merge(summary_df_max,on='sample_id').merge(summary_df_mins,on='sample_id')
return(summary_df_final)
def sample_header_format(item, sample, vcf, filtered, vcf_loc):
    # The merged-VCF path substitution is identical whether or not the input was filtered,
    # so the `filtered` flag does not change the rewrite here.
    if vcf == True:
        if item.startswith("##bcftools_mergeCommand=merge"):
            item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
    else:
        if item.startswith("##reference="):
            item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.\/]+(?=\.fasta)', f'{sample}', item)
        if item.startswith("##source="):
            item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.]+(?=\.fasta)', f'{sample}', item)
            item = re.sub(r'(?<=fatovcf\/)[a-zA-Z0-9_\.]+(?=\.vcf)', f'{sample}', item)
        if item.startswith("##bcftools_mergeCommand=merge"):
            item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
    return item
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('input_vcf', metavar='anno_concat.tsv', type=str,
help='Concatenated SPEAR anno file')
parser.add_argument('output_dir', metavar='spear_vcfs/', type=str,
help='Destination dir for summary tsv files')
parser.add_argument('data_dir', metavar='data/', type=str,
help='Data dir for binding calculator data files')
parser.add_argument('input_header', metavar='merged.vcf', type=str,
help='Merged VCF file for header retrieval')
parser.add_argument('sample_list', metavar='', nargs="+",
help='list of inputs to detect no variant samples ')
parser.add_argument('--is_vcf_input', default="False", type=str,
help = "Set input file type to VCF")
parser.add_argument('--is_filtered', default="False", type=str,
help = "Specify files come from filtered directory")
args = parser.parse_args()
Path(f'{args.output_dir}/per_sample_annotation').mkdir(parents=True, exist_ok=True)
    # argparse receives these flags as strings ("True"/"False"), so compare against the string value
    if args.is_vcf_input == "True":
        if args.is_filtered == "True":
            infiles = f'{args.output_dir}/intermediate_output/masked/*.masked.vcf'
        else:
            infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
else:
infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
with open(args.input_header, 'r') as fobj:
headiter = takewhile(lambda s: s.startswith('#'), fobj)
merged_header = | pd.Series(headiter) | pandas.Series |
import os
import logging
import json
import collections
import yaml
import pandas as pd
import graphviz as gv
# import numpy as np
# from matplotlib import pyplot as plt
from kinemparse import assembly as lib_asm
from mathtools import utils
from seqtools import fstutils_openfst as fstutils
import pywrapfst as libfst
logger = logging.getLogger(__name__)
def convert_labels(labels):
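    """Convert raw annotation segments into (start, end, action, arg1, arg2) attach events,
    with arg2 guessed from the part type of arg1."""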
new_labels = tuple(gen_labels(labels))
def gen_filled_labels(labels):
for start, end, action, arg1 in labels:
if arg1.startswith('leg'):
yield start, end, action, arg1, 'table top 1'
elif arg1.startswith('shelf'):
yield start, end, action, arg1, 'leg 1'
yield start, end, action, arg1, 'leg 2'
yield start, end, action, arg1, 'leg 3'
yield start, end, action, arg1, 'leg 4'
elif arg1.startswith('side panel'):
all_args = tuple(label[3] for label in labels)
if 'back panel 1' in all_args and 'front panel 1' not in all_args:
yield start, end, action, arg1, 'front panel 1'
elif 'front panel 1' in all_args and 'back panel 1' not in all_args:
yield start, end, action, arg1, 'back panel 1'
else:
warn_str = f"Can't guess arg2 for side panel: {all_args}"
raise AssertionError(warn_str)
elif arg1.startswith('bottom panel'):
yield start, end, action, arg1, 'side panel 1'
yield start, end, action, arg1, 'side panel 2'
yield start, end, action, arg1, 'front panel 1'
elif arg1.startswith('pin'):
yield start, end, action, arg1, '??? FIXME'
elif arg1.startswith('front panel') or arg1.startswith('back panel'):
yield start, end, action, arg1, 'side panel 1'
yield start, end, action, arg1, 'side panel 2'
yield start, end, action, arg1, 'bottom panel 1'
new_new_labels = pd.DataFrame(
tuple(gen_filled_labels(new_labels)),
columns=('start', 'end', 'action', 'arg1', 'arg2')
)
return new_new_labels
def gen_labels(labels):
event_starts = {}
def get_event_bounds(part_name, start_index, end_index):
if part_name in event_starts:
start_index = event_starts[part_name]
del event_starts[part_name]
else:
warn_str = f" No start action for {part_name}"
logger.warning(warn_str)
start_index = start_index
return (start_index, end_index)
part_names = collections.defaultdict(list)
def get_part_name(base_name):
part_num = len(part_names[base_name]) + 1
part_name = f"{base_name} {part_num}"
part_names[base_name].append(part_name)
return part_name
for row in labels.itertuples(index=False):
label = row.label
i_start = row.start
i_end = row.end
if label.startswith('pick up'):
part_name = label.split('pick up ')[1]
if part_name in event_starts:
warn_str = f" Repeated pick up action: {label}"
logger.warning(warn_str)
event_starts[part_name] = i_start
elif label.startswith('lay down'):
part_name = label.split('lay down ')[1]
if part_name not in event_starts:
warn_str = f" No pick up action before {label}"
logger.warning(warn_str)
continue
del event_starts[part_name]
elif label == 'spin leg':
base_name = 'leg'
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
yield (start_index, end_index, 'attach', part_name)
elif label == 'attach shelf to table':
base_name = 'shelf'
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
yield (start_index, end_index, 'attach', part_name)
elif label.startswith('attach drawer'):
base_name = label.split('attach drawer ')[1]
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
yield (start_index, end_index, 'attach', part_name)
elif label == 'attach drawer back panel':
base_name = 'back panel'
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
yield (start_index, end_index, 'attach', part_name)
elif label == 'slide bottom of drawer':
base_name = 'bottom panel'
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
yield (start_index, end_index, 'attach', part_name)
elif label == 'insert drawer pin':
base_name = 'pin'
start_index, end_index = get_event_bounds(base_name, i_start, i_end)
part_name = get_part_name(base_name)
logger.warning(' SKIPPING PIN-INSERTION ACTIONS')
continue
# yield (start_index, end_index, 'attach', part_name)
def parse_assembly_actions(actions, kinem_vocab):
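    """Replay the attach actions as cumulative Assembly states and return a DataFrame with
    columns (start, end, state), where `state` is an index into `kinem_vocab`."""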
def gen_segments(actions):
prev_start = actions['start'][0]
prev_end = actions['end'][0]
prev_start_index = 0
for row in actions.itertuples(index=True):
i = row.Index
# label = row.label
i_start = row.start
i_end = row.end
# arg1 = row.arg1
# arg2 = row.arg2
if i_start != prev_start or i_end != prev_end:
yield prev_start_index, i - 1
prev_start = i_start
prev_end = i_end
prev_start_index = i
else:
yield prev_start_index, i
def gen_kinem_labels(actions):
state = lib_asm.Assembly()
action_segs = tuple(gen_segments(actions))
for start, end in action_segs:
segment = actions.loc[start:end]
for row in segment.itertuples(index=False):
# label = row.label
# i_start = row.start
# i_end = row.end
arg1 = row.arg1
arg2 = row.arg2
parent = lib_asm.Link(arg1)
child = lib_asm.Link(arg2)
joint = lib_asm.Joint((arg1, arg2), 'rigid', arg1, arg2)
state = state.add_joint(
joint, parent, child,
# directed=False,
in_place=False
)
joint = lib_asm.Joint((arg2, arg1), 'rigid', arg2, arg1)
state = state.add_joint(
joint, child, parent,
# directed=False,
in_place=False
)
start_idx = actions.loc[start]['start']
end_idx = actions.loc[end]['end']
state_idx = utils.getIndex(state, kinem_vocab)
yield start_idx, end_idx, state_idx
kinem_labels = tuple(gen_kinem_labels(actions))
return pd.DataFrame(kinem_labels, columns=['start', 'end', 'state'])
def make_goal_state(furn_name):
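    """Return the fully-assembled goal Assembly (rigid joints added in both directions) for a
    supported furniture type."""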
if furn_name == 'Kallax_Shelf_Drawer':
connections = (
('side panel 1', 'front panel 1'),
('side panel 2', 'front panel 1'),
('bottom panel 1', 'front panel 1'),
('bottom panel 1', 'side panel 1'),
('bottom panel 1', 'side panel 2'),
('back panel 1', 'side panel 1'),
('back panel 1', 'side panel 2'),
('back panel 1', 'bottom panel 1'),
)
elif furn_name == 'Lack_Coffee_Table':
connections = (
('leg 1', 'table top 1'),
('leg 2', 'table top 1'),
('leg 3', 'table top 1'),
('leg 4', 'table top 1'),
('shelf 1', 'leg 1'),
('shelf 1', 'leg 2'),
('shelf 1', 'leg 3'),
('shelf 1', 'leg 4')
)
elif furn_name == 'Lack_TV_Bench':
connections = (
('leg 1', 'table top 1'),
('leg 2', 'table top 1'),
('leg 3', 'table top 1'),
('leg 4', 'table top 1'),
('shelf 1', 'leg 1'),
('shelf 1', 'leg 2'),
('shelf 1', 'leg 3'),
('shelf 1', 'leg 4')
)
elif furn_name == 'Lack_Side_Table':
connections = (
('leg 1', 'table top 1'),
('leg 2', 'table top 1'),
('leg 3', 'table top 1'),
('leg 4', 'table top 1'),
)
else:
err_str = f"Unrecognized furniture name: {furn_name}"
raise ValueError(err_str)
goal_state = lib_asm.Assembly()
for arg1, arg2 in connections:
link1 = lib_asm.Link(arg1)
link2 = lib_asm.Link(arg2)
joint_12 = lib_asm.Joint((arg1, arg2), 'rigid', arg1, arg2)
joint_21 = lib_asm.Joint((arg2, arg1), 'rigid', arg2, arg1)
goal_state = goal_state.add_joint(joint_12, link1, link2, in_place=False)
goal_state = goal_state.add_joint(joint_21, link2, link1, in_place=False)
return goal_state
def _convert_labels(labels):
def ignore(label):
ignore_prefixes = (
'push', 'align', 'tighten', 'rotate', 'flip', 'position',
'pick up', 'lay down'
)
for prefix in ignore_prefixes:
if label.startswith(prefix):
return True
return False
filtered_labels = tuple(label for label in labels if not ignore(label))
label_pairs = []
for cur_label, next_label in zip(filtered_labels[:-1], filtered_labels[1:]):
if cur_label.startswith('pick up') and next_label.startswith('lay down'):
pick_name = cur_label.split('pick up')[1]
place_name = next_label.split('lay down')[1]
if pick_name == place_name:
continue
else:
logger.info(f"{pick_name} != {place_name}")
label_pairs.append((cur_label, next_label))
if not any(label_pairs):
return []
new_labels = (label_pairs[0][0],) + tuple(second for first, second in label_pairs)
return new_labels
def main(out_dir=None, data_dir=None, annotation_dir=None):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
annotation_dir = os.path.expanduser(annotation_dir)
annotation_dir = os.path.join(annotation_dir, 'action_annotations')
vocab_fn = os.path.join(
data_dir, 'ANU_ikea_dataset', 'indexing_files', 'atomic_action_list.txt'
)
with open(vocab_fn, 'rt') as file_:
action_vocab = file_.read().split('\n')
part_names = (
label.split('pick up ')[1] for label in action_vocab
if label.startswith('pick up')
)
new_action_vocab = tuple(f"{part}" for part in part_names)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_labels_dir = os.path.join(out_dir, 'labels')
if not os.path.exists(out_labels_dir):
os.makedirs(out_labels_dir)
# gt_action = np.load(os.path.join(annotation_dir, 'gt_action.npy'), allow_pickle=True)
with open(os.path.join(annotation_dir, 'gt_segments.json'), 'r') as _file:
gt_segments = json.load(_file)
ann_seqs = {
seq_name: [ann for ann in ann_seq['annotation']]
for seq_name, ann_seq in gt_segments['database'].items()
}
kinem_vocab = [lib_asm.Assembly()]
all_label_index_seqs = collections.defaultdict(list)
for seq_name, ann_seq in ann_seqs.items():
logger.info(f"Processing sequence {seq_name}...")
furn_name, other_name = seq_name.split('/')
goal_state = make_goal_state(furn_name)
label_seq = tuple(ann['label'] for ann in ann_seq)
segment_seq = tuple(ann['segment'] for ann in ann_seq)
start_seq, end_seq = tuple(zip(*segment_seq))
df = | pd.DataFrame({'start': start_seq, 'end': end_seq, 'label': label_seq}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import library.areamanager as areamanager
import pandas as pd
import json
import time
import collections
import numpy as np
import pickle
import library.cat_utils as cat_utils
import library.geo_utils as geo_utils
from library.parallel_util import run_parallel
from library.constants import geocat_constants,experiment_constants
from tqdm import tqdm
import math
import sklearn
import sklearn.neighbors
SPLIT_YEAR=2017
earth_radius = 6371000 / 1000  # Earth radius in km
cities=['lasvegas','phoenix','charlotte','madison']
# cities=experiment_constants.CITIES
#cities=['madison']
dict_alias_title,category_tree,dict_alias_depth=cat_utils.cat_structs("../../data/categories.json")
undirected_category_tree=category_tree.to_undirected()
def category_filter(categories):
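    """Keep Yelp categories whose alias depth is at most 2, then reduce the list to the most
    detailed categories."""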
tmp_cat_list=list()
if categories != None:
for category in categories:
try:
if dict_alias_depth[dict_alias_title[category]] <= 2:
tmp_cat_list.append(dict_alias_title[category])
except:
pass
tmp_cat_list=cat_utils.get_most_detailed_categories(tmp_cat_list,dict_alias_title,dict_alias_depth)
return tmp_cat_list
def category_normalization(categories):
if categories != None:
return categories
else:
return []
TRAIN_SIZE=experiment_constants.TRAIN_SIZE
TEST_SIZE=1-TRAIN_SIZE
# In[2]:
fbusiness=open("../../data/business.json")
poi_data = dict()
start_time=time.time()
for i, line in enumerate(fbusiness):
# json to dict
obj_json = json.loads(line)
# add to the data collection
if obj_json['categories'] != None:
poi_data[obj_json['business_id']]={'latitude':obj_json['latitude'],
'longitude':obj_json['longitude'],
'categories':obj_json['categories'].split(', ')}
else:
poi_data[obj_json['business_id']]={'latitude':obj_json['latitude'],
'longitude':obj_json['longitude'],
'categories':obj_json['categories']}
print(time.time()-start_time)
# In[3]:
areas=dict()
for city in cities:
areas[city]=areamanager.delimiter_area(city)
# In[4]:
cities_pid_in_area=dict()
start_time=time.time()
for city in cities:
area=areas[city]
pid_in_area=collections.defaultdict(bool)
for poi_id in poi_data:
if areamanager.poi_in_area(area,poi_data[poi_id]):
pid_in_area[poi_id]=True
cities_pid_in_area[city]=pid_in_area
print(time.time()-start_time)
# In[5]:
fuser=open("../../data/user.json")
user_friend = dict()
user_data = dict()
start_time=time.time()
for i, line in enumerate(fuser):
# json to dict
obj_json = json.loads(line)
# add to the data collection
user_friend[obj_json['user_id']]=obj_json['friends'].split(', ')
custom_obj = dict()
for key, value in obj_json.items():
if key not in ['friends','elite','name','user_id']:
custom_obj[key] = value
user_data[obj_json['user_id']] = custom_obj
print(time.time()-start_time)
# In[6]:
freview=open("../../data/review.json")
cities_checkin_data=dict()
for city in cities:
cities_checkin_data[city]=list()
start_time=time.time()
for i, line in enumerate(freview):
# json to dict
obj_json = json.loads(line)
for city in cities:
if cities_pid_in_area[city][obj_json['business_id']]:
# add to the data collection
cities_checkin_data[city].append({'user_id':obj_json['user_id'],
'poi_id':obj_json['business_id'],
'date':obj_json['date']})
break
if i % 500000 ==0:
print(i)
print(time.time()-start_time)
ftip=open("../../data/tip.json")
start_time=time.time()
for i, line in enumerate(ftip):
# json to dict
obj_json = json.loads(line)
for city in cities:
if cities_pid_in_area[city][obj_json['business_id']]:
# add to the data collection
cities_checkin_data[city].append({'user_id':obj_json['user_id'],
'poi_id':obj_json['business_id'],
'date':obj_json['date']})
break
if i % 500000 ==0:
print(i)
print(time.time()-start_time)
# In[ ]:
# df_checkin=pd.read_csv("../../data/checkin.csv")
# df_checkin=df_checkin.set_index("user_id")
# In[ ]:
# city_area=areamanager.delimiter_area('madison')
# df_checkin_city=areamanager.pois_in_area(city_area,df_checkin.reset_index())
# In[ ]:
# i=0
# for idx,checkin in df_checkin.iterrows():
# # print(checkin.business_id)
# if cities_pid_in_area['madison'][checkin.business_id]:
# i+=1
# i
# In[ ]:
# print(len(df_checkin_city['business_id'].drop_duplicates()))
# print(len(df_checkin_city['user_id'].drop_duplicates()))
# print(len(df_checkin_city))
# In[7]:
genoptions=['poi','neighbor','user','checkin','test','train'
,'user_data']
genoptions=['checkin',
'poi','neighbor',
'user','user_data'
]
# In[ ]:
for city in cities:
print("CITY: %s" % (city))
    # Get the check-ins for this city
checkin_data=cities_checkin_data[city]
print("checkin_data size: %d"%(len(checkin_data)))
    # convert to a DataFrame
df_checkin=pd.DataFrame.from_dict(checkin_data)
df_checkin.head(1)
    # Start the filtering: keep only POIs visited by at least 5 distinct users
df_diff_users_visited=df_checkin[['user_id','poi_id']].drop_duplicates().reset_index(drop=True).groupby('poi_id').count().reset_index().rename(columns={"user_id":"diffusersvisited"})
df_diff_users_visited=df_diff_users_visited[df_diff_users_visited['diffusersvisited']>=5]
del df_diff_users_visited['diffusersvisited']
df_checkin= | pd.merge(df_checkin,df_diff_users_visited,on='poi_id',how='inner') | pandas.merge |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: collect_results.py
# --- Creation Date: 08-09-2020
# --- Last Modified: Tue 13 Oct 2020 23:36:17 AEDT
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Collect results.
"""
import os
import json
import numpy as np
import argparse
import pandas as pd
from collections import OrderedDict
METRICS_TEMPLATE = {
'beta_vae_sklearn': {
"train_accuracy": None,
"eval_accuracy": None
},
'dci': {
"informativeness_train": None,
"informativeness_test": None,
"disentanglement": None,
"completeness": None
},
'downstream_task_boosted_trees': {},
'factor_vae_metric': {
"train_accuracy": None,
"eval_accuracy": None,
# "num_active_dims": None # disentanglement_lib wrong implementation.
},
'mig': {
"discrete_mig": None
},
'modularity_explicitness': {
"modularity_score": None,
"explicitness_score_train": None,
"explicitness_score_test": None
},
'sap_score': {
"SAP_score": None
},
'unsupervised': {
"gaussian_total_correlation": None,
"gaussian_wasserstein_correlation": None,
"gaussian_wasserstein_correlation_norm": None,
"mutual_info_score": None
},
'tpl': {
"avg_tpl": None,
"n_active_dims": None
}
}
def get_mean_std_for_config(v_ls, target):
'''
v_ls: [{'eval':0.8, ..}, {'eval': 0.7, ...}, ...]
target: 'eval'
'''
pure_ls = []
for item in v_ls:
if item is not None:
pure_ls.append(item[target])
return (None, None) if len(pure_ls) == 0 else (np.mean(pure_ls),
np.std(pure_ls))
def count_samples(x):
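    """Count how many seeds produced a (non-empty) result."""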
x = list(filter(None, x))
return len(x)
def get_moments(res_dict, template):
'''
Args: result dict for each config and seed:
{'0_0_0_0': [{'eval':0.8}, {'eval': 0.7}, ...]}
template of collected results:
{'eval': None, ...}
Return: mean and std of each config:
{'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05}, ...}
'''
res_dict_moments = {}
for k, v in res_dict.items():
res_dict_moments[k] = {}
for res_k in template.keys():
res_dict_moments[k][res_k+'.mean'], \
res_dict_moments[k][res_k+'.std'] \
= get_mean_std_for_config(v, res_k)
res_dict_moments[k]['n_samples'] = count_samples(v)
return res_dict_moments
def get_json(result_json):
if os.path.exists(result_json):
with open(result_json, 'r') as f:
data = json.load(f)
return data
else:
return None
def get_metric_result(subdir, metric, representation):
result_json = os.path.join(subdir, 'metrics', representation, metric,
'results/json/evaluation_results.json')
return get_json(result_json)
def get_tpl_result(subdir):
result_json = os.path.join(
subdir, 'metrics/tpl/results/json/evaluation_results.json')
return get_json(result_json)
def main():
parser = argparse.ArgumentParser(description='Project description.')
parser.add_argument('--results_dir',
help='Results directory.',
type=str,
default='/mnt/hdd/repo_results/Ramiel/sweep')
parser.add_argument('--metric',
help='Name of the collect metric.',
type=str,
default='factor_vae_metric',
choices=[
'beta_vae_sklearn', 'dci',
'downstream_task_boosted_trees',
'factor_vae_metric', 'mig',
'modularity_explicitness', 'sap_score',
'unsupervised', 'tpl'
])
parser.add_argument('--representation',
help='Representation used.',
type=str,
default='mean',
choices=['mean', 'sampled'])
# parser.add_argument('--overwrite',
# help='Whether to overwrite output directory.',
# type=_str_to_bool,
# default=False)
args = parser.parse_args()
subdirs = os.listdir(args.results_dir)
res_dict = {}
key_template = METRICS_TEMPLATE[args.metric]
for subdir in subdirs:
sub_path = os.path.join(args.results_dir, subdir)
if not os.path.isdir(sub_path):
continue
parse_subdir = subdir.split('-')
hyps = '-'.join(parse_subdir)
seed = '0'
if hyps not in res_dict:
res_dict[hyps] = [None] * 1
# get result for this seed, a dictionary.
if args.metric == 'tpl':
res_dict[hyps][int(seed)] = get_tpl_result(sub_path)
else:
res_dict[hyps][int(seed)] = get_metric_result(
sub_path, args.metric, args.representation)
# {'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05, 'n_samples': 2}, ...}
res_dict = get_moments(res_dict, key_template)
col_heads = ['_config'] + list(res_dict[list(res_dict.keys())[0]].keys())
col_dicts = {k: [] for k in col_heads}
for k, v in res_dict.items():
col_dicts['_config'].append(k)
for j in col_dicts.keys():
if j != '_config':
col_dicts[j].append(v[j])
new_results = OrderedDict(sorted(col_dicts.items()))
results_df = | pd.DataFrame(new_results) | pandas.DataFrame |
"""
Summarize and run basic analysis on MTurk returns
"""
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from factor_analyzer import FactorAnalyzer, ModelSpecificationParser, ConfirmatoryFactorAnalyzer
from joblib import load
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer
from models.plotting import make_fa_plots, make_density
from processing.loading import process_turk_files
from processing.mappings import short_question_names, question_names, factor_names
factor_structure = {"competent":
["competent", "efficient", "focused", "intelligent", "reliable", "responsible"],
"broken": ["broken", "clumsy", "confused", "lost"],
"curious": ["curious", "investigative"]
}
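# Hypothetical sketch only: this mapping could be fed to the imported factor_analyzer helpers
# to build a confirmatory factor analysis spec (not necessarily how the analysis below proceeds);
# `ratings_df` is a placeholder for a frame holding the adjective columns listed above.
#   spec = ModelSpecificationParser.parse_model_specification_from_dict(ratings_df, factor_structure)
#   cfa = ConfirmatoryFactorAnalyzer(spec)
#   cfa.fit(ratings_df.to_numpy())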
condition_ratings = None
demos = None
other_data = None
demo_trajs = []
demo_exps = []
demo_prompts = []
demo_wids = []
for base in ["pilot1", "active1", "active2", "mdn_active1", "mdn_active2"]:
cr, d, o, comparison = process_turk_files(base + ".csv", traj_file=base + "_trajs.json")
q_names = [q_name for q_name in question_names if q_name in cr.columns]
# Fill in missing values
cr[cr[q_names] == 6] = np.nan
imp = IterativeImputer(missing_values=np.nan, max_iter=200, random_state=0, min_value=1, max_value=5)
to_impute = cr[q_names].to_numpy()
cr[q_names] = np.rint(imp.fit_transform(to_impute)).astype(int)
assert not cr[cr[q_names] == np.nan].any().any()
assert not cr[cr[q_names] == 6].any().any()
if condition_ratings is not None:
condition_ratings = pd.concat([condition_ratings, cr], ignore_index=True)
demos = | pd.concat([demos, d]) | pandas.concat |
"""
training of LR_clim_clim_conv baseline
"""
from tensorflow.keras.layers import Input, Dense
from cbrain.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import LearningRateScheduler,Callback
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import tensorflow as tf
from cbrain.imports import *
from cbrain.utils import *
import pandas as ps
# construction of LR_clim_clim_conv, closely mirroring AE_clim_clim_conv but with linear activations
original_dim_input=64 # CAM variables node size
original_dim_output=int(65+64) # SP + CAM variables node size
# network parameters
input_shape = (original_dim_input,)
out_shape=(original_dim_output,)
intermediate_dim = 463 # width of the first hidden layers of the linear encoder / last hidden layers of the linear decoder
batch_size = 714
latent_dim = 5 # latent space width
epochs = 40
## Linear Encoder
inputs =Input(shape=input_shape, name='encoder_input')
x_0 =Dense(intermediate_dim, activation='linear')(inputs)
x_1 =Dense(intermediate_dim, activation='linear')(x_0)
x_2 =Dense(int(np.round(intermediate_dim/2)), activation='linear')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='linear')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/8)), activation='linear')(x_3)
x_5 =Dense(int(np.round(intermediate_dim/16)), activation='linear')(x_4)
z_lin = Dense(latent_dim, activation='linear', name='z_lin')(x_5)
# instantiate encoder model
encoder_lin = Model(inputs, [z_lin], name='encoder_lin')
## linear Decoder
decoder_inputs =Input(shape=(latent_dim,), name='decoder_input')
x_1 =Dense(int(np.round(intermediate_dim/16)), activation='linear')(decoder_inputs)
x_2 =Dense(int(np.round(intermediate_dim/8)), activation='linear')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='linear')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/2)), activation='linear')(x_3)
x_5 =Dense(intermediate_dim, activation='linear')(x_4)
x_6 =Dense(intermediate_dim, activation='linear')(x_5)
outputs = Dense(original_dim_output, activation='linear')(x_6)
decoder_lin = Model(decoder_inputs, outputs, name='decoder')
emul_outputs=decoder_lin(encoder_lin(inputs))
LR_clim_clim_conv=Model(inputs,emul_outputs)
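# The compile/fit step is outside this excerpt; a minimal sketch under that assumption
# (the optimizer choice here is a placeholder, not taken from the original setup):
#   LR_clim_clim_conv.compile(optimizer='adam', loss=mse)
#   LR_clim_clim_conv.summary()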
#loading scaling dictionary of SP variables
scale_array=ps.read_csv('nn_config/scale_dicts/Scaling_cond_VAE.csv')
PHQ_std_surf=scale_array.PHQ_std.values[-1]
TPHYSTND_std_23=scale_array.TPHYSTND_std.values[-1]
PRECT_std=scale_array.PRECT_std.values
FSNS_std=scale_array.FSNS_std.values
FSNT_std=scale_array.FSNT_std.values
FLNS_std=scale_array.FLNS_std.values
FLNT_std=scale_array.FLNT_std.values
# loading scaling dictionaries of CAM variables
scale_array_2D=ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range_profiles.csv')
scale_array_1D= | ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range.csv') | pandas.read_csv |
# pylint: disable=W0102
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert_almost_equal(left.mgr_locs, right.mgr_locs)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(
x=arr, shape=shape,
strides=(arr.itemsize,) + (0,) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N,)
shape = (num_items,) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2',
'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('bool'):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N,)
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr, placement, item_shape=item_shape,
num_offset=num_offset,))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
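# Illustrative only, following the syntax documented above:
#   create_mgr('a,b,c: f8; d,e,f: i8')
# builds one float64 block holding a,b,c and one int64 block holding d,e,f, while
# 'a:f8-1; b:f8-2' forces two separate float64 blocks.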
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
self.assertEqual(int32block.dtype, np.int32)
def test_pickle(self):
import pickle
def _check(blk):
pickled = pickle.dumps(blk)
unpickled = pickle.loads(pickled)
assert_block_equal(blk, unpickled)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert_almost_equal(self.fblock.mgr_locs, [0, 2, 4])
def test_attrs(self):
self.assertEqual(self.fblock.shape, self.fblock.values.shape)
self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
self.assertEqual(len(self.fblock), len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals,
ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals,
ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
assert_almost_equal(merged.mgr_locs, [0, 1, 2, 3])
assert_almost_equal(merged.values[[0, 2]], avals)
assert_almost_equal(merged.values[[1, 3]], bvals)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assertIsNot(cop, self.fblock)
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert_almost_equal(newb.mgr_locs, [2, 4])
self.assertTrue((newb.values[0] == 1).all())
newb = self.fblock.copy()
newb.delete(1)
assert_almost_equal(newb.mgr_locs, [0, 4])
self.assertTrue((newb.values[1] == 2).all())
newb = self.fblock.copy()
newb.delete(2)
assert_almost_equal(newb.mgr_locs, [0, 2])
self.assertTrue((newb.values[1] == 1).all())
newb = self.fblock.copy()
self.assertRaises(Exception, newb.delete, 3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
bs = list(bblock.split_block_at('f'))
self.assertEqual(len(bs), 0)
def test_get(self):
pass
def test_set(self):
pass
def test_fillna(self):
pass
def test_repr(self):
pass
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.mgr = create_mgr('a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex')
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
self.assertEqual(mgr.nblocks, 2)
self.assertEqual(len(mgr), 6)
def test_is_mixed_dtype(self):
self.assertFalse(create_mgr('a,b:f8').is_mixed_type)
self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type)
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
self.assertTrue(mgr1._is_indexed_like(mgr1))
self.assertTrue(mgr1._is_indexed_like(mgr2))
self.assertTrue(mgr1._is_indexed_like(mgr3))
self.assertFalse(mgr1._is_indexed_like(
mgr1.get_slice(slice(-1), axis=1)))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
self.assertRaises(AssertionError, BlockManager, blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self):
self.assertIn('a', self.mgr)
self.assertNotIn('baz', self.mgr)
def test_pickle(self):
import pickle
pickled = pickle.dumps(self.mgr)
mgr2 = pickle.loads(pickled)
# same result
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
# self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_non_unique_pickle(self):
import pickle
mgr = create_mgr('a,a,a:f8')
mgr2 = pickle.loads(pickle.dumps(mgr))
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = pickle.loads(pickle.dumps(mgr))
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item, fastpath=False)[i]
assert_almost_equal(res, exp)
exp = self.mgr.get(item).values[i]
assert_almost_equal(res, exp)
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(),
placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').values, values[0])
assert_almost_equal(mgr.get('b').values, values[1])
assert_almost_equal(mgr.get('c').values, values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3,))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
assert_almost_equal(mgr.get('a').values, [0] * 3)
assert_almost_equal(mgr.get('b').values, ['bar'] * 3)
assert_almost_equal(mgr.get('c').values, [2] * 3)
assert_almost_equal(mgr.get('d').values, ['foo'] * 3)
def test_insert(self):
self.mgr.insert(0, 'inserted', np.arange(N))
self.assertEqual(self.mgr.items[0], 'inserted')
assert_almost_equal(self.mgr.get('inserted'), np.arange(N))
for blk in self.mgr.blocks:
yield self.assertIs, self.mgr.items, blk.ref_items
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assertEqual(self.mgr.get('baz').dtype, np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assertEqual(mgr2.get('baz').dtype, np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assertEqual(mgr2.get('quux').dtype, np.int_)
mgr2.set('quux', randn(N))
self.assertEqual(mgr2.get('quux').dtype, np.float_)
def test_copy(self):
shallow = self.mgr.copy(deep=False)
        # we don't guarantee block ordering
for blk in self.mgr.blocks:
found = False
for cp_blk in shallow.blocks:
if cp_blk.values is blk.values:
found = True
break
self.assertTrue(found)
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
self.assertEqual(mgr.as_matrix().dtype, np.float64)
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
self.assertEqual(len(mgr.blocks), 3)
self.assertIsInstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
self.assertEqual(mgr.as_matrix().dtype, np.float64)
mgr = create_mgr('c: f4; d: f2')
self.assertEqual(mgr.as_matrix().dtype, np.float32)
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
self.assertEqual(mgr.as_matrix().dtype, np.bool_)
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int64)
mgr = create_mgr('c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int32)
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('d').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, raise_on_error=False)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
self.assertEqual(tmgr.get('f').dtype.type, t)
self.assertEqual(tmgr.get('g').dtype.type, t)
self.assertEqual(tmgr.get('a').dtype.type, np.object_)
self.assertEqual(tmgr.get('b').dtype.type, np.object_)
if t != np.int64:
self.assertEqual(tmgr.get('d').dtype.type, np.datetime64)
else:
self.assertEqual(tmgr.get('d').dtype.type, t)
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assertEqual(len(old_blocks), len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assertTrue(found)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assertTrue(found)
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(convert_numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(convert_numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int32)
self.assertEqual(new_mgr.get('bool').dtype, np.bool_)
self.assertEqual(new_mgr.get('dt').dtype.type, np.datetime64)
self.assertEqual(new_mgr.get('i').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
self.assertEqual(new_mgr.get('h').dtype, np.float16)
def test_interleave(self):
pass
def test_interleave_non_unique_cols(self):
df = DataFrame([
[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
np.testing.assert_array_equal(df_unique.values, df.values)
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', | randn(N) | pandas.util.testing.randn |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from catboost import CatBoostRegressor
from tqdm import tqdm
import gc
import datetime as dt
print('Loading Properties ...')
properties2016 = pd.read_csv('../input/properties_2016.csv', low_memory = False)
properties2017 = pd.read_csv('../input/properties_2017.csv', low_memory = False)
print('Loading Train ...')
train2016 = pd.read_csv('../input/train_2016_v2.csv', parse_dates=['transactiondate'], low_memory=False)
train2017 = pd.read_csv('../input/train_2017.csv', parse_dates=['transactiondate'], low_memory=False)
def add_date_features(df):
df["transaction_year"] = df["transactiondate"].dt.year
df["transaction_month"] = (df["transactiondate"].dt.year - 2016)*12 + df["transactiondate"].dt.month
df["transaction_day"] = df["transactiondate"].dt.day
df["transaction_quarter"] = (df["transactiondate"].dt.year - 2016)*4 +df["transactiondate"].dt.quarter
df.drop(["transactiondate"], inplace=True, axis=1)
return df
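# Sketch of the engineered values for a hypothetical transactiondate of 2017-05-14:
#   transaction_year=2017, transaction_month=17, transaction_day=14, transaction_quarter=6
# (month and quarter are counted continuously from January 2016, per the formulas above).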
train2016 = add_date_features(train2016)
train2017 = add_date_features(train2017)
print('Loading Sample ...')
sample_submission = pd.read_csv('../input/sample_submission.csv', low_memory = False)
print('Merge Train with Properties ...')
train2016 = pd.merge(train2016, properties2016, how = 'left', on = 'parcelid')
train2017 = pd.merge(train2017, properties2017, how = 'left', on = 'parcelid')
print('Tax Features 2017 ...')
train2017.iloc[:, train2017.columns.str.startswith('tax')] = np.nan
print('Concat Train 2016 & 2017 ...')
train_df = pd.concat([train2016, train2017], axis = 0)
test_df = pd.merge(sample_submission[['ParcelId']], properties2016.rename(columns = {'parcelid': 'ParcelId'}), how = 'left', on = 'ParcelId')
del properties2016, properties2017, train2016, train2017
gc.collect();
print('Remove missing data fields ...')
missing_perc_thresh = 0.98
exclude_missing = []
num_rows = train_df.shape[0]
for c in train_df.columns:
num_missing = train_df[c].isnull().sum()
if num_missing == 0:
continue
missing_frac = num_missing / float(num_rows)
if missing_frac > missing_perc_thresh:
exclude_missing.append(c)
print("We exclude: %s" % len(exclude_missing))
del num_rows, missing_perc_thresh
gc.collect();
print ("Remove features with one unique value !!")
exclude_unique = []
for c in train_df.columns:
num_uniques = len(train_df[c].unique())
if train_df[c].isnull().sum() != 0:
num_uniques -= 1
if num_uniques == 1:
exclude_unique.append(c)
print("We exclude: %s" % len(exclude_unique))
print ("Define training features !!")
exclude_other = ['parcelid', 'logerror','propertyzoningdesc']
train_features = []
for c in train_df.columns:
if c not in exclude_missing \
and c not in exclude_other and c not in exclude_unique:
train_features.append(c)
print("We use these for training: %s" % len(train_features))
print ("Define categorial features !!")
cat_feature_inds = []
cat_unique_thresh = 1000
for i, c in enumerate(train_features):
num_uniques = len(train_df[c].unique())
if num_uniques < cat_unique_thresh \
and not 'sqft' in c \
and not 'cnt' in c \
and not 'nbr' in c \
and not 'number' in c:
cat_feature_inds.append(i)
print("Cat features are: %s" % [train_features[ind] for ind in cat_feature_inds])
print ("Replacing NaN values by -999 !!")
train_df.fillna(-999, inplace=True)
test_df.fillna(-999, inplace=True)
print ("Training time !!")
X_train = train_df[train_features]
y_train = train_df.logerror
print(X_train.shape, y_train.shape)
test_df['transactiondate'] = pd.Timestamp('2016-12-01')
test_df = add_date_features(test_df)
X_test = test_df[train_features]
print(X_test.shape)
num_ensembles = 5
y_pred = 0.0
for i in tqdm(range(num_ensembles)):
model = CatBoostRegressor(
iterations=630, learning_rate=0.03,
depth=6, l2_leaf_reg=3,
loss_function='MAE',
eval_metric='MAE',
random_seed=i)
model.fit(
X_train, y_train,
cat_features=cat_feature_inds)
y_pred += model.predict(X_test)
y_pred /= num_ensembles
submission = pd.DataFrame({
'ParcelId': test_df['ParcelId'],
})
test_dates = {
'201610': pd.Timestamp('2016-09-30'),
'201611': pd.Timestamp('2016-10-31'),
'201612': pd.Timestamp('2016-11-30'),
'201710': pd.Timestamp('2017-09-30'),
'201711': pd.Timestamp('2017-10-31'),
'201712': | pd.Timestamp('2017-11-30') | pandas.Timestamp |
# pylint: disable-msg=E1101,W0613,W0603
import os
import copy
from collections import defaultdict
import numpy as np
import pandas.json as _json
from pandas.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.formats.printing import pprint_thing
loads = _json.loads
dumps = _json.dumps
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False):
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if isinstance(obj, Series):
s = SeriesWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
elif isinstance(obj, DataFrame):
s = FrameWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
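# Illustrative call (not part of the original module, just a usage note): passing
# path_or_buf=None returns the JSON string instead of writing it, e.g.
#   s = to_json(None, df, orient='records', lines=True)
# which yields newline-delimited records, since 'lines' requires orient='records'.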
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise | AbstractMethodError(self) | pandas.core.common.AbstractMethodError |
import re
import struct
import pandas as pd
import numpy as np
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from collections import namedtuple
from datetime import datetime, timedelta
from model_sx_log import ModelSxLog
from kaitaistruct import KaitaiStream, BytesIO, ValidationNotEqualError
TIMESTAMP_IDS = (0xd00007dd, 0xd00007de) # these id's update the rolling timestamp
HEADER_PAT = re.compile(b'\xd0\x00\x07\xde', re.MULTILINE)
agg_descriptor = namedtuple('periodicEntry', 'ID unknown size')
dataPoint = namedtuple('dataPoint', 'timestamp ID value')
MAX_LEN = 50000
def read_entries(data, start_offset=0):
# Inspired by: https://stackoverflow.com/questions/49699820/parsing-binary-messages-with-kaitai-struct-python
stream = KaitaiStream(BytesIO(data))
stream.seek(start_offset)
last = stream.pos()
start = ModelSxLog(stream)
log_entry = start.log_entry
yield log_entry
n_entries = 1
with tqdm(total=stream.size() - start_offset, unit='B',
unit_scale=True, desc='Processing log') as pbar:
while not stream.is_eof():
if n_entries % 1000 == 0:
consumed = stream.pos() - last
pbar.update(consumed)
last = stream.pos()
try:
log_entry = ModelSxLog.Entry(stream, _root=start._root)
if sum(log_entry.raw_bytes) % 256 != 0:
print(f'Checksum error at {stream.pos()}, seeking to the next entry...')
stream.read_bytes_term(0xaa, include_term=False, consume_term=False, eos_error=True)
else:
yield log_entry
except ValidationNotEqualError:
print(f'Encountered an error at {stream.pos()}, probably a corrupt entry, seeking to next one...')
stream.read_bytes_term(0xaa, include_term=False, consume_term=False, eos_error=True)
pass
n_entries += 1
pbar.update(stream.pos() - last)
stream.close()
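# Illustrative use (assumes `data` already holds the raw log bytes):
#   for entry in read_entries(data):
#       ...  # entries with bad checksums are reported and the stream re-synced on the next 0xaa byte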
def process_log(data,
dt_min=None,
dt_max=None,
start_offset=0):
agg_descriptors = dict()
rollingTime = datetime.utcfromtimestamp(0) # init rolling time
for entry in read_entries(data, start_offset=start_offset):
if entry.is_on_change and entry.body.sig_id in TIMESTAMP_IDS:
rollingTime = datetime.utcfromtimestamp(struct.unpack('>L', entry.body.value[:4])[0])
millis = struct.unpack('>H', entry.body.value[4:6])[0]
rollingTime += timedelta(seconds=millis / 1000)
timestamp = rollingTime + timedelta(seconds=entry.counter / 1000.0)
if dt_max and timestamp >= dt_max:
print('Reached end timestamp, stop processing', flush=True)
break
if not entry.is_on_change:
if entry.body.is_descriptor:
agg_id = entry.body.aggregate_body.aggregate_id
agg_descriptors[agg_id] = [agg_descriptor(desc.sig_id, desc.unknown, desc.size)
for desc in entry.body.aggregate_body.descriptors]
else:
                if dt_min and timestamp <= dt_min:
continue
agg_id = entry.body.aggregate_id
if agg_id not in agg_descriptors:
continue # The descriptors are probably in a previous log file
substream = entry.body.aggregate_body.values
cursor = 0
for descriptor in agg_descriptors[agg_id]:
yield dataPoint(timestamp, descriptor.ID, substream[cursor: cursor + descriptor.size])
cursor += descriptor.size
else:
            if dt_min and timestamp <= dt_min:
continue
yield dataPoint(timestamp, entry.body.sig_id, entry.body.value)
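# Minimal, hypothetical consumption sketch (the path and date are made up):
#   data = Path('model_x.log').read_bytes()
#   for point in process_log(data, dt_min=datetime(2019, 1, 1)):
#       ...  # each item is a dataPoint(timestamp, ID, raw value bytes)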
def process_row(d, key, max_len=50000, _cache=None, _store=None):
"""
Append row d to the store 'key'.
When the number of items in the key's cache reaches max_len,
append the list of rows to the HDF5 store and clear the list.
"""
# keep the rows for each key separate.
lst = _cache.setdefault(key, [])
if len(lst) >= max_len:
store_and_clear(lst, key, _store=_store)
lst.append(d)
def store_and_clear(lst, key, _store=None):
"""
Convert key's cache list to a DataFrame and append that to HDF5.
"""
key = f'/{key}'
values = streamline_list(lst)
if values is not None:
df = | pd.DataFrame(values) | pandas.DataFrame |
import argparse
import tempfile
import os
import pandas as pd
import numpy as np
from conga.tcrdist.make_10x_clones_file import make_10x_clones_file
from conga.preprocess import calc_tcrdist_matrix_cpp
# from hello import say_hello_to, parse_charptr_to_py_int
def covepitope_convert_from_10x():
parser = argparse.ArgumentParser(description='')
parser.add_argument(
'fca_file',
type=str,
help='The CSV file containing filtered contig annotation',
)
parser.add_argument(
'--organism',
dest='organism',
default='human',
)
parser.add_argument(
'--edge-threshold',
dest='edge_threshold',
default=100,
help='Any edge that has distance above this number will be removed from the output edge table. This is to reduce the file size. (default: 100)',
)
parser.add_argument(
'clones_csv',
help='The name/path of the output CSV file containing clonotypes',
)
parser.add_argument(
'edges_csv',
help='The name/path of the output CSV file containing the pairwise TCRDist3 distances',
)
args = parser.parse_args()
temp_clones_file = tempfile.NamedTemporaryFile()
# parse filtered_contig_annotations.csv into paired clonotypes
make_10x_clones_file(args.fca_file, args.organism, temp_clones_file.name)
df = pd.read_csv(temp_clones_file.name, sep='\t')
df = df.rename(
columns={
'clone_id': 'original_index',
'subject': 'donor',
'va_gene': 'v_a_gene',
'ja_gene': 'j_a_gene',
'va2_gene': 'va2',
'ja2_gene': 'ja2',
'vb_gene': 'v_b_gene',
'jb_gene': 'j_b_gene',
'cdr3a': 'cdr3_a_aa',
'cdr3a_nucseq': 'cdr3_a_nucseq',
'cdr3a2_nucseq': 'cdr3a2_nt',
'cdr3b': 'cdr3_b_aa',
'cdr3b_nucseq': 'cdr3_b_nucseq',
}
)
df['index'] = np.arange(len(df))
df.to_csv(args.clones_csv, index=False)
# tuples of tuples with tcr info
tcrs = [((l.v_a_gene, l.j_a_gene, l.cdr3_a_aa), (l.v_b_gene, l.j_b_gene, l.cdr3_b_aa)) for l in df.itertuples()]
D_cpp = calc_tcrdist_matrix_cpp(tcrs, args.organism).astype(np.uint)
df_edges = | pd.DataFrame(D_cpp) | pandas.DataFrame |
import pandas as pd
from sqlalchemy import create_engine
from library import cf
import talib.abstract as ta
import pymysql.cursors
import numpy as np
from library.logging_pack import *
logger.debug("subindex시작!!!!")
pymysql.install_as_MySQLdb()
daily_craw_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_craw",
encoding='utf-8')
daily_buy_list_engine = create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_buy_list" ,
encoding='utf-8')
simul_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/simulator11",
encoding='utf-8')
min_craw_engine = create_engine("mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/min_craw",
encoding='utf-8')
stand_date = '20070903'
# data transformation
class subindex:
def __init__(self):
logger.debug("subindex 함수로 들어왔다!!")
def collecting(self):
co_sql = f"select TABLE_NAME FROM information_schema.tables WHERE table_schema = 'daily_craw'"
target_code = daily_craw_engine.execute(co_sql).fetchall()
num = len(target_code)
for i in range(num):
self.db_name = target_code[i][0]
self.db_name = self.db_name.replace("%", "%%")
self.collect_db()
            print(self.db_name, "is being fetched!")
def collect_db(self):
        # load the data
sql = "select date,code,vol10,code_name,open,close,low,high,volume from daily_craw.`%s` where Date >= %s order by Date "
rows = daily_craw_engine.execute(sql%(self.db_name,stand_date)).fetchall()
three_s = pd.DataFrame(rows, columns=['date', 'code','vol10' ,'code_name','open' ,'close', 'low', 'high', 'volume'])
three_s = three_s.fillna(0)
        # data conversion (to numpy arrays)
th_date = list(np.asarray(three_s['date'].tolist()))
th_date_np = np.array(th_date, dtype='f8')
th_close = list(np.asarray(three_s['close'].tolist()))
th_close_np = np.array(th_close, dtype='f8')
th_high = list(np.asarray(three_s['high'].tolist()))
th_high_np = np.array(th_high, dtype='f8')
th_low = list(np.asarray(three_s['low'].tolist()))
th_low_np = np.array(th_low, dtype='f8')
th_volume = list(np.asarray(three_s['volume'].tolist()))
th_volume_np = np.array(th_volume, dtype='f8')
th_open = list(np.asarray(three_s['open'].tolist()))
th_open_np = np.array(th_open, dtype='f8')
th_vol10 = list(np.asarray(three_s['vol10'].tolist()))
th_vol10_np = np.array(th_vol10, dtype='f8')
        # daily high-low price range (%)
th_diff =((three_s['high']-three_s['low'])/three_s['high'])*100
        # rolling 30-day maximum of the high-low range
th_diff30 = th_diff.rolling(30).max()
        # compute technical indicators
th_cci = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 9)
th_cci60 = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 60)
##rsi
th_rsi = ta._ta_lib.RSI(th_close_np, 14)
th_rsi5 = ta._ta_lib.RSI(th_close_np, 5)
th_OBV = ta._ta_lib.OBV(th_close_np, th_volume_np)
th_macd, th_macd_signal, th_macd_hist = ta._ta_lib.MACD(th_close_np, fastperiod=12, slowperiod=26,
signalperiod=9)
th_stoch_slowk, th_stoch_slowd = ta._ta_lib.STOCH(th_high_np, th_low_np, th_close_np,
fastk_period=10, slowk_period=2, slowk_matype=0,
slowd_period=2, slowd_matype=0)
        ## changed to a 12-day basis, following the book
th_BBAND_U, th_BBAND_M, th_BBAND_L = ta._ta_lib.BBANDS(th_close_np, timeperiod=20, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_U14, th_BBAND_M14, th_BBAND_L14 = ta._ta_lib.BBANDS(th_close_np, timeperiod=14, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_WIDE = (th_BBAND_U-th_BBAND_L)/th_BBAND_M
th_BBAND_WIDE14 = (th_BBAND_U14 - th_BBAND_L14) / th_BBAND_M14
th_pb=(th_close_np-th_BBAND_L) / (th_BBAND_U-th_BBAND_L)
th_pb14 = (th_close_np - th_BBAND_L14) / (th_BBAND_U14 - th_BBAND_L14)
th_sar = ta._ta_lib.SAR(th_high_np, th_low_np,0.04,0.4)
th_ibs = (th_close_np -th_low_np)/(th_high_np-th_low_np)
th_dema5 = ta._ta_lib.DEMA(th_close_np, 5)
th_dema20 = ta._ta_lib.DEMA(th_close_np,20)
th_dema60 = ta._ta_lib.DEMA(th_close_np, 60)
th_tema5 = ta._ta_lib.TEMA(th_close_np,5)
th_tema20 = ta._ta_lib.TEMA(th_close_np, 20)
th_tema60 = ta._ta_lib.TEMA(th_close_np, 60)
        # ema = exponential moving average
th_ema5 = ta._ta_lib.EMA(th_close_np, 5)
th_ema20 = ta._ta_lib.EMA(th_close_np, 20)
th_ema60 = ta._ta_lib.EMA(th_close_np, 60)
th_ema112 = ta._ta_lib.EMA(th_close_np, 112)
th_ema224 = ta._ta_lib.EMA(th_close_np, 224)
th_ema448 = ta._ta_lib.EMA(th_close_np, 448)
th_ema448diff = ((th_close_np-th_ema448)/th_close_np * 100)
th_ema224diff = ((th_close_np-th_ema224)/th_close_np*100)
th_ema112diff = ((th_close_np-th_ema112)/th_close_np*100)
        # ma = moving average
th_ma112 = ta._ta_lib.MA(th_close_np, 112)
th_ma224 = ta._ta_lib.MA(th_close_np, 224)
th_ma448 = ta._ta_lib.MA(th_close_np, 448)
th_clo5diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 5)) / th_close_np * 100)
th_clo20diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 20)) / th_close_np * 100)
        # DMI values: period changed from 14 to 11
th_pdi = ta._ta_lib.PLUS_DI(th_high_np,th_low_np,th_close_np, 11)
th_mdi = ta._ta_lib.MINUS_DI(th_high_np, th_low_np, th_close_np, 11)
th_dm = ta._ta_lib.PLUS_DM(th_high_np,th_low_np, 11)
th_adx = ta._ta_lib.ADX(th_high_np,th_low_np,th_close_np, 14)
th_adxr = ta._ta_lib.ADXR(th_high_np, th_low_np, th_close_np, 14)
th_obvsig9 =ta._ta_lib.MA(ta._ta_lib.OBV(th_close_np, th_volume_np),9)
        # Williams %R
th_williumr = ta._ta_lib.WILLR(th_high_np,th_low_np,th_close_np, 14)
th_mfi = ta._ta_lib.MFI(th_high_np,th_low_np,th_close_np,th_volume_np, 14)
        # volume oscillator formula (10-day)
th_ad = ((th_close_np-th_open_np)/(th_high_np-th_low_np) * th_volume_np / th_vol10_np*10)
        # intraday intensity (II)
th_ll = (2*th_close_np-th_high_np-th_low_np)/(th_high_np-th_low_np) * th_volume_np
        # replace all NaN values with 0
np.nan_to_num(th_cci, copy=False)
np.nan_to_num(th_cci60, copy=False)
np.nan_to_num(th_rsi, copy=False)
np.nan_to_num(th_macd, copy=False)
np.nan_to_num(th_macd_signal, copy=False)
np.nan_to_num(th_macd_hist, copy=False)
np.nan_to_num(th_stoch_slowk, copy=False)
np.nan_to_num(th_stoch_slowd, copy=False)
np.nan_to_num(th_BBAND_L, copy=False)
np.nan_to_num(th_BBAND_M, copy=False)
np.nan_to_num(th_BBAND_U, copy=False)
np.nan_to_num(th_BBAND_L14, copy=False)
np.nan_to_num(th_BBAND_M14, copy=False)
np.nan_to_num(th_BBAND_U14, copy=False)
np.nan_to_num(th_OBV, copy=False)
np.nan_to_num(th_sar, copy=False)
np.nan_to_num(th_dema5, copy=False)
np.nan_to_num(th_dema20, copy=False)
np.nan_to_num(th_dema60, copy=False)
np.nan_to_num(th_tema5, copy=False)
np.nan_to_num(th_tema20, copy=False)
np.nan_to_num(th_tema60, copy=False)
np.nan_to_num(th_ema5, copy=False)
np.nan_to_num(th_ema112diff, copy=False)
np.nan_to_num(th_ema224diff, copy=False)
np.nan_to_num(th_ema448diff, copy=False)
np.nan_to_num(th_ema20, copy=False)
np.nan_to_num(th_ema60, copy=False)
np.nan_to_num(th_ema112, copy=False)
np.nan_to_num(th_ema224, copy=False)
np.nan_to_num(th_ema448, copy=False)
np.nan_to_num(th_ma112, copy=False)
np.nan_to_num(th_ma224, copy=False)
np.nan_to_num(th_ma448, copy=False)
np.nan_to_num(th_pdi, copy=False)
np.nan_to_num(th_mdi, copy=False)
np.nan_to_num(th_dm, copy=False)
np.nan_to_num(th_adx, copy=False)
np.nan_to_num(th_adxr, copy=False)
np.nan_to_num(th_williumr, copy=False)
np.nan_to_num(th_pb, copy=False)
np.nan_to_num(th_pb14, copy=False)
np.nan_to_num(th_BBAND_WIDE, copy=False)
np.nan_to_num(th_BBAND_WIDE14, copy=False)
np.nan_to_num(th_mfi, copy=False)
np.nan_to_num(th_ll, copy=False)
np.nan_to_num(th_ad, copy=False)
np.nan_to_num(th_rsi5, copy=False)
np.nan_to_num(th_ibs, copy=False)
np.nan_to_num(th_diff, copy=False)
np.nan_to_num(th_diff30, copy=False)
np.nan_to_num(th_obvsig9, copy=False)
        # convert to DataFrames
df_ad = pd.DataFrame(th_ad, columns=['ad'])
df_cci = pd.DataFrame(th_cci, columns=['cci'])
df_cci60 = pd.DataFrame(th_cci, columns=['cci60'])
df_rsi5 = pd.DataFrame(th_rsi5, columns=['rsi5'])
df_rsi = pd.DataFrame(th_rsi, columns=['rsi'])
df_macd = pd.DataFrame(th_macd, columns=['macd'])
df_macd_signal = pd.DataFrame(th_macd_signal, columns=['macd_signal'])
df_macd_hist = pd.DataFrame(th_macd_hist, columns=['macd_hist'])
df_stoch_slowk = pd.DataFrame(th_stoch_slowk, columns=['stoch_slowk'])
df_stoch_slowd = pd.DataFrame(th_stoch_slowd, columns=['stoch_slowd'])
        # Bollinger Bands
df_BBand_U = pd.DataFrame(th_BBAND_U, columns=['BBand_U'])
df_BBand_M = pd.DataFrame(th_BBAND_M, columns=['BBand_M'])
df_BBand_L = pd.DataFrame(th_BBAND_L, columns=['BBand_L'])
df_BBand_U14 = pd.DataFrame(th_BBAND_U, columns=['BBand_U14'])
df_BBand_M14 = pd.DataFrame(th_BBAND_M, columns=['BBand_M14'])
df_BBand_L14 = pd.DataFrame(th_BBAND_L, columns=['BBand_L14'])
df_ibs = pd.DataFrame(th_ibs, columns=['ibs'])
df_pb14 = pd.DataFrame(th_pb, columns=['pb14'])
df_obvsig9 = | pd.DataFrame(th_obvsig9, columns=['obvsig9']) | pandas.DataFrame |
# Fed Interest Rate Data (dates are approximate, representing the Sunday of the week when the rate was announced)
import pandas as pd
import numpy as np
from datetime import timedelta
file0319 = pd.read_html('https://www.federalreserve.gov/monetarypolicy/openmarket.htm')
file9002 = pd.read_html('https://www.federalreserve.gov/monetarypolicy/openmarket_archive.htm')
# DF containing all instances where the Fed released a new funds rate
rates_df = pd.DataFrame()
file2_yrs = list(range(1990,2003))[::-1]
file2_yrs.remove(1993)
years = [[2019, 2018, 2017, 2016, 2015, 2008, 2007, 2006, 2005, 2004, 2003], file2_yrs]
counter1 = 0
for file in [file0319, file9002]:
counter2 = 0
file_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import time
def patient(rdb):
""" Returns list of patients """
patients = """SELECT "Name" FROM patient ORDER BY index"""
try:
patients = pd.read_sql(patients, rdb)
patients = patients["Name"].values.tolist()
except:
patients = ['Patient']
return patients
def label(rdb):
""" Returns list of parameter for linear and bar drop down """
sql = """SELECT type FROM name WHERE type IN ('Heart Rate','Heart Rate Variability SDNN', 'Resting Heart Rate',
'VO2 Max','Walking Heart Rate Average')"""
sql2 = """SELECT type FROM name WHERE type NOT IN ('Heart Rate','Heart Rate Variability SDNN',
'Resting Heart Rate','VO2 Max','Walking Heart Rate Average')"""
try:
df, df2 = pd.read_sql(sql, rdb), pd.read_sql(sql2, rdb)
label_linear, label_bar = df["type"].values.tolist(), df2["type"].values.tolist()
except:
label_linear, label_bar = [], []
return label_linear, label_bar
def month(rdb, patient):
""" Returns list of months in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date",'YYYY-MM') AS month
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY month""".format(patient)
try:
df = pd.read_sql(sql, rdb)
months = df['month'].to_list()
except:
months = []
return months
def week(rdb, patient):
""" Returns list of weeks in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date", 'IYYY/IW') AS week
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY week """.format(patient)
try:
df = pd.read_sql(sql, rdb)
weeks = df['week'].to_list()
except:
weeks = []
return weeks
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
except:
min_date, max_date = '', ''
return min_date, max_date
def age_sex(rdb, patient):
""" Returns age and gender for selected patient"""
sql = """SELECT "Age","Sex" from patient where "Name"='{}' """.format(patient)
try:
df = pd.read_sql(sql, rdb)
age, sex = df['Age'][0], df['Sex'][0]
except:
age, sex = '', ''
return age, sex
def classification_ecg(rdb, patient):
""" Returns ecg classification for patient information card """
sql = """SELECT "Classification",count(*) FROM ecg WHERE "Patient"='{}' GROUP BY "Classification" """.format(patient)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def number_of_days_more_6(rdb, patient):
""" Returns number of days the patient had the Apple Watch on their hand for more than 6 hours"""
sql = """SELECT count (*)
FROM (SELECT "Date"::date
FROM applewatch_categorical
WHERE "Name" = '{}'
AND "type" = 'Apple Stand Hour'
GROUP BY "Date"::date
HAVING count("Date"::date) > 6) days """.format(patient)
try:
df = pd.read_sql(sql, rdb)
df = df.iloc[0]['count']
except:
df = '0'
return df
def card(rdb, patient, group, date, value):
""" Returns DataFrame with resting, working, mean hear rate, step count, exercise time, activity for the cards """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM') """
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date", 'Day')) """
group_by = "DOW"
else:
to_char = """ "Date"::date """
group_by = "date"
value = date
sql = """SELECT {0} AS {3},type,
CASE
WHEN type in ('Active Energy Burned','Step Count','Apple Exercise Time') THEN SUM("Value")
WHEN type in ('Heart Rate','Walking Heart Rate Average','Resting Heart Rate') THEN AVG("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND type in ('Active Energy Burned','Step Count','Apple Exercise Time','Heart Rate',
'Walking Heart Rate Average','Resting Heart Rate')
AND {0}='{2}'
GROUP BY {3},type""".format(to_char, patient, value, group_by)
try:
df = pd.read_sql(sql, rdb)
df["Value"] = df["Value"].round(2)
except:
df = pd.DataFrame()
return df
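# Illustrative shape of the card() result (values made up): one row per metric for the
# selected period, e.g. columns ['month', 'type', 'Value'] with rows such as
# ('2021-03', 'Resting Heart Rate', 61.25) and ('2021-03', 'Step Count', 245013.0).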
def table(rdb, patient, group, linear, bar):
""" Returns a table with the patient and parameters that were selected from drop downs """
if isinstance(linear, list):
linear = "'" + "','".join(linear) + "'"
else:
linear = "'" + linear + "'"
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date",'Day')) """
group_by = ' "DOW" '
else:
to_char = """ "Date"::date """
group_by = "date"
sql = """SELECT {0} as {4},"type",
CASE WHEN type IN ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE SUM("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND "type" in ({2},'{3}')
GROUP BY {0},type
ORDER BY "type",{4} """.format(to_char, patient, linear, bar, group_by)
try:
df = pd.read_sql(sql, rdb)
if group == 'DOW':
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
df['DOW'] = pd.Categorical(df['DOW'], categories=cats, ordered=True)
df = df.sort_values('DOW')
group_by = "DOW"
df = df.pivot(index=group_by, columns='type', values='Value').reset_index()
except:
df = pd.DataFrame()
return df, group_by
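# Illustrative result of table(): after the pivot there is one row per period and one column
# per selected type, e.g. ['date', 'Heart Rate', 'Step Count'] (columns depend on the
# drop-down selection); the second return value names the grouping column.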
def day_figure(rdb, patient, bar, date):
""" Returns DataFrame for day figure with heart rate and selected parameter and patient """
sql = """ SELECT "Date","type","Value"
FROM applewatch_numeric
WHERE "Name" = '{}'
AND "Date"::date='{}'
AND "type" in ('Heart Rate','{}')
ORDER BY "type","Date" """.format(patient, date, bar)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def trend_figure(rdb, patient, group, start_date, end_date):
""" Returns DataFrame for trend figure """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """TRIM(TO_CHAR("Date", 'Day')) """
group_by = """ "DOW" """
else:
to_char = """ "Date"::date """
group_by = "date"
""" TRIM(TO_CHAR("Date", 'Day')) in ()"""
sql = """SELECT {0} as {1},extract('hour' from "Date") as hour,AVG("Value") AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{2}'
AND type='Heart Rate'
AND "Date" BETWEEN '{3}' AND '{4}'
GROUP BY {0},extract('hour' from "Date")
ORDER BY {1},hour """.format(to_char, group_by, patient, start_date, end_date)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
# Query data for ECG_analyse
def ecgs(rdb, patient):
""" Returns DataFrame for table_ecg"""
sql2 = """SELECT "Day","Date"::time AS Time, "Classification"
FROM ecg
WHERE "Patient"='{}'
ORDER BY "Day" """.format(patient)
try:
df = pd.read_sql(sql2, rdb)
except:
df = pd.DataFrame()
return df
def ecg_data(rdb, day, patient, time):
""" Returns DatFrame to plot ecg signal """
sql = """SELECT * FROM ECG where "Day"='{0}' and "Patient"='{1}' and "Date"::time='{2}' """.format(day, patient, time)
try:
df = | pd.read_sql(sql, rdb) | pandas.read_sql |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
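    # Illustrative flow (sizes made up): write construct_dataframe(10000) to CSV, then
    # generate_multithread_dataframe(path, 10000, 4) reads it back in 4 slices on a
    # ThreadPool and concatenates the pieces into a single frame.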
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
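# Aside (illustrative only, not part of the original test suite): separator sniffing
# with sep=None is supported by the Python engine, so requesting it explicitly avoids
# the ValueError exercised above. io.StringIO is used here instead of pandas.compat.
import pandas as pd
from io import StringIO

sniffed = pd.read_csv(StringIO('a;b\n1;2'), sep=None, engine='python')
# sniffed.columns -> Index(['a', 'b']); the ';' delimiter was inferred by the sniffer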
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
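# Aside (illustrative only, not part of the original test suite): with the C engine the
# dtype argument is honoured; forcing dtype=str keeps every parsed value as text.
import pandas as pd
from io import StringIO

as_text = pd.read_csv(StringIO('A,B\n1,2.5'), dtype=str, engine='c')
# as_text.loc[0, 'A'] == '1' and as_text.loc[0, 'B'] == '2.5'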
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = | pd.Series([2, 50, 100, 20], index=columns) | pandas.Series |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation_list": [["A", "B"], ["A"], ["C"], ["B"], ["C"], ["A"]],
"duration": [
pd.to_timedelta(1, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
| pd.to_timedelta(0, "min") | pandas.to_timedelta |
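# Aside (illustrative only, not msticpy's implementation): the session_ind column in
# df2_with_ses_col_1 can be reproduced with plain pandas by starting a new session
# whenever the user changes or the gap to the previous event exceeds a threshold.
# The cut-off below (2 minutes) is chosen to match the fixture; the sessionize module
# under test may use different parameters.
import pandas as pd

def naive_sessionize(df, max_gap=pd.Timedelta(minutes=2)):
    df = df.sort_values(["UserId", "time"]).reset_index(drop=True)
    new_user = df["UserId"] != df["UserId"].shift()
    long_gap = df["time"].diff() > max_gap
    df["session_ind"] = (new_user | long_gap).cumsum() - 1
    return df
# applying naive_sessionize to the raw df2 fixture reproduces the session_ind values above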
from tqdm import tqdm
import pandas as pd
import sys, os
import collections
"""
Small script to concatenate ENCODE files into a single dataframe to process them easily
5 cols = SRS sequencing
12 cols = LRS sequencing
"""
encode_dl_directory = "/gstock/biolo_datasets/ENCODE/DL/"
dict_df = collections.defaultdict(list)
for file in tqdm(os.listdir(encode_dl_directory)):
# print(file)
cols = open(encode_dl_directory + file, "r").readline().strip().split("\t")
cols_length = len(cols)
if cols_length == 5:
dict_df[cols_length].append(pd.read_csv(encode_dl_directory + file, sep="\t"))
elif cols_length == 12:
dict_df[cols_length].append( | pd.read_csv(encode_dl_directory + file, sep="\t") | pandas.read_csv |
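# Aside (illustrative continuation, not part of the original script): collapse each
# group of per-file frames into one dataframe; the 5-column (SRS) and 12-column (LRS)
# tables are kept separate because their schemas differ.
srs_df = pd.concat(dict_df[5], ignore_index=True) if dict_df[5] else pd.DataFrame()
lrs_df = pd.concat(dict_df[12], ignore_index=True) if dict_df[12] else pd.DataFrame()
print(srs_df.shape, lrs_df.shape)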
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Importing all the necessary packages and operators
import os
import pandas as pd
import dask.dataframe as dd
import json
import datetime
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryDeleteDatasetOperator,
)
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.utils.dates import days_ago
from airflow.operators.python import PythonOperator
DATASET_NAME = os.environ.get("GCP_DATASET_NAME", 'Cap2_DS')
TABLE_NAME = os.environ.get("GCP_TABLE_NAME", 'Mod2_Proj2')
dag = models.DAG(
dag_id='gcs_to_bigquery_operator',
start_date=days_ago(1),
schedule_interval='@daily',
tags=['example']
)
#Converting string column into dict column
def string_to_dict(dict_string):
# Convert to proper json format
dict_string = dict_string.replace("'", '"')
return json.loads(dict_string)
# Function to concatenate all the CSV files and perform the necessary transformations
def transform_data():
#Read .CSV files into dask dataframe
dask_df = dd.read_csv('gs://saums_bucket/egen_cap1/*.csv')
#Convert the dask df into pandas df
pandas_df = dask_df.compute()
pandas_df['dict_col']=pandas_df['1d'].apply(string_to_dict)
#Flattening the dict column and merging it with original dataframe
flattened_df = pd.json_normalize(pandas_df['dict_col'])
mergedDf = pandas_df.merge(flattened_df, left_index=True, right_index=True)
transformed_df = mergedDf[['name','price','price_timestamp','market_cap','volume','price_change', 'price_change_pct', 'volume_change',
'volume_change_pct', 'market_cap_change', 'market_cap_change_pct']]
transformed_df['price_timestamp'] = pd.to_datetime(transformed_df['price_timestamp'])
transformed_df = transformed_df[['name', 'price','price_change','price_change_pct','price_timestamp',
'market_cap','market_cap_change', 'market_cap_change_pct','volume','volume_change', 'volume_change_pct']]
transformed_df['price_timestamp'] = transformed_df['price_timestamp'].dt.strftime("%m-%d-%Y %H:%M:%S")
transformed_df['price_timestamp'] = | pd.to_datetime(transformed_df['price_timestamp']) | pandas.to_datetime |
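# Aside (illustrative only; the wiring below is an assumption, not part of the original
# DAG): the imported operators suggest a flow of "create dataset -> transform -> load".
# Whether transform_data() writes its result back to GCS before the load step is not
# shown above, so the source object pattern here is a placeholder.
create_dataset = BigQueryCreateEmptyDatasetOperator(
    task_id='create_dataset', dataset_id=DATASET_NAME, dag=dag)

transform_task = PythonOperator(
    task_id='transform_data', python_callable=transform_data, dag=dag)

load_to_bq = GCSToBigQueryOperator(
    task_id='gcs_to_bigquery',
    bucket='saums_bucket',                            # bucket used by the read above
    source_objects=['egen_cap1/transformed/*.csv'],   # placeholder object path
    destination_project_dataset_table=f'{DATASET_NAME}.{TABLE_NAME}',
    write_disposition='WRITE_TRUNCATE',
    skip_leading_rows=1,
    dag=dag)

create_dataset >> transform_task >> load_to_bq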
import pandas as pd
import numpy as np
#series
'''
s = pd.Series(np.random.randn(5))
print(s)
print (s.tail(2))
'''
#dataFrame
d={'name':['anish','harshal','shivam','joyal'],
'age':[20,20,19,19],
'home':['kol','mum','del','ker'],
'rate':[4.4,4.3,5.4,6.5],
'rate2':[1,2,3,4]}
pf= | pd.DataFrame(d,index=[1,2,3,4]) | pandas.DataFrame |
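# Aside (illustrative only): a few basic selections on the frame built above.
print(pf['name'])            # a single column as a Series
print(pf.loc[3])             # the row labelled 3
print(pf[pf['rate'] > 4.5])  # boolean filtering
print(pf.describe())         # summary statistics for the numeric columns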
"""Feature extraction of image for training ML models"""
import os
import cv2
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from skimage.filters import sobel, scharr, roberts, prewitt
from skimage.feature import canny
from scipy import ndimage as nd
# Extract features using different Gabor kernels
def addGabor(df,img):
num = 1
kernels = []
ksize = 9
for theta in range(2):
theta = theta/4.*np.pi
for sigma in (1,3):
for lamda in np.arange(0,np.pi,np.pi/4):
for gamma in (0.05,0.5):
gabor_label = "gabor" + str(num)
kernel = cv2.getGaborKernel((ksize,ksize),sigma,theta,lamda,gamma,0,cv2.CV_32F)
kernels.append(kernel)
fimg = cv2.filter2D(img,cv2.CV_8UC3,kernel)
filtered_img = fimg.reshape(-1)
df[gabor_label] = filtered_img
num += 1
return df
# Extract the features using different edge detector methods
def addEdges(df,gray):
canny_edges = canny(gray,0.6)
roberts_edges = roberts(gray)
sobel_edges = sobel(gray)
scharr_edges = scharr(gray)
prewitt_edges = prewitt(gray)
df['canny_edges'] = canny_edges.reshape(-1)
df['roberts_edge'] = roberts_edges.reshape(-1)
df['sobel_edge'] = sobel_edges.reshape(-1)
df['scharr_edge'] = scharr_edges.reshape(-1)
df['prewitt_edge'] = prewitt_edges.reshape(-1)
return df
# Extract features using Gaussian and median filters
def addFilter(df,gray):
gaussian_3 = nd.gaussian_filter(gray,sigma=3)
gaussian_7 = nd.gaussian_filter(gray,sigma=7)
median_img = nd.median_filter(gray,size=3)
df['gaussian_3'] = gaussian_3.reshape(-1)
df['gaussian_7'] = gaussian_7.reshape(-1)
df['median_3'] = median_img.reshape(-1)
return df
# Add the ground-truth label of the given input image
def addTruthLabel(df,truthfile):
gd = cv2.imread(truthfile,0)
df['ground_truth'] = gd.reshape(-1)
return df
# Add all the features to the dataframe
def calc_features(imgfile):
df = pd.DataFrame([])
img = cv2.imread(imgfile)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img1 = np.reshape(gray,(-1))
df['pixels'] = img1
df = addGabor(df,img1)
df = addEdges(df,gray)
df = addFilter(df,gray)
return df
imgpath = r"C:\Users\shubh\Documents\steel_detection\metal_nut\test\scratch"
gdpath = r"C:\Users\shubh\Documents\steel_detection\metal_nut\ground_truth\scratch"
imglist = ["021.png", "022.png"]
gdlist = ["021_mask.png", "021_mask.png"]
imgfiles = [os.path.join(imgpath,file) for file in imglist]
gdfiles = [os.path.join(gdpath,file) for file in gdlist]
df = | pd.DataFrame([]) | pandas.DataFrame |
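# Aside (illustrative continuation, not part of the original script): build one
# training table by extracting features for every image, attaching the matching
# ground-truth mask, and stacking the per-image frames.
frames = []
for imgfile, gdfile in zip(imgfiles, gdfiles):
    per_img = calc_features(imgfile)
    per_img = addTruthLabel(per_img, gdfile)
    frames.append(per_img)
training_df = pd.concat(frames, ignore_index=True)
print(training_df.shape)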
import numpy as np
import scipy
import matplotlib
import pandas as pd
import sklearn
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
from datetime import datetime
from loss_mse import loss_mse_warmup
from custom_generator import batch_generator
#Keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
######### Load the data ############
dataset = pd.read_csv('dataset_PSE (1).csv', delimiter = ',')
#print(dataset)
######## Visualization of the raw samples ###########
#Vds= dataset.iloc[:,3] # ZAP, raw, unstamped.
#visualize ZAP
#plt.figure()
#Vds.plot()
#plt.show()
#plt.savefig('RAW_Unstamped_ZAP.pdf')
############ Check and repair the data #############
# identify the faulty records
#dataset['Godzina'].map(int)
dataset = dataset[dataset['Godzina'] != '2A']
# 2 records dropped
# verify the data
HCheck = dataset[dataset['Godzina'].map(int).isin([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24])]
#print(HCheck)
# 19669 records
############# Analysis of the neighborhood of the corrupted records ##############
#dataset['DH'] = dataset['Data'].map(str)+ dataset['Godzina']
#db_ds = dataset[dataset['Godzina'] == '2A']
#print(db_ds)
#print(dataset)
#db_ds = dataset[dataset['Data'] == 20171029]
#print(db_ds)
#print(dataset)
########## Convert the sample labels #############
timeStamps = | pd.to_datetime(dataset.Data,format='%Y%m%d') | pandas.to_datetime |
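# Aside (illustrative only): the stamps above carry only the calendar date; a full
# hourly timestamp can be formed by adding the 'Godzina' (hour) column as an offset.
# Hour "1" is assumed to denote the first hour of the day, hence the "- 1".
fullTimeStamps = timeStamps + pd.to_timedelta(dataset['Godzina'].astype(int) - 1, unit='h')
dataset = dataset.assign(timestamp=fullTimeStamps)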
# Copyright 2019 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from moonshot import Moonshot
from moonshot.commission import PerShareCommission
class USStockCommission(PerShareCommission):
IB_COMMISSION_PER_SHARE = 0.005
class PairsStrategy(Moonshot):
"""
Pairs trading strategy that uses the Johansen test to re-calculate
hedge ratios daily and uses Bollinger Bands to time entries and exits.
Buys (sells) the spread when it crosses below (above) its lower (upper)
Bollinger Band and exits when it crosses its moving average.
To use the strategy, subclass this base class and define the appropriate
DB and CONIDS.
"""
CODE = "pairs"
DB = None
DB_FIELDS = ["Close", "Open"]
CONIDS = []
LOOKBACK_WINDOW = 20 # Calculate hedge ratios and Bollinger Bands using this lookback
BBAND_STD = 1 # Set Bollinger Bands this many standard deviations away from mean
COMMISSION_CLASS = USStockCommission
def get_hedge_ratio(self, pair_prices):
"""
Helper function that uses the Johansen test to calculate hedge ratio. This is applied
to the pair prices on a rolling basis in prices_to_signals.
"""
pair_prices = pair_prices.dropna()
# Skip if we don't have at least 75% of the expected observations
if len(pair_prices) < self.LOOKBACK_WINDOW * 0.75:
return | pd.Series(0, index=pair_prices.columns) | pandas.Series |
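# Aside (illustrative only): per the class docstring, a concrete strategy just
# subclasses the base and fills in DB and CONIDS; the values below are made-up
# placeholders, not real database codes or conids.
class DemoPairsStrategy(PairsStrategy):
    CODE = "pairs-demo"
    DB = "usstock-1d"            # assumed history database code
    CONIDS = [265598, 3691937]   # assumed conids for the two legs of the pair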
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
x['price_new'] = np.log1p(x.price) # log transform improves correlation with deal_price
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
x['symbol7_count'] = x['description'].str.count('\.')  # escape the dot so only literal '.' characters are counted
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
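# Aside (illustrative only): what the cleaning helpers above do to a sample string.
# The exact stems depend on NLTK's Russian Snowball stemmer and stopword list.
sample = u'Продам 2-комнатную квартиру, 54 м²!'
print(clean_text(sample))       # lowercased, '²' mapped to '2', punctuation stripped
print(preprocess_data(sample))  # additionally tokenized, stemmed and stopword-filtered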
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_unicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_distunicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
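# The vocabulary and IDF weights are learned on the combined train+test texts; each split
# is then transformed separately and the (train, test) pair is saved together.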
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
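# Texts are converted to integer id sequences and padded/truncated to a fixed length,
# the usual preprocessing before an embedding layer or RNN downstream.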
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
################################################
# Cat encoding
################################################
train_df=pd.read_feather('../train_basic_features.pkl')
test_df=pd.read_feather('../test__basic_features.pkl')
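# catEncode implements bagged, out-of-fold target encoding: for each categorical column
# (or column combination in colLst), per-category target statistics (count/mean/std/median)
# are computed on the K-1 training folds, joined onto the held-out fold and onto the test
# set, and averaged over nbag repetitions to limit target leakage.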
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len_': ('y_len' + ind), 'y_median_': ('y_median' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
        enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set(colLst))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
catCols = ['user_id', 'region', 'city', 'parent_category_name',
'category_name', 'user_type']
train_df, test_df = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 10, minCount = 0)
train_df.to_feather('../train_cat_targetenc.pkl')
test_df.to_feather('../test_cat_targetenc.pkl')
################################################################
# Tfidf - part 2
################################################################
import os; os.environ['OMP_NUM_THREADS'] = '1'
from sklearn.decomposition import TruncatedSVD
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
import time
from typing import List, Dict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
stopwords = stopwords.words('russian')
train_per=pd.read_csv('../input/train_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test['params'] = train_test['param_1'].fillna('') + ' ' + train_test['param_2'].fillna('') + ' ' + train_test['param_3'].fillna('')
import re
train_test.drop(['param_1', 'param_2', 'param_3'], axis = 'columns', inplace=True)
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
import re
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf2.pkl")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 10000,max_df=.4,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf3.pkl")
del(train_test); gc.collect()
train_per=pd.read_csv('../input/train_active.csv', usecols = ['title'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['title'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test.fillna('NA', inplace=True)
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf2.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf3.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
del(train_test); gc.collect()
###Too slow###
'''
train_per=pd.read_csv('../input/train_active.csv', usecols = ['description'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['description'])#,'title','description'])
train_per.fillna(' ', inplace=True)
test_per.fillna(' ', inplace=True)
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, stop_words = stopwords#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf2.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf3.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
'''
##########################################
# 13. Chargram -- too slow
##########################################
from collections import Counter
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
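# char_ngrams (defined below) reduces a string to its unique 3-5 character n-grams joined
# by spaces, so TfidfVectorizer can treat them like ordinary word tokens.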
def char_ngrams(s):
s = s.lower()
s = s.replace(u' ', '')
result = Counter()
len_s = len(s)
for n in [3, 4, 5]:
result.update(s[i:i+n] for i in range(len_s - n + 1))
return ' '.join(list(result))
data = pd.concat((train_df, test_df), axis = 'rows')
data['param_chargram'] = list(data[['params']].apply(lambda x: char_ngrams(x['params']), axis=1))
data['title_chargram'] = list(data[['title']].apply(lambda x: char_ngrams(x['title']), axis=1))
#data['desc_chargram'] = list(data[['description']].apply(lambda x: char_ngrams(x['description']), axis=1))
#data['count_common_chargram'] = data.apply(lambda x: len(set(str(x['title_chargram']).lower().split()).intersection(set(str(x['desc_chargram']).lower().split()))), axis=1)
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
#####Chargram -TFIDF
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['title_chargram'].values.tolist() + test_df['title_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../title_chargram_tfidf.pkl')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['param_chargram'].values.tolist() + test_df['param_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['param_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['param_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../param_chargram_tfidf.pkl')
#######Chargram of Cat and Parent cat
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
train_df = pd.read_feather('../train_basic_features.pkl')
test_df = pd.read_feather('../test__basic_features.pkl')
data = pd.concat([train_df, test_df], axis= 'rows')
data['categories'] = data["parent_category_name"].fillna(' ') + data["category_name"].fillna(' ')
data['cat_chargram'] = list(data[['categories']].apply(lambda x: char_ngrams(x['categories']), axis=1))
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 1000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['cat_chargram'].values.tolist() + test_df['cat_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['cat_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['cat_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../cat_chargram_tfidf.pkl')
##############################
## New Kaggle Ftr
##############################
import pandas as pd
import gc
used_cols = ['item_id', 'user_id']
train = pd.read_csv('../input/train.csv', usecols=used_cols)
train_active = pd.read_csv('../input/train_active.csv', usecols=used_cols)
test = pd.read_csv('../input/test.csv', usecols=used_cols)
test_active = pd.read_csv('../input/test_active.csv', usecols=used_cols)
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])
train.head()
all_samples = pd.concat([
train,
train_active,
test,
test_active
]).reset_index(drop=True)
all_samples.drop_duplicates(['item_id'], inplace=True)
del train_active
del test_active
gc.collect()
all_periods = pd.concat([
train_periods,
test_periods
])
del train_periods
del test_periods
gc.collect()
all_periods.head()
all_periods['days_up'] = (all_periods['date_to'] - all_periods['date_from']).dt.days
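# days_up: number of days each listing period kept the ad online.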
gp = all_periods.groupby(['item_id'])[['days_up']]
gp_df = pd.DataFrame()
gp_df['days_up_sum'] = gp.sum()['days_up']
gp_df['times_put_up'] = gp.count()['days_up']
gp_df.reset_index(inplace=True)
gp_df.rename(index=str, columns={'index': 'item_id'})
gp_df.head()
all_periods.drop_duplicates(['item_id'], inplace=True)
all_periods = all_periods.merge(gp_df, on='item_id', how='left')
all_periods.head()
del gp
del gp_df
gc.collect()
all_periods = all_periods.merge(all_samples, on='item_id', how='left')
all_periods.head()
gp = all_periods.groupby(['user_id'])[['days_up_sum', 'times_put_up']].mean().reset_index() \
.rename(index=str, columns={
'days_up_sum': 'avg_days_up_user',
'times_put_up': 'avg_times_up_user'
})
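# Per-user aggregates: the average total days the user's items stayed up and the average
# number of listing periods per item.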
gp.head()
n_user_items = all_samples.groupby(['user_id'])[['item_id']].count().reset_index() \
.rename(index=str, columns={
'item_id': 'n_user_items'
})
gp = gp.merge(n_user_items, on='user_id', how='left')
gp.head()
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.merge(gp, on='user_id', how='left')
test = test.merge(gp, on='user_id', how='left')
agg_cols = list(gp.columns)[1:]
del gp
gc.collect()
train.head()
train = train[['avg_days_up_user','avg_times_up_user','n_user_items']]
test = test[['avg_days_up_user','avg_times_up_user','n_user_items']]
train.to_feather('../train_kag_agg_ftr.ftr')
test.to_feather('../test_kag_agg_ftr.ftr')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from vc.definitions import ROOT_DIR
####################################################################
# Common variables.
####################################################################
# Get the root dir of the module.
# Folder path to data files.
folder_path = ROOT_DIR + "/datasets/temp_brazil_cities/raw_data/"
# Set the year you want to look at.
year = 1977
####################################################################
# Load and clean data for each city individually.
####################################################################
# Load data into Pandas DataFrame with first row as column names and first column as index names.
belem_df = pd.read_csv(folder_path + "station_belem.csv", header=0, index_col=0)
# Remove pre-generated average columns.
belem_df = belem_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
# Set erroneous values to NaN so they don't disturb the results.
belem_df[belem_df > 100] = np.nan
curitiba_df = pd.read_csv(folder_path + "station_curitiba.csv", header=0, index_col=0)
curitiba_df = curitiba_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
curitiba_df[curitiba_df > 100] = np.nan
fortaleza_df = pd.read_csv(folder_path + "station_fortaleza.csv", header=0, index_col=0)
fortaleza_df = fortaleza_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
fortaleza_df[fortaleza_df > 100] = np.nan
goiania_df = pd.read_csv(folder_path + "station_goiania.csv", header=0, index_col=0)
goiania_df = goiania_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
goiania_df[goiania_df > 100] = np.nan
macapa_df = pd.read_csv(folder_path + "station_macapa.csv", header=0, index_col=0)
macapa_df = macapa_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
macapa_df[macapa_df > 100] = np.nan
manaus_df = pd.read_csv(folder_path + "station_manaus.csv", header=0, index_col=0)
manaus_df = manaus_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
manaus_df[manaus_df > 100] = np.nan
recife_df = pd.read_csv(folder_path + "station_recife.csv", header=0, index_col=0)
recife_df = recife_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
recife_df[recife_df > 100] = np.nan
rio_df = pd.read_csv(folder_path + "station_rio.csv", header=0, index_col=0)
rio_df = rio_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
rio_df[rio_df > 100] = np.nan
salvador_df = pd.read_csv(folder_path + "station_salvador.csv", header=0, index_col=0)
salvador_df = salvador_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
salvador_df[salvador_df > 100] = np.nan
sao_luiz_df = pd.read_csv(folder_path + "station_sao_luiz.csv", header=0, index_col=0)
sao_luiz_df = sao_luiz_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
sao_luiz_df[sao_luiz_df > 100] = np.nan
sao_paulo_df = pd.read_csv(folder_path + "station_sao_paulo.csv", header=0, index_col=0)
sao_paulo_df = sao_paulo_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
sao_paulo_df[sao_paulo_df > 100] = np.nan
vitoria_df = pd.read_csv(folder_path + "station_vitoria.csv", header=0, index_col=0)
vitoria_df = vitoria_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
vitoria_df[vitoria_df > 100] = np.nan
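# NOTE: the per-city blocks above all follow the same load/drop/clean pattern; an
# equivalent, more compact helper would look like the sketch below (not used by this
# script as written - the explicit per-city variables above are kept unchanged).
def _load_station(filename):
    df = pd.read_csv(folder_path + filename, header=0, index_col=0)
    df = df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
    df[df > 100] = np.nan
    return df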
####################################################################
# Make mean df by adding data for each city with data from that year.
####################################################################
mean_df = pd.DataFrame()
if year in belem_df.index:
mean_df = pd.concat([mean_df, pd.DataFrame({"Belem": belem_df.loc[year]})], axis=1)
if year in curitiba_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Curitiba": curitiba_df.loc[year]})], axis=1
)
if year in fortaleza_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Fortaleza": fortaleza_df.loc[year]})], axis=1
)
if year in goiania_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Goiania": goiania_df.loc[year]})], axis=1
)
if year in macapa_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Macapa": macapa_df.loc[year]})], axis=1
)
if year in manaus_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Manaus": manaus_df.loc[year]})], axis=1
)
if year in recife_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Recife": recife_df.loc[year]})], axis=1
)
if year in rio_df.index:
mean_df = pd.concat([mean_df, pd.DataFrame({"Rio": rio_df.loc[year]})], axis=1)
if year in salvador_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Salvador": salvador_df.loc[year]})], axis=1
)
if year in sao_luiz_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Sao Luiz": sao_luiz_df.loc[year]})], axis=1
)
if year in sao_paulo_df.index:
mean_df = pd.concat(
[mean_df, pd.DataFrame({"Sao Paulo": sao_paulo_df.loc[year]})], axis=1
)
if year in vitoria_df.index:
mean_df = pd.concat(
[mean_df, | pd.DataFrame({"Vitoria": vitoria_df.loc[year]}) | pandas.DataFrame |
# Analysis of *rXiv clusters
# %%
import logging
import re
from datetime import datetime
import altair as alt
import pandas as pd
import statsmodels.api as sm
from numpy.random import choice
from scipy.spatial.distance import cityblock
from statsmodels.api import OLS, Poisson, ZeroInflatedPoisson
from eurito_indicators import PROJECT_DIR
from eurito_indicators.getters.arxiv_getters import (
get_arxiv_articles,
get_cluster_names,
get_arxiv_tokenised,
get_arxiv_topic_model,
get_covid_papers,
query_arxiv_institute,
)
from eurito_indicators.pipeline.clustering_naming import (
k_check_clusters,
make_distance_to_clusters,
make_doc_comm_lookup,
)
from eurito_indicators.pipeline.processing_utils import make_lq
from eurito_indicators.pipeline.topic_modelling import post_process_model_clusters
from eurito_indicators.pipeline.topic_utils import make_topic_mix, train_topic_model
from eurito_indicators.utils.altair_save_utils import (
ch_resize,
google_chrome_driver_setup,
save_altair,
)
from eurito_indicators.utils.other_utils import clean_table
VAL_PATH = f"{PROJECT_DIR}/outputs/reports/val_figures"
FIG_PATH = f"{PROJECT_DIR}/outputs/reports/final_report_deck"
def plot_k_check_outputs(val_results, cluster_names):
"""
Plots the results of a validation of clustering results using kmeans
"""
logging.info("Cluster overlaps")
cluster_check = (
        pd.concat([x[0] for x in val_results], axis=1)
.mean(axis=1)
.reset_index(name="co_occ")
.assign(c1_name=lambda df: df["c1"].map(cluster_names))
.assign(c2_name=lambda df: df["c2"].map(cluster_names))
)
hm = (
alt.Chart(cluster_check)
.mark_point(filled=True, stroke="black", strokeWidth=1)
.encode(
x=alt.X("c1_name:N", sort=alt.EncodingSortField("c1")),
y=alt.Y("c2_name:N", sort=alt.EncodingSortField("c2")),
size=alt.Size("co_occ", title="Number of co-occurrences"),
color=alt.Color("co_occ", scale=alt.Scale(scheme="oranges")),
tooltip=["c1_name", "c2_name"],
)
)
logging.info("Correlations between assignment shares")
dists_df = (
        pd.concat([x[1] for x in val_results], axis=1)
.mean(axis=1)
.reset_index(name="share_corr")
.assign(c1_name=lambda df: df["c1"].map(cluster_names))
.assign(c2_name=lambda df: df["c2"].map(cluster_names))
)
pl = (
alt.Chart(dists_df)
.mark_rect()
.encode(
x=alt.X("c1_name:N", sort=alt.EncodingSortField("c1")),
y=alt.Y("c2_name:N", sort=alt.EncodingSortField("c2")),
color="share_corr",
tooltip=["c1_name", "c2_name", "share_corr"],
)
)
logging.info("Distribution of correlations with other clusters")
melted = (
dists_df[["c1", "c2", "share_corr"]]
.melt(id_vars=["share_corr"])
.assign(cl_name=lambda df: df["value"].map(cluster_names))
)
sort_clusters = (
melted.groupby("cl_name")["share_corr"]
.mean()
.sort_values(ascending=False)
.index.tolist()
)
boxp = (
alt.Chart(melted)
.mark_boxplot()
.encode(y=alt.Y("cl_name:N", sort=sort_clusters), x="share_corr")
)
return hm, pl, boxp
def make_clean_cluster_names(cluster_names):
clean_clusters = {
k: " ".join([x.capitalize() for x in re.sub("_", " ", v).split(" ")])
for k,v in cluster_names.items()
}
return clean_clusters
def tag_covid_cluster(table, cluster_lookup, cluster_name):
t = table.assign(
cluster=lambda df: df["article_id"].map(cluster_lookup).map(cluster_name)
).assign(is_covid=lambda df: ~df["cluster"].isna())
return t
def tag_month_year(table, art):
return table["article_id"].map(art.set_index("article_id")["month_year"].to_dict())
def make_temp_reg_table(inst_cov, all_arts, focus_countries):
"""Creates a regression table to analyse the link
between research nationality, topic and timeliness of Covid-19 response
"""
inst_cov["created"] = inst_cov["article_id"].map(
all_arts.set_index("article_id")["created"]
)
number_collabs = inst_cov.groupby("article_id")["country"].apply(
lambda x: len(set(x))
)
inst_cov["n_collabs"] = inst_cov["article_id"].map(number_collabs)
reg_data = (
inst_cov.query("is_covid==True")
.query("month_year < '2021-01-01'")
.query("month_year >= '2020-01-01'")
.copy()[["country", "cluster", "month_year", "created", "n_collabs"]]
.reset_index(drop=True)
)
reg_data["y"] = [x.month for x in reg_data["month_year"]]
reg_data["time_since_cov"] = [
(x - datetime(2019, 12, 30)).days for x in reg_data["created"]
]
reg_data = reg_data.loc[reg_data["country"].isin(focus_countries)].reset_index(
drop=True
)
return reg_data
def time_reg_comparison(reg_data):
X_count = sm.add_constant(pd.get_dummies(reg_data["country"]))
X_clust = sm.add_constant(
pd.concat(
[
pd.get_dummies(reg_data["country"]),
pd.get_dummies(reg_data["cluster"]),
reg_data["n_collabs"],
],
axis=1,
)
)
results = [
OLS(endog=reg_data["time_since_cov"], exog=exog).fit()
for exog in [X_count, X_clust]
]
return results
def make_comparison_reg(arts, cluster, clust_assign, cluster_control):
"""Regression analysis comparing citations and collaboration levels
between covid and non-covid papers
"""
ids = list(clust_assign[cluster]) + list(cluster_control[cluster])
logging.info(len(ids))
arts_sel = (
arts.loc[arts["article_id"].isin(ids)]
.reset_index(drop=True)
.dropna(axis=0, subset=["citation_count", "abstract"])
)
arts_sel["is_covid"] = arts_sel["article_id"].isin(clust_assign[cluster])
# Citation analysis
exog = arts_sel["is_covid"].astype(float)
cit_res = ZeroInflatedPoisson(
endog=arts_sel["citation_count"].astype(float), exog=exog
).fit()
collab_b = (
inst_cov.loc[inst_cov["article_id"].isin(arts_sel["article_id"])]
.groupby("article_id")
.apply(lambda x: len(set(x["country"])))
)
arts_sel["n_collab"] = arts_sel["article_id"].map(collab_b)
# Collaboration analysis
arts_sel_2 = arts_sel.dropna(axis=0, subset=["n_collab"])
exog_2 = sm.add_constant(arts_sel_2["is_covid"]).astype(float)
collab_res = Poisson(endog=arts_sel_2["n_collab"].astype(float), exog=exog_2).fit()
return cit_res, collab_res, arts_sel
def plot_regression_coefficients(long):
"""Plots regression confidence intervals"""
reg_int = (
alt.Chart(long)
.mark_line(color="red")
.encode(
y=alt.Y(
"name",
sort=alt.EncodingSortField("value", "mean", order="descending"),
title="Reseach cluster",
),
x=alt.X("value", title="Confidence interval"),
detail="name",
)
)
hor = (
alt.Chart(long)
.transform_calculate(x="0")
.mark_rule(color="black", strokeDash=[2, 2])
.encode(x="x:Q")
)
return reg_int + hor
if __name__ == "__main__":
driv = google_chrome_driver_setup()
logging.info("Reading data")
arx_tm = get_arxiv_topic_model()
arts = get_covid_papers()
cluster_names = get_cluster_names()
clean_names = make_clean_cluster_names(cluster_names)
tm, cl = post_process_model_clusters(
model=arx_tm[0], top_level=0, cl_level=1, top_thres=0.7
)
clust_assign = {k: [el[0] for el in v] for k, v in cl.items()}
paper_cluster_lookup = {k[0]: v for k, v in make_doc_comm_lookup(cl).items()}
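    # clust_assign maps cluster id -> list of article ids; paper_cluster_lookup maps
    # article id -> cluster id, and is used below to tag papers with their topical cluster.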
cov_cl = (
arts.copy()
.assign(cluster=lambda df: df["article_id"].map(paper_cluster_lookup))
.dropna(axis=0, subset=["cluster"])
.reset_index(drop=True)
.assign(tokenised=lambda df: arx_tm[3])
)
logging.info("K-check cluster outputs")
kmeans_validation_results = [
k_check_clusters(tm, n_clust=k, cluster_assignments=clust_assign)
for k in [23] * 5 + [15] * 5 + [30] * 5
]
heatmap, corr_shares, corr_distr = plot_k_check_outputs(
kmeans_validation_results, cluster_names
)
save_altair(heatmap, "cluster_cooccurrences", driver=driv, path=FIG_PATH)
save_altair(corr_shares, "cluster_correlations", driver=driv, path=FIG_PATH)
save_altair(corr_distr, "correlation_distributions", driver=driv, path=FIG_PATH)
cov_cl["cluster_name"] = cov_cl["cluster"].map(clean_names)
cov_cl = cov_cl.loc[cov_cl["year"] >= 2020]
logging.info("Descriptive analysis")
# Distribution of sources over clusters
cluster_source = (
cov_cl.groupby("cluster_name")["article_source"]
.value_counts()
.reset_index(name="article_n")
)
source_ch = (
alt.Chart(cluster_source)
.mark_bar()
.encode(
y=alt.Y(
"cluster_name",
sort=alt.EncodingSortField("article_n", order="descending"),
),
x="article_n",
color="article_source",
)
).properties(height=500, width=300)
save_altair(ch_resize(source_ch), "cluster_sources", driver=driv, path=FIG_PATH)
logging.info("Evolution of activity")
cluster_time = (
cov_cl.groupby("cluster_name")["month_year"]
.value_counts(normalize=True)
.reset_index(name="article_n")
)
#cluster_time = clean_table(cluster_time, ["cluster_name"], clean_names)
# Gets the countries that responded fastest to Covid-19
timeliness_studies = (
cluster_time.pivot_table(
index="month_year", columns="cluster_name", values="article_n"
)
.fillna(0)
.cumsum()
.iloc[6]
.sort_values(ascending=False)
.index.tolist()
)
cluster_agg = cov_cl["month_year"].value_counts(normalize=True)
cluster_time["average"] = cluster_time["month_year"].map(cluster_agg)
source_ch = (
alt.Chart(cluster_time)
.mark_line(point=True, size=2)
.encode(
y=alt.Y(
"article_n", title=["Share", "of papers"], axis=alt.Axis(format="%")
),
x=alt.X("month_year", axis=alt.Axis(format="%b-%Y")),
tooltip=["cluster_name", "article_n"],
)
).properties(height=70, width=120)
source_avg = (
alt.Chart(cluster_time)
.mark_line(color="darkorange", strokeWidth=1)
.encode(
y=alt.Y("average"), x=alt.X("month_year", axis=alt.Axis(format="%b-%Y"))
)
).properties(height=70, width=120)
topic_evolution = ch_resize(
(source_ch + source_avg)
.facet(
facet=alt.Facet(
"cluster_name", sort=timeliness_studies, title="Topical cluster"
),
columns=5,
)
.configure_point(size=15)
)
save_altair(topic_evolution, "arxiv_topic_evolution", driver=driv, path=FIG_PATH)
#### CONTINUE HERE
logging.info("International analysis")
logging.info("Read data")
all_arts = get_arxiv_articles().query("article_source!='cord'")
inst = query_arxiv_institute().query("is_multinational == 0").reset_index(drop=True)
inst_cov = tag_covid_cluster(inst, paper_cluster_lookup, clean_names)
inst_cov["month_year"] = tag_month_year(inst_cov, all_arts)
inst_cov = (
inst_cov.query("month_year>='2020-01-01'")
# .query("month_year<='2021-02-01'")
.reset_index(drop=True)
)
focus_countries = (
inst_cov.query("is_covid==True")["country"].value_counts().iloc[:30].index
)
country_shares = (
inst_cov.groupby(["month_year", "is_covid", "country"])
.size()
.unstack(level=[1, 2])
.fillna(0)
.apply(lambda x: x / x.sum())
.stack(level=[0, 1])
.reset_index(name="paper_shares")
)
country_shares = country_shares.loc[
country_shares["country"].isin(focus_countries)
].reset_index(drop=True)
countries_sorted = (
country_shares.query("month_year<'2020-06-01'")
.query("is_covid==True")
.groupby("country")["paper_shares"]
.sum()
.sort_values(ascending=False)
.index.tolist()
)
# %%
country_shares = clean_table(
country_shares, ["is_covid"], {True: "Covid", False: "Not Covid"}
)
country_chart = (
(
alt.Chart(country_shares)
.mark_line(point=True, size=1)
.transform_window(
mean="mean(paper_shares)",
frame=[-1, 1],
groupby=["is_covid_clean", "country"],
)
.encode(
x=alt.X("month_year", title=None, axis=alt.Axis(format="%b%y")),
y=alt.Y(
"mean:Q", axis=alt.Axis(format="%"), title=["Share", "of papers"]
),
color=alt.Color("is_covid_clean", title="Category"),
facet=alt.Facet(
"country", columns=5, sort=countries_sorted, title="Country"
),
)
)
.properties(height=70, width=120)
.configure_point(size=10)
)
save_altair(
ch_resize(country_chart), "arxiv_country_trends", driver=driv, path=FIG_PATH
)
logging.info("Analysis of specialisation")
focus_countries = (
inst_cov.query("is_covid==True")["country"].value_counts().iloc[:30].index
)
totals = (
inst_cov.dropna(axis=0, subset=["cluster"])
.groupby(["country", "cluster"])
.size()
.unstack()
.fillna(0)
.stack()
.loc[focus_countries]
)
lq = (
make_lq(
inst_cov.dropna(axis=0, subset=["cluster"])
.groupby(["country", "cluster"])
.size()
.unstack()
.fillna(0)
)
.stack()
.loc[focus_countries]
)
countries_sorted = totals.unstack(level=0).sum().index.tolist()
clusters_sorted = (
totals.unstack(level=1).sum().sort_values(ascending=False).index.tolist()
)
combi = pd.concat([totals, lq], axis=1)
combi.columns = ["total", "specialisation"]
combi = combi.reset_index(drop=False)
# Plot heatmap
heatmap = (
alt.Chart(combi.query("total>0"))
.mark_point(filled=True, shape="square", stroke="darkgrey", strokeWidth=0.5)
.encode(
y=alt.Y("country", sort=countries_sorted),
x=alt.X("cluster", sort=clusters_sorted, axis=alt.Axis(labelAngle=320)),
size=alt.Size("total", scale=alt.Scale(type="log")),
tooltip=["country", "cluster"],
color=alt.X(
"specialisation",
scale=alt.Scale(scheme="redblue", type="log", domainMid=1),
sort="descending",
),
)
)
save_altair(heatmap, "country_specialisation", driver=driv, path=FIG_PATH)
logging.info("Regression analysis")
inst_cov["created"] = inst_cov["article_id"].map(
all_arts.set_index("article_id")["created"]
)
reg_data = make_temp_reg_table(inst_cov, all_arts, focus_countries)
reg_results = time_reg_comparison(reg_data)
regression_coefficients = pd.concat(
[
reg_results[n].conf_int().loc[focus_countries].stack()
for n, name in enumerate(["Country", "Cluster"])
],
axis=1,
)
regression_coefficients.columns = ["country", "cluster"]
regression_df = regression_coefficients.stack().reset_index(name="coefficient")
# %%
ch = (
alt.Chart()
.mark_line()
.encode(
y=alt.Y(
"level_2",
axis=alt.Axis(ticks=False, labels=False, title=None),
scale=alt.Scale(domain=["country", "cluster"]),
),
color=alt.Color("level_2", scale=alt.Scale(domain=["country", "cluster"])),
x=alt.X("coefficient", title="Time coefficient"),
detail="level_0",
)
).properties(height=15, width=100)
hor = (
alt.Chart().transform_calculate(x="1").mark_rule(color="black").encode(x="x:Q")
)
regression_comparison = alt.layer(ch, hor, data=regression_df).facet(
facet=alt.Facet(
"level_0",
sort=alt.EncodingSortField("coefficient", op="mean"),
title=None,
header=alt.Header(orient="top"),
),
columns=3,
)
logging.info("Covid / non covid regression comparison")
tok = get_arxiv_tokenised()
arts_2020 = (
all_arts.query("article_source!='cord'")
.query("month_year < '2020-09-01'")
.query("month_year > '2020-03-01'")
.reset_index(drop=True)
.assign(tok=lambda df: df["article_id"].map(tok))
.dropna(axis=0, subset=["tok"])
)
mdl, ids = train_topic_model(150, arts_2020["tok"], arts_2020["article_id"])
topic_mix = make_topic_mix(mdl, 150, ids)
# Measure distance between all documents and cluster centroids
centroids = (
topic_mix.assign(cl=lambda df: df.index.map(paper_cluster_lookup))
.dropna(axis=0, subset=["cl"])
.melt(id_vars="cl")
.groupby(["cl", "variable"])["value"]
.mean()
.unstack()
)
arxiv_dist_to_clust = make_distance_to_clusters(topic_mix, cityblock, clust_assign)
arx_dist_not_covid = arxiv_dist_to_clust.loc[
~arxiv_dist_to_clust.index.isin(set(arts["article_id"]))
]
logging.info("Get controls by cluster")
cluster_control = {
c: set(arx_dist_not_covid.sort_values(c, ascending=True).index[:500])
for c in arx_dist_not_covid.columns
}
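    # For each cluster, the 500 non-Covid papers whose topic mixtures are closest to the
    # cluster (cityblock distance) are kept as a control group for the comparisons below.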
covid_comparisons = [
make_comparison_reg(arts_2020, cl, clust_assign, cluster_control)
for cl in range(23)
]
cits, collabs = [
[
x[n].conf_int().assign(name=name)
for x, name in zip(covid_comparisons, clean_names.values())
]
for n in [0, 1]
]
cit_long = (
pd.concat(cits)
.query("index!='const'")
.sort_values(0, ascending=False)
.reset_index(drop=True)
.melt(id_vars="name")
)
cit_reg_plot = plot_regression_coefficients(cit_long).properties(
title="Regression analysis: Citation counts"
)
logging.info("plotting results")
coll_long = (
        pd.concat(collabs)
    )
import torch
from torch.utils import data as D
import os, io
from datetime import datetime
from . import preprocessing
from zeiss_umbrella.fundus.adversarial import get_diff
from zeiss_umbrella.fundus.quality_augmentation.transform import preset_augment
from zeiss_umbrella.fundus.quality_augmentation.make_dataset import convert_to_hdf5, convert_to_hdf5_jpeg
import pandas as pd
from skimage.io import imread
import numpy as np
import cv2
import warnings
import h5py
class FundusDataset(D.Dataset):
"""Fundus Dataset loader"""
def __init__(self, root_dir=None, csv_name=None, df=None, corruption_dic=None, phase=None,
transform_dic=None, flip_labels=False, gaussian_noise=None):
"""
Three ways of loading dataset:
1. from a .csv file
2. from a pandas dataframe
3. from a .h5 corruption dataset
These ways are mutually exclusive
root_dir: directory where images and label csv are stored
csv_name: the name of the csv label file stored in root_dir
df: dataframe with image names and labels
corruption_dic: dictionary storing setting of corruption dataset
phase: 'train' or 'valid' (specifically for .h5 files)
transform_dic: dictionary storing setting of transformation.
format: {'type': 'normalize'/'default'/'augmented'}
                       'corruption': 'true' or 'false' - whether to apply corruption before the transformations above;
                                     if true, 'ops' and 'or_less' must also be defined in transform_dic
flip_labels: flip labels or not
gaussian_noise: amplitude of gaussian noise added after transformations defined in transform_dic are performed
"""
self.corruption_dic = corruption_dic
self.transform_dic = transform_dic
self.gaussian_noise = gaussian_noise
if self.corruption_dic:
h5_file = h5py.File(corruption_dic['h5_path'], "r")
if self.corruption_dic['valid_corruption']:
self.data_x = h5_file.get(phase).get('augmented')
else:
if phase == 'train':
self.data_x = h5_file.get(phase).get('augmented')
elif phase == 'valid':
self.data_x = h5_file[h5_file['valid']['original'][0]]
else:
raise ValueError("unknown phase")
self.labels = h5_file.get(phase).get('label')
elif df.__class__.__name__ != 'NoneType':
self.labels = df.copy()
else:
self.labels = pd.read_csv(os.path.join(root_dir, csv_name))
self.root_dir = root_dir
self.flip_labels = flip_labels
if transform_dic['type'] == 'normalize':
transform = preprocessing.normalize()
elif transform_dic['type'] == 'augmented':
transform = preprocessing.augmented()
else:
transform = preprocessing.default()
self.transform = transform
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if self.corruption_dic:
if self.corruption_dic['output_type'] == 'jpeg':
img = imread(io.BytesIO(self.data_x[idx]))
label = int(self.labels[idx])
else:
img = self.data_x[idx]
label = int(self.labels[idx])
else:
img_name = os.path.join(self.root_dir, self.labels['image'][idx] + '.jpeg')
img = imread(img_name)
label = self.labels['level'][idx]
if self.transform_dic.get('corruption', None):
img = preset_augment(img, ops=self.transform_dic['ops'], or_less=self.transform_dic['or_less'])
if self.transform:
img = self.transform(img)
if self.gaussian_noise:
img += self.gaussian_noise * torch.randn_like(img)
if self.flip_labels:
label = (label != 1).astype(label.dtype)
return img, torch.tensor(label).item()
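# Example usage (illustrative only; the paths, csv name and batch size below are
# placeholders, not values defined in this module):
#   dataset = FundusDataset(root_dir='path/to/fundus', csv_name='trainLabels.csv',
#                           transform_dic={'type': 'default'})
#   loader = D.DataLoader(dataset, batch_size=32, shuffle=True)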
def normal_balance(df, root_dir, csv_name, class_name, seed):
"""
Generate balanced dataframe saved as .csv file from input dataframe
:param seed: seed for reproducibility
:param class_name: Name of the class to be balanced
:param root_dir: path to the folder saving the resulting .csv file
:param csv_name: name of the csv file
:param df: input dataframe
"""
df_copy = df.copy()
dominant_class = df_copy.groupby(class_name).count().idxmax()[0]
num_dominant_class = df_copy.groupby(class_name).count().max()[0]
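    # Oversample every minority class: repeat it as many whole times as fits, then top up
    # with a seeded random sample so each class matches the dominant class count.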
for label_class in df_copy[class_name].unique():
if label_class != dominant_class:
sample_class = df_copy[df_copy[class_name] == label_class].copy()
for _ in range(int(num_dominant_class / len(sample_class)) - 1):
df_copy = df_copy.append(sample_class)
rest_length = num_dominant_class - int(num_dominant_class / len(sample_class)) * len(sample_class)
df_copy = df_copy.append(sample_class.sample(n=rest_length, random_state=seed))
df_copy.to_csv(os.path.join(root_dir, csv_name))
print('Generating {}'.format(csv_name))
def generate_distance_stat(input_csv_name, root_dir, class_name, device, batch_size):
"""
Calculate the Euclidean distance between all sample pairs and save it as a .csv file.
:param input_csv_name: The original dataset csv
:param root_dir: dir where samples and dataset csv are stored
:param class_name: the name of the label (level for fundus dataset)
:param device: the device used to calculate distance
:param batch_size
:return: a .csv file with head "image1 image2 dist"
"""
d = preprocessing.default_transform()
df = pd.read_csv(os.path.join(root_dir, input_csv_name))
dominant_class = df.groupby(class_name).count().idxmax()[0]
distance_dict = {'image1': [], 'image2': [], 'dist': []}
for label in df['level'].unique():
if label != dominant_class:
df_label = df[df[class_name] == label]
# stack all the samples as a tensor
image_tensor = torch.stack(
[d(imread(os.path.join(root_dir, img_name + '.jpeg'))) for img_name in df_label['image']])
counter = 0
for ind, img_name1 in enumerate(df_label['image']):
counter += 1
print('progress: {:.3f}'.format(counter / len(df_label)))
# process batch by batch
for i in range(0, len(df_label), batch_size):
step_size = min(batch_size, len(df_label) - i)
image1_tensor = image_tensor[ind].repeat(step_size, 1, 1, 1)
distance = (
get_diff(image_tensor[i:i + step_size], image1_tensor, device=device).max(dim=1)[0]).tolist()
distance_dict['dist'] += distance
distance_dict['image2'] += df_label['image'].tolist()
distance_dict['image1'] += [img_name1] * len(df_label)
    df_distance = pd.DataFrame(distance_dict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import datetime as dt
from scipy import stats
import pymannkendall as mk
from Modules import Read
from Modules.Utils import Listador, FindOutlier, Cycles
from Modules.Graphs import GraphSerieOutliers, GraphDataFrames, GraphSingleDF
from TestRandomnes import RunsTest,ChangePointTest,SpearmanCoefTest,AndersonTest,MannKendall_modified
Path_out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Sedimentos'))
Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanSedimentos'))
Res_est = Listador(Est_path, inicio='resolidos', final='.csv')
Trn_est = Listador(Est_path, inicio='Trans', final='.csv')
Men_est = Listador(Est_path, inicio='Valores_', final='.csv')
Armen_r = pd.read_csv(os.path.join(Est_path, Res_est[0]), index_col=0)
Coque_r = pd.read_csv(os.path.join(Est_path, Res_est[1]), index_col=0)
Esper_r = pd.read_csv(os.path.join(Est_path, Res_est[2]), index_col=0)
Rayar_r = pd.read_csv(os.path.join(Est_path, Res_est[3]), index_col=0)
Magan_r = pd.read_csv(os.path.join(Est_path, Res_est[4]), index_col=0)
Monte_r = pd.read_csv(os.path.join(Est_path, Res_est[5]), index_col=0)
Palen_r = pd.read_csv(os.path.join(Est_path, Res_est[6]), index_col=0)
Armen_r.index = pd.DatetimeIndex(Armen_r.index)
Coque_r.index = pd.DatetimeIndex(Coque_r.index)
Esper_r.index = pd.DatetimeIndex(Esper_r.index)
Rayar_r.index = pd.DatetimeIndex(Rayar_r.index)
Magan_r.index = pd.DatetimeIndex(Magan_r.index)
Monte_r.index = pd.DatetimeIndex(Monte_r.index)
Palen_r.index = pd.DatetimeIndex(Palen_r.index)
Armen_t = pd.read_csv(os.path.join(Est_path, Trn_est[0]), index_col=0)
Coque_t = pd.read_csv(os.path.join(Est_path, Trn_est[1]), index_col=0)
Esper_t = pd.read_csv(os.path.join(Est_path, Trn_est[2]), index_col=0)
Rayar_t = pd.read_csv(os.path.join(Est_path, Trn_est[3]), index_col=0)
Magan_t = pd.read_csv(os.path.join(Est_path, Trn_est[4]), index_col=0)
Monte_t = pd.read_csv(os.path.join(Est_path, Trn_est[5]), index_col=0)
Palen_t = pd.read_csv(os.path.join(Est_path, Trn_est[6]), index_col=0)
Armen_t.index = pd.DatetimeIndex(Armen_t.index)
Coque_t.index = pd.DatetimeIndex(Coque_t.index)
Esper_t.index = pd.DatetimeIndex(Esper_t.index)
Rayar_t.index = | pd.DatetimeIndex(Rayar_t.index) | pandas.DatetimeIndex |
import os
import wx
import datetime
from pubsub import pub
import xlwings as xlw
import pandas as pd
import numpy as np
import wx.lib.mixins.listctrl as listmix
import image_viewer
import analyzer
wildcard = "Python source (*.py)|*.py|" \
"Compiled Python (*.pyc)|*.pyc|" \
"Comma sep(csv) (*.csv)|*.csv|" \
"Space sep (*.txt)|*.txt|" \
"Excel workbook (*.xlsx)|*.xlsx|" \
"All files (*.*)|*.*"
# the contents below are only example/demo data, so they need to be cleared before real data is loaded
mw_listctrldata = {
1: ("Hey!", "You can edit", "me!"),
2: ("Try changing the contents", "by", "clicking"),
3: ("in", "a", "cell"),
4: ("See how the length columns", "change", "?"),
5: ("You can use", "TAB,", "cursor down,"),
6: ("and cursor up", "to", "navigate"),
7: ("But editing", "the cells", "don't change the data !!!"),
}
types_listctrldata = {
1: ("So this is", "another table", "!"),
2: ("you can input", "a", "xlsx file"),
3: ("with", "two", "sheets"),
4: ("like", "the", "input_data.xlsx"),
5: ("molecular weight", "sheet for", "upper table,"),
6: ("peptide types", "sheet for", "this table"),
}
input_listctrldata = dict()
result_listctrldata = dict()
name_list = ['lambda=0', 'lambda=0.05', 'lambda=0.0', 'lambda=0.15']
num_list = [52.4, 57.8, 59.1, 54.6]
class TestListCtrl(wx.ListCtrl,
listmix.ListCtrlAutoWidthMixin,
listmix.TextEditMixin):
def __init__(self, parent, ID, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
listmix.TextEditMixin.__init__(self)
def Populate(self):
# for normal, simple columns, you can add them like this:
self.InsertColumn(0, "A")
self.InsertColumn(1, "B")
self.InsertColumn(2, "C")
items = mw_listctrldata.items()
for key, data in items:
index = self.InsertItem(self.GetItemCount(), str(data[0]))
self.SetItem(index, 1, str(data[1]))
self.SetItem(index, 2, str(data[2]))
self.SetItemData(index, key)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.SetColumnWidth(1, wx.LIST_AUTOSIZE)
self.SetColumnWidth(2, wx.LIST_AUTOSIZE)
self.currentItem = 0
def Populate2(self):
self.InsertColumn(0, "A")
self.InsertColumn(1, "B")
self.InsertColumn(2, "C")
items = types_listctrldata.items()
for key, data in items:
index = self.InsertItem(self.GetItemCount(), str(data[0]))
self.SetItem(index, 1, str(data[1]))
self.SetItem(index, 2, str(data[2]))
self.SetItemData(index, key)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.SetColumnWidth(1, wx.LIST_AUTOSIZE)
self.SetColumnWidth(2, wx.LIST_AUTOSIZE)
self.currentItem = 0
def Populate3(self):
self.InsertColumn(0, "A")
items = input_listctrldata.items()
for key, data in items:
index = self.InsertItem(self.GetItemCount(), str(data[0]))
self.SetItemData(index, key)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.currentItem = 0
def Populate4(self):
self.InsertColumn(0, "A")
self.InsertColumn(1, "B")
items = result_listctrldata.items()
for key, data in items:
index = self.InsertItem(self.GetItemCount(), str(data[0]))
self.SetItem(index, 1, str(data[1]))
self.SetItemData(index, key)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.SetColumnWidth(1, wx.LIST_AUTOSIZE)
self.currentItem = 0
    # Note: this function is not actually used; it is kept only as a reference for self.GetItem usage.
def SetStringItem(self, index, col, data):
if col in range(3):
wx.ListCtrl.SetItem(self, index, col, data)
wx.ListCtrl.SetItem(self, index, 3 + col, str(len(data)))
else:
try:
datalen = int(data)
            except (TypeError, ValueError):
return
wx.ListCtrl.SetItem(self, index, col, data)
data = self.GetItem(index, col - 3).GetText()
wx.ListCtrl.SetItem(self, index, col - 3, data[0:datalen])
class Frame(wx.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
font_large = wx.Font(20, wx.ROMAN, wx.NORMAL, wx.NORMAL, False, '')
font_medium = wx.Font(16, wx.ROMAN, wx.NORMAL, wx.NORMAL, False, '')
super().SetFont(font_medium)
pnl = wx.Panel(self)
pnl.SetFont(font_large)
mw_list = TestListCtrl(pnl, 10, size=(1080, 250),
style=wx.LC_REPORT
| wx.BORDER_NONE
# | wx.LC_SORT_ASCENDING
# Content of list as instructions is nonsense with auto-sort enabled
| wx.LC_HRULES | wx.LC_VRULES
)
mw_list.Populate()
types_list = TestListCtrl(pnl, 11, size=(1080, 250),
style=wx.LC_REPORT
| wx.BORDER_NONE
# | wx.LC_SORT_ASCENDING
| wx.LC_HRULES | wx.LC_VRULES
)
types_list.Populate2()
input_list = TestListCtrl(pnl, 12, size=(540, 200),
style=wx.LC_REPORT
| wx.BORDER_NONE
# | wx.LC_SORT_ASCENDING
| wx.LC_HRULES | wx.LC_VRULES
)
input_list.Populate3()
result_list = TestListCtrl(pnl, 13, size=(540, 200),
style=wx.LC_REPORT
| wx.BORDER_NONE
# | wx.LC_SORT_ASCENDING
| wx.LC_HRULES | wx.LC_VRULES
)
result_list.Populate4()
self.mw_list = mw_list
self.types_list = types_list
self.input_list = input_list
self.result_list = result_list
for_mw = wx.StaticText(pnl, label="Molecular weight of Amino acid")
for_types = wx.StaticText(pnl, label="Types of Peptide")
for_input = wx.StaticText(pnl, label="Input data")
for_result = wx.StaticText(pnl, label="Result data")
sizer = wx.BoxSizer(wx.VERTICAL)
box1 = wx.BoxSizer(wx.VERTICAL)
box1.Add(for_mw)
box1.Add(mw_list, 20, wx.EXPAND)
box1.Add(for_types)
box1.Add(types_list, 20, wx.EXPAND)
box2 = wx.BoxSizer(wx.HORIZONTAL)
sub_box = wx.BoxSizer(wx.VERTICAL)
sub_box2 = wx.BoxSizer(wx.VERTICAL)
sub_box.Add(for_input)
sub_box.Add(input_list, 22)
sub_box2.Add(for_result)
sub_box2.Add(result_list, 23)
box2.Add(sub_box, 21)
box2.Add(sub_box2, 21)
sizer.Add(box1)
sizer.Add(box2)
pnl.SetSizer(sizer)
self.makemanuBar()
self.CreateStatusBar()
now = datetime.datetime.now()
self.SetStatusText(f"Program started at {datetime.datetime.strftime(now, '%Y, %m %d %T')} (static)")
# event handlers
self.analyzer = analyzer.Control()
self.Bind(wx.EVT_CLOSE, self.OnExit)
# *args
self.opentype = 4
self.savetype = 4
self.input_df = None
self.query_type = "1"
self.query_type2 = ""
self.origin_path = None
self.output_path = None
def makemanuBar(self):
file_menu = wx.Menu()
introItem = file_menu.Append(-1, "&Introduction\tCtrl-I", "The introduction about this program")
openItem = file_menu.Append(-1, "&Open\tCtrl-O", "Open the file")
save_asItem = file_menu.Append(-1, "&Save As\tCtrl-Shift-S", "Save as a self-named file")
file_menu.AppendSeparator()
exitItem = file_menu.Append(wx.ID_EXIT, "Exit\tESC", "Terminate the program")
work_menu = wx.Menu()
analyzeItem = work_menu.Append(-1, "&Analyze\tCtrl-A",
"Analyze a series of molecular and rank the possible types")
appendMatterItem = work_menu.Append(-1, "Append &Matter\tCtrl-M", "tail added matter")
queryItem = work_menu.Append(-1, "&Query the type\tCtrl-Q", "To see information about single type peptide")
work_menu.AppendSeparator()
drawItem = work_menu.Append(-1, "&Draw\tCtrl-D", "Draw the diagram")
viewItem = work_menu.Append(-1, "&View\tCtrl-V", "Open the image viewer")
help_menu = wx.Menu()
aboutItem = help_menu.Append(wx.ID_ABOUT, "", "Who created this?")
menuBar = wx.MenuBar()
menuBar.Append(file_menu, "&File")
menuBar.Append(work_menu, "&Work")
menuBar.Append(help_menu, "&Help")
self.SetMenuBar(menuBar)
# menu event
self.Bind(wx.EVT_MENU, self.OnIntro, introItem)
self.Bind(wx.EVT_MENU, self.OnOpen, openItem)
self.Bind(wx.EVT_MENU, self.OnSave_As, save_asItem)
self.Bind(wx.EVT_MENU, self.OnExit, exitItem)
self.Bind(wx.EVT_MENU, self.OnAnalyze, analyzeItem)
self.Bind(wx.EVT_MENU, self.OnAppend, appendMatterItem)
self.Bind(wx.EVT_MENU, self.OnQuerySingleType, queryItem)
self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)
self.Bind(wx.EVT_MENU, self.OnDraw, drawItem)
self.Bind(wx.EVT_MENU, self.OnView, viewItem)
def OnOpen(self, event):
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
            # add wx.FD_MULTIPLE | here if loading several files at once should be supported
style=wx.FD_OPEN |
wx.FD_CHANGE_DIR | wx.FD_FILE_MUST_EXIST |
wx.FD_PREVIEW
)
dlg.SetFilterIndex(self.opentype)
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
self.origin_path = paths[0]
self.opentype = dlg.GetFilterIndex()
            file_type = dlg.GetFilename().split(".")[-1]
if file_type == "csv":
df = pd.read_csv(self.origin_path, header=None, sep=",")
elif file_type == "txt":
df = pd.read_csv(self.origin_path, header=None, sep=" ")
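                # NOTE: the csv/txt branches above only populate `df`; the code further below reads
                # mw_df / types_df / input_df, which are filled only by the xlsx branch.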
elif file_type == "xlsx":
xlw_app = xlw.App(visible=False)
wb = xlw.Book(self.origin_path)
mw_sheet = wb.sheets[0].used_range.value
types_sheet = wb.sheets[1].used_range.value
input_sheet = wb.sheets[2].used_range.value
mw_df = pd.DataFrame(mw_sheet)
types_df = pd.DataFrame(types_sheet)
input_df = pd.DataFrame(input_sheet)
wb.close()
xlw_app.kill()
else:
raise Exception("It's not usable yet!")
global mw_listctrldata
global types_listctrldata
global input_listctrldata
            mw_listctrldata.clear()
            types_listctrldata.clear()
            input_listctrldata.clear()
for val in mw_df.itertuples():
mw_listctrldata[val[0]] = val[1:]
for val in types_df.itertuples():
types_listctrldata[val[0]] = val[1:]
for val in input_df.itertuples():
input_listctrldata[val[0]] = val[1:]
self.mw_list.ClearAll()
self.mw_list.Populate()
self.types_list.ClearAll()
self.types_list.Populate2()
self.input_list.ClearAll()
self.input_list.Populate3()
# pretreat the mw, types df for analyzing
self.analyzer.aa = analyzer.pretreat_mw(mw_listctrldata)
self.analyzer.pep = analyzer.pretreat_types(types_listctrldata)
self.input_df = list(input_listctrldata.values())
dlg.Destroy()
def OnSave_As(self, event):
mw_text = self.gettext(self.mw_list, self.mw_list.GetItemCount, self.mw_list.GetColumnCount)
types_text = self.gettext(self.types_list, self.types_list.GetItemCount, self.types_list.GetColumnCount)
input_text = self.gettext(self.input_list, self.input_list.GetItemCount, self.input_list.GetColumnCount)
result_text = self.gettext(self.result_list, self.result_list.GetItemCount, self.result_list.GetColumnCount)
dlg = wx.FileDialog(
self, message="Save file as ...", defaultDir=os.getcwd(),
defaultFile="", wildcard=wildcard, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
)
dlg.SetFilterIndex(self.savetype)
if dlg.ShowModal() == wx.ID_OK:
self.output_path = dlg.GetPath()
mw_output = pd.DataFrame(mw_text).to_string(index=False, header=False)
types_output = pd.DataFrame(types_text).to_string(index=False, header=False)
input_output = | pd.Series(input_text) | pandas.Series |
from typing import NoReturn, Tuple, Any, Union, Optional, List, Callable, Dict
from timeatlas.abstract.abstract_base_generator import AbstractBaseGenerator
from timeatlas.time_series import TimeSeries
from timeatlas.time_series_dataset import TimeSeriesDataset
from timeatlas.config.constants import COMPONENT_VALUES
from .anomalies import AnomalyABC
from .utils import get_operator
from .labeler import AnomalySetLabeler
from .config import AnomalyConfigParser
import pandas as pd
import numpy as np
from itertools import cycle
from copy import copy
import math
from os import path
class AnomalyGenerator(AbstractBaseGenerator):
"""
    A generator that introduces an anomaly into a given TimeSeriesDataset.
The types and parameters are controlled with a .ini file,
that can be created with "AnomalyGeneratorTemplate"
"""
def __init__(self, data: TimeSeriesDataset, conf_file: str, save_as: str = 'text'):
"""
Args:
data: TimeSeriesDataset containing the data
conf_file: config file created with AnomalyGeneratorTemplate
"""
# Each generator set a label_suffix
# Here: AGM -> Anomaly Generator Manual
super().__init__()
self.label_suffix = "AGM"
assert save_as == 'text' or save_as == 'pickle' or save_as == 'tsd'
self.save_as = save_as
# assertions
assert isinstance(data, TimeSeriesDataset)
assert all(isinstance(x, TimeSeries) for x in
data), "One or more elements are not a TimeSeries-object"
assert path.isfile(
conf_file), f"No config file found under given path '{conf_file}'"
# set data
self.data = data.copy(deep=True)
# read the config file
self.config = AnomalyConfigParser(config_file=conf_file)
self.GLOBAL = self.config['GLOBAL']
self.ANOMALIES = self.config['ANOMALIES']
self.selection = self.GLOBAL['selection']
self.percent = self.GLOBAL['percent']
self.amount = self.GLOBAL['amount']
self.outfile = self.GLOBAL['outfile']
# create numpy-random.RandomState object
self.seed = self.GLOBAL['seed']
# functions for anomaly
self.ABC = AnomalyABC(self.seed)
self.anomaly_functions = self.get_anomaly_function()
# adding a label column to the dataframe and creating the results anomaly labels
self.labels = AnomalySetLabeler()
# figure out the precision of the data
self.precision = self.generation_precision()
@staticmethod
def precision_and_scale(x: float):
"""
Get the precision of a value
Args:
x: a (float) number
Returns: the number of positions after the comma
"""
# 14 is the maximal number of digits python can handle (more is also unrealistic)
max_digits = 14
# if the number is NaN return nothing
if math.isnan(x):
return
        # figure out the magnitude -> the number of digits before the comma
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
            return 0  # the integer part already uses all representable digits, so no fractional precision
# shift the number after the comma in front of the comma and figure out the amount
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return scale
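    # Example (sketch): precision_and_scale(3.14159) -> 5, precision_and_scale(10.0) -> 0,
    # precision_and_scale(float('nan')) -> None, i.e. the number of digits after the comma.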
@staticmethod
def clean_parameters(values) -> Dict:
"""
        Function to clean up the parameters: parameters that are None in the config file are removed.
        Args:
            values: parameter values from the config file
        Returns: Dict of the parameters without the None values
"""
return {k: v for k, v in values['PARAMETERS'].items() if v is not None}
@staticmethod
def create_zip_object(data, anomaly_f):
'''
        Combines the list of data (to which the anomalies are added) with the list of anomaly functions.
        If the function list is shorter, the functions are cycled until every data element has one anomaly;
        if the data list is shorter, each data element is still assigned exactly one anomaly function.
Args:
data: pd.Series of data
anomaly_f: function of ABC.anomalies creating the anomaly
Returns: zip-object
'''
# warnings.warn("Length of data > length of anomalies: Not all anomalies will be assigned.")
zip_list = zip(data, cycle(anomaly_f))
return zip_list
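    # Example (sketch): create_zip_object([ts1, ts2, ts3], [f_a, f_b]) yields the pairs
    # (ts1, f_a), (ts2, f_b), (ts3, f_a) - the anomaly functions are cycled over the data.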
def generation_precision(self):
'''
        Compute the rounded average precision of the values inside the TimeSeriesDataset
Returns: rounded average number of digits after the comma
'''
precision_df = np.array(
[self.precision_and_scale(x) for ts in self.data for x in
ts._data.values])
# This is more of a security. A correctly formated TimeSeries-object has no None elements
precision_df = precision_df[precision_df != None]
return int(round(precision_df.mean()))
def save(self) -> NoReturn:
"""
Saving the labels and the new TimeSeriesDataset to file.
Returns: NoReturn
"""
self.labels.finalize()
if self.save_as == 'text':
self.data.to_text(path=f'./{self.outfile}_data')
elif self.save_as == 'pickle':
self.data.to_pickle(path=f'./{self.outfile}_data.pkl')
elif self.save_as == 'tsd':
return self.data
# This function is no longer needed, since we save the labels now in the TimeSeries
# self.labels.annotation.to_csv(f'./{self.outfile}_data/{self.outfile}_labels.csv', index=False)
def get_anomaly_function(self) -> List:
'''
Get all functions in the config file
Returns: list of tuples with the functions as (function, parameters)
'''
functions = []
for key, values in self.ANOMALIES.items():
function = getattr(self.ABC, values['function'])
# removing the keys with None
parameters = self.clean_parameters(values)
functions.append((function, parameters))
return functions
def chose_amount(self) -> List:
"""
        Choose the time windows based on a fixed amount given by the user in the config file:
        e.g. amount = 10 will select 10 elements
Returns: List of pair of indices and data
"""
ind, data = self.data.select_components_randomly(n=self.amount, seed=self.seed, indices=True)
return list(zip(ind, data))
def chose_selection(self) -> List:
"""
        Choose the time windows based on a user selection given in the config file:
        e.g. selection = [0, 1, 5, 9] will select the first, second, sixth and tenth elements.
Returns: List of pair of indices and data
"""
data = self.data[self.selection]
return list(zip(self.selection, data))
def chose_percentage(self) -> List:
"""
        Choose the time windows based on a percentage given in the config file:
e.g. percent = 0.2 will select 20% of the TimeSeriesDataset (min=0, max=1)
Returns: List of pair of indices and data
"""
ind, data = self.data.select_components_by_percentage(percent=self.percent, seed=self.seed, indices=True)
return list(zip(ind, data))
def add_data(self, new_data: TimeSeries, index: int) -> NoReturn:
"""
Replacing the old TimeSeries with the new TimeSeries containing the anomaly.
Args:
new_data: new TimeSeries that will replace the old one
index: index of the TimeSeries to replace in the TimeSeriesDataset
Returns: NoReturn
"""
self.data[index]._data[f'0_{COMPONENT_VALUES}'].replace(to_replace= | pd.Series(new_data) | pandas.Series |
"""
Create ensemble forecast.
"""
import numpy as np
import pandas as pd
from pywtk.site_lookup import get_3tiersites_from_wkt
from pywtk.wtk_api import get_nc_data, WIND_FCST_DIR
from pywtk import site_lookup
from . import stats
class Ensemble:
"""Creation of ensemble forecasts."""
_allowable_horizons = (1, 4, 6, 24)
# 1 and 24 hour values estimated from NREL wind power error report; others
# are arbitrary
_default_error_kurtosis = {1: 18, 4: 14, 6: 10, 24: 2.5}
def __init__(self, coordinates, horizon=24, isclose_distance=10,
error_kurtosis=None):
"""
:param tuple coordinates: lat / long of forecast location (decimal deg)
:param int_or_list horizon: Forecast horizon(s), hours.
        :param dict error_kurtosis: Forecast error kurtosis for each horizon
:param float isclose_distance: Largest distance [km?] between wind
toolkit data point and forecast coordinates to be `close`,
otherwise will take the mean of the three closest points
TODO: Should really do an inverse-distance mean...
"""
if isinstance(horizon, int):
horizon = [horizon]
for h in horizon:
assert h in self._allowable_horizons
self.latitude, self.longitude = coordinates
self.horizon = horizon
self.isclose_distance = isclose_distance
if error_kurtosis is None:
self.error_kurtosis = self._default_error_kurtosis.copy()
else:
self.error_kurtosis = error_kurtosis
def create_ensemble(self, forecast, n, kurtosis):
"""Create *n* p.u. forecasts from the base pywtk forecast.
:param DataFrame forecast: pywtk site forecast; should be normalized to
p.u. power output.
:param int n: Number of data points to generate.
:param float kurtosis: Kurtosis for p.u. power prediction error
"""
# For each timestep, we fit a hyperbolic distribution to the
# _p90 10th percentile, mean, and _p10 90th percentile to estimate the
# errors, then generate n power predictions at that point.
ten_percentile_col = [c for c in forecast.columns if '_p90' in c][0]
ninety_percentile_col = [c for c in forecast.columns if '_p10' in c][0]
mean_col = set(forecast.columns) - \
set((ten_percentile_col, ninety_percentile_col))
if len(mean_col) > 1:
raise ValueError("Ambiguous mean column")
mean_col = list(mean_col)[0]
forecasts = []
for i, row in forecast.iterrows():
percentiles = {0.1: row[ten_percentile_col],
0.9: row[ninety_percentile_col]}
error_dist, _, _ = stats.fit_hyperbolic(
row[mean_col], percentiles=percentiles, kurtosis=kurtosis,
x0=None
)
forecasts.append(error_dist.rvs(size=n))
forecasts = np.array(forecasts)
forecasts = np.around(np.clip(forecasts, 0, 1), 4)
return | pd.DataFrame(forecasts, index=forecast.index) | pandas.DataFrame |
import os
import pandas as pd
CURRENT_DIR = os.path.dirname(__file__)
INPUT_DIR = os.path.join(CURRENT_DIR, "input")
TMP_DIR = os.path.join(CURRENT_DIR, "tmp")
GRAPHER_DIR = os.path.join(CURRENT_DIR, "grapher")
def main():
# GCP data
gas_gcp = pd.read_excel(
os.path.join(INPUT_DIR, "country_fuel/gas_by_country.xlsx"),
sheet_name="Data", skiprows=1
)
oil_gcp = pd.read_excel(
os.path.join(INPUT_DIR, "country_fuel/oil_by_country.xlsx"),
sheet_name="Data", skiprows=1
)
coal_gcp = pd.read_excel(
os.path.join(INPUT_DIR, "country_fuel/coal_by_country.xlsx"),
sheet_name="Data", skiprows=1
)
flaring_gcp = pd.read_excel(
os.path.join(INPUT_DIR, "country_fuel/flaring_by_country.xlsx"),
sheet_name="Data", skiprows=1
)
cement_gcp = pd.read_excel(
os.path.join(INPUT_DIR, "country_fuel/cement_by_country.xlsx"),
sheet_name="Data", skiprows=1
)
country_gcp = pd.read_csv(
os.path.join(INPUT_DIR, "shared/gcp_country_standardized.csv")
)
gas_gcp = pd.melt(gas_gcp, id_vars=["Year"], var_name=["Country"], value_name="Gas")
oil_gcp = pd.melt(oil_gcp, id_vars=["Year"], var_name=["Country"], value_name="Oil")
coal_gcp = | pd.melt(coal_gcp, id_vars=["Year"], var_name=["Country"], value_name="Coal") | pandas.melt |
import os
import re
from retry import retry
from typing import List, Union
import pandas as pd
import requests
from tqdm import tqdm
import multitasking
import signal
from .config import EastmoneyFundHeaders
from ..utils import to_numeric
from jsonpath import jsonpath
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_quote_history(fund_code: str, pz: int = 40000) -> pd.DataFrame:
"""
    Fetch a fund's historical net asset values, given its fund code and the page size to request
Parameters
----------
fund_code : str
        6-digit fund code
pz : int, optional
        page size, defaults to 40000 so that the full history is retrieved
Returns
-------
DataFrame
        DataFrame containing the fund's historical net asset values and related data
Examples
--------
>>> import efinance as ef
>>> ef.fund.get_quote_history('161725')
日期 单位净值 累计净值 涨跌幅
0 2021-06-11 1.5188 3.1499 -3.09
1 2021-06-10 1.5673 3.1984 1.69
2 2021-06-09 1.5412 3.1723 0.11
3 2021-06-08 1.5395 3.1706 -6.5
4 2021-06-07 1.6466 3.2777 1.61
... ... ... ... ...
1469 2015-06-08 1.0380 1.0380 2.5692
1470 2015-06-05 1.0120 1.0120 1.5045
1471 2015-06-04 0.9970 0.9970 --
1472 2015-05-29 0.9950 0.9950 --
1473 2015-05-27 1.0000 1.0000 --
"""
data = {
'FCODE': f'{fund_code}',
'IsShareNet': 'true',
'MobileKey': '1',
'appType': 'ttjj',
'appVersion': '6.2.8',
'cToken': '1',
'deviceid': '1',
'pageIndex': '1',
'pageSize': f'{pz}',
'plat': 'Iphone',
'product': 'EFund',
'serverVersion': '6.2.8',
'uToken': '1',
'userId': '1',
'version': '6.2.8'
}
url = 'https://fundmobapi.eastmoney.com/FundMNewApi/FundMNHisNetList'
json_response = requests.get(
url, headers=EastmoneyFundHeaders, data=data).json()
rows = []
columns = ['日期', '单位净值', '累计净值', '涨跌幅']
if json_response is None:
return pd.DataFrame(rows, columns=columns)
datas = json_response['Datas']
if len(datas) == 0:
return pd.DataFrame(rows, columns=columns)
rows = []
for stock in datas:
date = stock['FSRQ']
rows.append({
'日期': date,
'单位净值': stock['DWJZ'],
'累计净值': stock['LJJZ'],
'涨跌幅': stock['JZZZL']
})
df = | pd.DataFrame(rows) | pandas.DataFrame |
import warnings
from collections import Counter
from typing import Dict
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow
import pytest
from pandas import DataFrame
import ray
from ray.data import Dataset
from ray.data.aggregate import Max
from ray.data.preprocessor import Preprocessor, PreprocessorNotFittedException
from ray.data.preprocessors import (
BatchMapper,
Chain,
CustomStatefulPreprocessor,
LabelEncoder,
MinMaxScaler,
OneHotEncoder,
OrdinalEncoder,
SimpleImputer,
StandardScaler,
)
from ray.data.preprocessors.encoder import Categorizer, MultiHotEncoder
from ray.data.preprocessors.hasher import FeatureHasher
from ray.data.preprocessors.normalizer import Normalizer
from ray.data.preprocessors.scaler import MaxAbsScaler, RobustScaler
from ray.data.preprocessors.tokenizer import Tokenizer
from ray.data.preprocessors.transformer import PowerTransformer
from ray.data.preprocessors.utils import simple_hash, simple_split_tokenizer
from ray.data.preprocessors.vectorizer import CountVectorizer, HashingVectorizer
@pytest.fixture
def create_dummy_preprocessors():
class DummyPreprocessorWithNothing(Preprocessor):
_is_fittable = False
class DummyPreprocessorWithPandas(DummyPreprocessorWithNothing):
def _transform_pandas(self, df: "pd.DataFrame") -> "pd.DataFrame":
return df
class DummyPreprocessorWithArrow(DummyPreprocessorWithNothing):
def _transform_arrow(self, table: "pyarrow.Table") -> "pyarrow.Table":
return table
class DummyPreprocessorWithPandasAndArrow(DummyPreprocessorWithNothing):
def _transform_pandas(self, df: "pd.DataFrame") -> "pd.DataFrame":
return df
def _transform_arrow(self, table: "pyarrow.Table") -> "pyarrow.Table":
return table
yield (
DummyPreprocessorWithNothing(),
DummyPreprocessorWithPandas(),
DummyPreprocessorWithArrow(),
DummyPreprocessorWithPandasAndArrow(),
)
def test_standard_scaler():
"""Tests basic StandardScaler functionality."""
col_a = [-1, 0, 1, 2]
col_b = [1, 1, 5, 5]
col_c = [1, 1, 1, None]
in_df = | pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c}) | pandas.DataFrame.from_dict |
#!/conda/bin/python2.7
import sys
import argparse
import vcf
import pandas
from vcf.parser import _vcf_metadata_parser as vcf_parser
class vep_converter():
def __init__(self, input_name, output_name, transcript_file):
self._input_name = input_name
self._output_name = output_name
self._transcript_dic = {}
with open(transcript_file, 'r') as transcript_data:
for input_line in transcript_data:
# Remove header
if input_line[0] == '#':
pass
else:
input_list = input_line.strip().split('\t')
ensembl = input_list[0]
if len(input_list) == 2:
refseq = 'NONE'
elif input_list[2] == '':
refseq = 'NONE'
else:
refseq = input_list[2]
self._transcript_dic[ensembl] = refseq
self._conseq_dict = {'transcript_ablation' : ['Splice_Site', 1], # A feature ablation whereby the deleted region includes a transcript feature
'exon_loss_variant' : ['Splice_Site', 1], # A sequence variant whereby an exon is lost from the transcript
'splice_donor_variant' : ['Splice_Site', 2], # A splice variant that changes the 2 base region at the 5' end of an intron
'splice_acceptor_variant' : ['Splice_Site', 2], # A splice variant that changes the 2 base region at the 3' end of an intron
'stop_gained' : ['Nonsense_Mutation', 3], # A sequence variant whereby at least one base of a codon is changed, resulting in a premature stop codon, leading to a shortened transcript
'frameshift_variant' : ['Frame_Shift', 3], # A sequence variant which causes a disruption of the translational reading frame, because the number of nucleotides inserted or deleted is not a multiple of three
'stop_lost' : ['Nonstop_Mutation', 3], # A sequence variant where at least one base of the terminator codon (stop) is changed, resulting in an elongated transcript
'start_lost' : ['Translation_Start_Site', 4], # A codon variant that changes at least one base of the canonical start codon
'initiator_codon_variant' : ['Translation_Start_Site', 4], # A codon variant that changes at least one base of the first codon of a transcript
'disruptive_inframe_insertion' : ['In_Frame_Ins', 5], # An inframe increase in cds length that inserts one or more codons into the coding sequence within an existing codon
'disruptive_inframe_deletion' : ['In_Frame_Del', 5], # An inframe decrease in cds length that deletes bases from the coding sequence starting within an existing codon
'inframe_insertion' : ['In_Frame_Ins', 5], # An inframe non synonymous variant that inserts bases into the coding sequence
'inframe_deletion' : ['In_Frame_Del', 5], # An inframe non synonymous variant that deletes bases from the coding sequence
'protein_altering_variant' : ['Protein_Alter', 5], # A sequence variant which is predicted to change the protein encoded in the coding sequence
'missense_variant' : ['Missense_Mutation', 6], # A sequence variant, that changes one or more bases, resulting in a different amino acid sequence but where the length is preserved
'conservative_missense_variant' : ['Missense_Mutation', 6], # A sequence variant whereby at least one base of a codon is changed resulting in a codon that encodes for a different but similar amino acid. These variants may or may not be deleterious
'rare_amino_acid_variant' : ['Missense_Mutation', 6], # A sequence variant whereby at least one base of a codon encoding a rare amino acid is changed, resulting in a different encoded amino acid
'transcript_amplification' : ['Intron', 7], # A feature amplification of a region containing a transcript
'splice_region_variant' : ['Splice_Region', 8], # A sequence variant in which a change has occurred within the region of the splice site, either within 1-3 bases of the exon or 3-8 bases of the intron
'stop_retained_variant' : ['Silent', 9], # A sequence variant where at least one base in the terminator codon is changed, but the terminator remains
'synonymous_variant' : ['Silent', 9], # A sequence variant where there is no resulting change to the encoded amino acid
'incomplete_terminal_codon_variant' : ['Silent', 10], # A sequence variant where at least one base of the final codon of an incompletely annotated transcript is changed
'coding_sequence_variant' : ['Missense_Mutation', 11], # A sequence variant that changes the coding sequence
'mature_miRNA_variant' : ['RNA', 11], # A transcript variant located with the sequence of the mature miRNA
'exon_variant' : ['RNA', 11], # A sequence variant that changes exon sequence
'5_prime_UTR_variant' : ['5\'UTR', 12], # A UTR variant of the 5' UTR
'5_prime_UTR_premature_start_codon_gain_variant' : ['5\'UTR', 12], # snpEff-specific effect, creating a start codon in 5' UTR
'3_prime_UTR_variant' : ['3\'UTR', 12], # A UTR variant of the 3' UTR
'non_coding_exon_variant' : ['RNA', 13], # A sequence variant that changes non-coding exon sequence
'non_coding_transcript_exon_variant' : ['RNA', 13], # snpEff-specific synonym for non_coding_exon_variant
'non_coding_transcript_variant' : ['RNA', 14], # A transcript variant of a non coding RNA gene
'nc_transcript_variant' : ['RNA', 14], # A transcript variant of a non coding RNA gene (older alias for non_coding_transcript_variant)
'intron_variant' : ['Intron', 14], # A transcript variant occurring within an intron
'intragenic_variant' : ['Intron', 14], # A variant that occurs within a gene but falls outside of all transcript
'INTRAGENIC' : ['Intron', 14], # snpEff-specific synonym of intragenic_variant
'NMD_transcript_variant' : ['Silent', 15], # A variant in a transcript that is the target of NMD
'upstream_gene_variant' : ['5\'Flank', 16], # A sequence variant located 5' of a gene
'downstream_gene_variant' : ['3\'Flank', 16], # A sequence variant located 3' of a gene
'TFBS_ablation' : ['Targeted_Region', 17], # A feature ablation whereby the deleted region includes a transcription factor binding site
'TFBS_amplification' : ['Targeted_Region', 17], # A feature amplification of a region containing a transcription factor binding site
'TF_binding_site_variant' : ['Intergenic', 17], # A sequence variant located within a transcription factor binding site
'regulatory_region_ablation' : ['Targeted_Region', 17], # A feature ablation whereby the deleted region includes a regulatory region
'regulatory_region_amplification' : ['Targeted_Region', 17], # A feature amplification of a region containing a regulatory region
'regulatory_region_variant' : ['Intergenic', 17], # A sequence variant located within a regulatory region
'regulatory_region' : ['Intergenic', 17], # snpEff-specific effect that should really be regulatory_region_variant
'feature_elongation' : ['Targeted_Region', 18], # A sequence variant that causes the extension of a genomic feature, with regard to the reference sequence
'feature_truncation' : ['Targeted_Region', 18], # A sequence variant that causes the reduction of a genomic feature, with regard to the reference sequence
'intergenic_variant' : ['Intergenic', 19], # A sequence variant located in the intergenic region, between genes
'intergenic_region' : ['Intergenic', 19], # snpEff-specific effect that should really be intergenic_variant
'ETC' : ['None', 20] # none
}
self._biotype_dict = {'protein_coding' : 1, # Contains an open reading frame (ORF)
'LRG_gene' : 2, # Gene in a "Locus Reference Genomic" region known to have disease-related sequence variations
'IG_C_gene' : 2, # Immunoglobulin (Ig) variable chain genes imported or annotated according to the IMGT
'IG_D_gene' : 2, # Immunoglobulin (Ig) variable chain genes imported or annotated according to the IMGT
'IG_J_gene' : 2, # Immunoglobulin (Ig) variable chain genes imported or annotated according to the IMGT
'IG_LV_gene' : 2, # Immunoglobulin (Ig) variable chain genes imported or annotated according to the IMGT
'IG_V_gene' : 2, # Immunoglobulin (Ig) variable chain genes imported or annotated according to the IMGT
'TR_C_gene' : 2, # T-cell receptor (TcR) genes imported or annotated according to the IMGT
'TR_D_gene' : 2, # T-cell receptor (TcR) genes imported or annotated according to the IMGT
'TR_J_gene' : 2, # T-cell receptor (TcR) genes imported or annotated according to the IMGT
'TR_V_gene' : 2, # T-cell receptor (TcR) genes imported or annotated according to the IMGT
'miRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'snRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'snoRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'ribozyme' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'tRNA' : 3, #Added by <NAME>
'sRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'scaRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'rRNA' : 3, # Non-coding RNA predicted using sequences from RFAM and miRBase
'lincRNA' : 3, # Long, intervening noncoding (linc) RNAs, that can be found in evolutionarily conserved, intergenic regions
'bidirectional_promoter_lncrna' : 3, # A non-coding locus that originates from within the promoter region of a protein-coding gene, with transcription proceeding in the opposite direction on the other strand
'bidirectional_promoter_lncRNA' : 3, # A non-coding locus that originates from within the promoter region of a protein-coding gene, with transcription proceeding in the opposite direction on the other strand
'known_ncrna' : 4,
'vaultRNA' : 4, # Short non coding RNA genes that form part of the vault ribonucleoprotein complex
'macro_lncRNA' : 4, # unspliced lncRNAs that are several kb in size
'Mt_tRNA' : 4, # Non-coding RNA predicted using sequences from RFAM and miRBase
'Mt_rRNA' : 4, # Non-coding RNA predicted using sequences from RFAM and miRBase
'antisense' : 5, # Has transcripts that overlap the genomic span (i.e. exon or introns) of a protein-coding locus on the opposite strand
'antisense_RNA' : 5, # Alias for antisense (Y. Boursin)
'sense_intronic' : 5, # Long non-coding transcript in introns of a coding gene that does not overlap any exons
'sense_overlapping' : 5, # Long non-coding transcript that contains a coding gene in its intron on the same strand
'3prime_overlapping_ncrna' : 5, # Transcripts where ditag and/or published experimental data strongly supports the existence of short non-coding transcripts transcribed from the 3'UTR
'3prime_overlapping_ncRNA' : 5, # Transcripts where ditag and/or published experimental data strongly supports the existence of short non-coding transcripts transcribed from the 3'UTR
'misc_RNA' : 5, # Non-coding RNA predicted using sequences from RFAM and miRBase
'non_coding' : 5, # Transcript which is known from the literature to not be protein coding
'regulatory_region' : 6, # A region of sequence that is involved in the control of a biological process
'disrupted_domain' : 6, # Otherwise viable coding region omitted from this alternatively spliced transcript because the splice variation affects a region coding for a protein domain
'processed_transcript' : 6, # Doesn't contain an ORF
'TEC' : 6, # To be Experimentally Confirmed. This is used for non-spliced EST clusters that have polyA features. This category has been specifically created for the ENCODE project to highlight regions that could indicate the presence of protein coding genes that require experimental validation, either by 5' RACE or RT-PCR to extend the transcripts, or by confirming expression of the putatively-encoded peptide with specific antibodies
'TF_binding_site' : 7, # A region of a nucleotide molecule that binds a Transcription Factor or Transcription Factor complex
'CTCF_binding_site' :7, # A transcription factor binding site with consensus sequence CCGCGNGGNGGCAG, bound by CCCTF-binding factor
'promoter_flanking_region' : 7, # A region immediately adjacent to a promoter which may or may not contain transcription factor binding sites
'enhancer' : 7, # A cis-acting sequence that increases the utilization of (some) eukaryotic promoters, and can function in either orientation and in any location (upstream or downstream) relative to the promoter
'promoter' : 7, # A regulatory_region composed of the TSS(s) and binding sites for TF_complexes of the basal transcription machinery
'open_chromatin_region' : 7, # A DNA sequence that in the normal state of the chromosome corresponds to an unfolded, un-complexed stretch of double-stranded DNA
'retained_intron' : 7, # Alternatively spliced transcript believed to contain intronic sequence relative to other, coding, variants
'nonsense_mediated_decay' : 7, # If the coding sequence (following the appropriate reference) of a transcript finishes >50bp from a downstream splice site then it is tagged as NMD. If the variant does not cover the full reference coding sequence then it is annotated as NMD if NMD is unavoidable i.e. no matter what the exon structure of the missing portion is the transcript will be subject to NMD
'non_stop_decay' : 7, # Transcripts that have polyA features (including signal) without a prior stop codon in the CDS, i.e. a non-genomic polyA tail attached directly to the CDS without 3' UTR. These transcripts are subject to degradation
'ambiguous_orf' : 7, # Transcript believed to be protein coding, but with more than one possible open reading frame
'pseudogene' : 8, # Have homology to proteins but generally suffer from a disrupted coding sequence and an active homologous gene can be found at another locus. Sometimes these entries have an intact coding sequence or an open but truncated ORF, in which case there is other evidence used (for example genomic polyA stretches at the 3' end) to classify them as a pseudogene. Can be further classified as one of the following
'processed_pseudogene' : 8, # Pseudogene that lack introns and is thought to arise from reverse transcription of mRNA followed by reinsertion of DNA into the genome
'polymorphic_pseudogene' : 8, # Pseudogene owing to a SNP/DIP but in other individuals/haplotypes/strains the gene is translated
'retrotransposed' : 8, # Pseudogene owing to a reverse transcribed and re-inserted sequence
'translated_processed_pseudogene' : 8, # Pseudogenes that have mass spec data suggesting that they are also translated
'translated_unprocessed_pseudogene' : 8, # Pseudogenes that have mass spec data suggesting that they are also translated
'transcribed_processed_pseudogene' : 8, # Pseudogene where protein homology or genomic structure indicates a pseudogene, but the presence of locus-specific transcripts indicates expression
'transcribed_unprocessed_pseudogene' : 8, # Pseudogene where protein homology or genomic structure indicates a pseudogene, but the presence of locus-specific transcripts indicates expression
'transcribed_unitary_pseudogene' : 8, #Pseudogene where protein homology or genomic structure indicates a pseudogene, but the presence of locus-specific transcripts indicates expression
'unitary_pseudogene' : 8, # A species specific unprocessed pseudogene without a parent gene, as it has an active orthologue in another species
'unprocessed_pseudogene' : 8, # Pseudogene that can contain introns since produced by gene duplication
'Mt_tRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'tRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'snoRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'snRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'scRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'rRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'misc_RNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'miRNA_pseudogene' : 8, # Non-coding RNAs predicted to be pseudogenes by the Ensembl pipeline
'IG_C_pseudogene' : 8, # Inactivated immunoglobulin gene
'IG_D_pseudogene' : 8, # Inactivated immunoglobulin gene
'IG_J_pseudogene' : 8, # Inactivated immunoglobulin gene
'IG_V_pseudogene' : 8, # Inactivated immunoglobulin gene
'TR_J_pseudogene' : 8, # Inactivated immunoglobulin gene
'TR_V_pseudogene' : 8, # Inactivated immunoglobulin gene
'artifact' : 9, # Used to tag mistakes in the public databases (Ensembl/SwissProt/Trembl)
'ETC' : 10 # none
}
def pick_cons(self, input_list):
best_cons = 'intergenic_variant'
best_score = 19
if input_list == []:
pass
else:
for input_cons in input_list:
if input_cons in self._conseq_dict:
one_cons, one_score = self._conseq_dict[input_cons]
else:
one_cons, one_score = self._conseq_dict['ETC']
if one_cons == 'Frame_Shift':
if self._var_type == 'Del':
one_cons = 'Frame_Shift_Del'
elif self._var_type == 'Ins':
one_cons = 'Frame_Shift_Ins'
elif one_cons == 'Protein_Alter':
if self._var_inframe == 'FALSE' and self._var_type == 'Del':
one_cons = 'Frame_Shift_Del'
elif self._var_inframe == 'FALSE' and self._var_type == 'Ins':
one_cons = 'Frame_Shift_Ins'
elif self._var_inframe == 'TRUE' and self._var_type == 'Del':
one_cons = 'In_Frame_Del'
elif self._var_inframe == 'TURE' and self._var_type == 'Ins':
one_cons = 'In_Frame_Ins'
if one_score < best_score:
best_cons = one_cons
best_score = one_score
return best_cons, best_score
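    # Example (sketch): pick_cons(['splice_region_variant', 'intron_variant']) returns
    # ('Splice_Region', 8) - the consequence with the lowest (highest-priority) score wins.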
def sort_csq(self, input_list):
column_names = ['#', 'gene', 'biotype', 'consequence', 'length', 'transcript', 'vep']
raw_df = | pandas.DataFrame(columns=column_names) | pandas.DataFrame |
__author__ = "<NAME>"
__copyright__ = "BMW Group"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from tsa import Logger
import sys
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import argparse
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from statsmodels.tsa.seasonal import seasonal_decompose
from copy import copy, deepcopy
from scipy import stats
class UVariateTimeSeriesClass(object):
"""
Uni-variate time series class
Attributes:
_ts_df_cols - internal column names for dataframe that will be input to model
ts_df - time series data frame
freq - frequency of time series, possibilities ['S', 'min', 'H', 'D', 'W', 'M']
p_train - float value defining which part of data is to be used as training data. Note, value of 1.0 would mean
all data will be used as training data,
hence no test data will be generated.
timeformat - time format if time series data needs to be brought into datetime
#
_mode - defines the mode as 'test' or 'forecast'
_train_dt - training data
_test_dt - test data
model_fit - fitted model
fittedvalues - computed fitted values
residuals - residuals
rmse - RMSE on test set (test data and the forecast on test data)
upper_whisker_res - upper whisker for residuals
    lower_conf_int - lower confidence interval
    upper_conf_int - upper confidence interval
    forecast - computed forecasted values
    residuals_forecast - residuals between forecasted and real values. Note, this variable exists only if test data
    existed
Methods:
ts_transform() - transforms time series using log10 or box-cox
ts_resample() - resamples time series at the chosen frequency freq
_plot_residuals() - residual plots helper function
ts_test() - evaluates fitted model on the test data, if this one has been generated
ts_forecast() - forecasts time series and plots the results
_plot_forecast() - helper function for plotting forecasted time-series
    ts_decompose() - decomposes the time series into seasonal, trend and residual components and plots the results
plot_decompose() - plots the results of ts_decompose()
Helper methods:
_prepare_fit() - prepares ts_fit of child class. Supposed to be called by a child class
_residuals() - helper function for calculating residuals. Supposed to be called by a child class
_check_ts_test() - checks for test. Supposed to be called by a child class
_check_ts_forecast() - checks for forecast. Supposed to be called by a child class
"""
def __init__(self, ts_df, time_format="%Y-%m-%d %H:%M:%S", freq='D', p_train=1.0, **kwds):
"""
Initializes the object UVariateTimeSeriesForecaster
"""
self._ts_df_cols = ['ds', 'y']
self.ts_df = ts_df
self.time_format = time_format
self.freq = freq
self.p_train = p_train
self.transform = None
self._boxcox_lmbda = None
self._mode = ''
self._train_dt = None
self._test_dt = None
self.model_fit = None
self.fittedvalues = None
self.residuals = None
self.rmse = None
self.upper_whisker_res = None
self.lower_conf_int = None
self.upper_conf_int = None
self.forecast = None
self.residuals_forecast = None
self.seasonal = None
self.trend = None
self.baseline = None
self._uvts_cls_logger = Logger('uvts_cls')
# Assertion Tests
try:
assert self.freq in ['S', 'min', 'H', 'D', 'W', 'M']
except AssertionError:
            self._uvts_cls_logger.warning("freq should be in ['S', 'min', 'H', 'D', 'W', 'M']. "
"Assuming daily frequency!")
self.freq = 'D'
try:
self.p_train = float(self.p_train)
assert self.p_train > 0
except AssertionError:
self._uvts_cls_logger.error("p_train defines part of data on which you would train your model."
"This value cannot be less than or equal to zero!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
except ValueError:
self._uvts_cls_logger.error("p_train must be convertible to float type!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
else:
if int(self.p_train) < 1:
self._mode = 'test'
else:
self._mode = 'forecast'
try:
assert pd.DataFrame(self.ts_df).shape[1] <= 2
except AssertionError:
self._uvts_cls_logger.error(
"Time series must be uni-variate. "
"Hence, at most a time columns and a column of numeric values are expected!")
self._uvts_cls_logger.exception("Exception occurred, ts_df")
else:
self.ts_df = self.ts_df.reset_index()
self.ts_df.columns = self._ts_df_cols
self.ts_df['y'] = self.ts_df['y'].apply(np.float64, errors='coerce')
self.ts_df.set_index('ds', inplace=True)
print(type(self._uvts_cls_logger))
print(self._uvts_cls_logger)
self._uvts_cls_logger.info("Using time series data of range: " + str(min(self.ts_df.index)) + ' - ' + str(
max(self.ts_df.index)) + " and shape: " + str(self.ts_df.shape))
if not isinstance(self.ts_df.index, pd.DatetimeIndex):
self._uvts_cls_logger.warning("Time conversion required...")
self.ts_df = self.ts_df.reset_index()
try:
self.ts_df['ds'] = self.ts_df['ds'].apply(
lambda x: datetime.datetime.strptime(
str(x).translate({ord('T'): ' ', ord('Z'): None})[:-1],
self.time_format))
except ValueError as e:
self._uvts_cls_logger.warning("Zulu time conversion not successful: {}".format(e))
self._uvts_cls_logger.warning("Will try without assuming zulu time...")
try:
self.ts_df['ds'] = self.ts_df['ds'].apply(
lambda x: datetime.datetime.strptime(str(x), self.time_format))
except ValueError as e:
self._uvts_cls_logger.info("Time conversion not successful. Check your time_format: {}".format(e))
else:
self._uvts_cls_logger.info("Time conversion successful!")
else:
self._uvts_cls_logger.info("Time conversion successful!")
# set index
self.ts_df.set_index('ds', inplace=True)
#
self.ts_df.index = pd.to_datetime(self.ts_df.index)
self.ts_df.sort_index(inplace=True)
# resample
self.ts_resample()
# delegate
super(UVariateTimeSeriesClass, self).__init__(**kwds)
def __copy__(self):
"""
Copies the object
"""
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
"""
Deepcopies the object
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
def ts_transform(self, transform):
"""
Transforms time series via applying casted 'transform'. Right now 'log10' and 'box-cox' possible.
"""
try:
assert transform.lower().strip() in ['log10', 'box-cox']
except AssertionError:
self._uvts_cls_logger.error(
"transform should be in ['log10', 'box-cox'] or empty. Assuming no transform! "
"Hence, if you get bad results, you would like maybe to choose e.g., log10 here.")
self._uvts_cls_logger.exception("Assertion exception occurred, transform")
self.transform = None
else:
self.transform = transform.lower()
# transform
if self.transform == 'log10':
try:
self.ts_df['y'] = self.ts_df['y'].apply(np.log10)
except ValueError:
self._uvts_cls_logger.exception("log10 transformation did not work! Possibly negative "
"values present?")
elif self.transform == 'box-cox':
if input("Do you want to provide lambda for box.cox? y/n?").strip().lower() == 'y':
self._boxcox_lmbda = float(input())
else:
self._boxcox_lmbda = None
try:
if self._boxcox_lmbda is None:
bc, lmbda_1 = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)
self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=lmbda_1)
else:
self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)
except ValueError:
self._uvts_cls_logger.exception("box-cox transformation did not work! "
"Possibly negative values present or bad lmbda?")
return self
def set_frequency(self, new_freq):
"""
Sets new frequency and resamples time series to that new frequency
"""
try:
assert new_freq in ['S', 'min', 'H', 'D', 'W', 'M']
except AssertionError:
            self._uvts_cls_logger.error("frequency should be in ['S', 'min', 'H', 'D', 'W', 'M']")
else:
self.freq = new_freq
self.ts_resample()
def ts_check_frequency(self):
"""
Checks the frequency of time series
"""
if self.ts_df.index.freq is None:
self._uvts_cls_logger.info("No specific frequency detected.")
self._uvts_cls_logger.info("Frequency chosen in initialization: " + str(
self.freq) + " enter 'n' and call ts_resample() if you are satisfied with this value.")
if input("Should a histogram of time deltas be plotted y/n?").strip().lower() == 'y':
ff = pd.Series(self.ts_df.index[1:(len(self.ts_df))] - self.ts_df.index[0:(len(self.ts_df) - 1)])
ff = ff.apply(lambda x: int(x.total_seconds() / (60 * 60)))
plt.hist(ff, bins=120)
plt.xlabel("Rounded time delta [H]")
plt.ylabel("Frequency of occurrence")
self._uvts_cls_logger.info(ff.value_counts())
self._uvts_cls_logger.info("Should hourly frequency not fit, choose a reasonable frequency and call "
"set_frequency(new_freq)")
else:
pass
else:
self._uvts_cls_logger.info("Time series frequency: " + str(self.ts_df.index.freq))
def ts_resample(self):
"""
Brings original time series to the chosen frequency freq
"""
ts_freq = pd.DataFrame(
index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),
columns=['dummy'])
self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)
self.ts_df.y = self.ts_df.y.fillna(method='ffill')
# if np.isnan ( self.ts_df.y ).any ():
# self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )
if np.isnan(self.ts_df.y).any():
self._uvts_cls_logger.warning("Some NaN found, something went wrong, check the data!")
sys.exit(-1)
self._uvts_cls_logger.info("Time series resampled at frequency: " + str(self.ts_df.index.freq) +
". New shape of the data: " + str(self.ts_df.shape))
return self
def _prepare_fit(self):
"""
Prepares data for training or forecasting modes
"""
if self.ts_df.index.freq is None:
self._uvts_cls_logger.warning("Time series exhibit no frequency. Calling ts_resample()...")
try:
self.ts_resample()
except ValueError:
self._uvts_cls_logger.error("Resample did not work! Error:" + str(sys.exc_info()[0]))
sys.exit("STOP")
ts_df = self.ts_df
ts_test_df = pd.DataFrame()
if self._mode == 'forecast' or int(self.p_train) == 1:
self._train_dt = ts_df
self._test_dt = ts_test_df
elif self._mode == 'test' and int(self.p_train) < 1:
# split
ts_df = ts_df.reset_index()
ts_df.columns = self._ts_df_cols
ts_test_df = ts_df
# training
ts_df = pd.DataFrame(ts_df.loc[:int(self.p_train * len(ts_df) - 1), ])
ts_df.set_index('ds', inplace=True)
# test
ts_test_df = pd.DataFrame(ts_test_df.loc[int(self.p_train * len(ts_test_df)):, ])
ts_test_df.set_index('ds', inplace=True)
# now set
self._train_dt = ts_df
if not ts_test_df.empty:
self._test_dt = ts_test_df
return self
def _residuals(self):
"""
Calculate residuals
"""
if self.model_fit is None:
self._uvts_cls_logger.error("No model has been fitted, residuals cannot be computed!")
sys.exit("STOP")
try:
# use fittedvalues to fill in the model dictionary
self.residuals = pd.Series(np.asarray(self._train_dt['y']) - np.asarray(self.fittedvalues).flatten(),
index=self._train_dt['y'].index)
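            # upper whisker = mean + 1.5 * IQR of the residuals (boxplot-style upper outlier bound)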
self.upper_whisker_res = self.residuals.mean() + 1.5 * (
self.residuals.quantile(0.75) - self.residuals.quantile(0.25))
except (KeyError, AttributeError):
self._uvts_cls_logger.exception("Exception occurred: Model was not fitted or ts has other structure")
return self
def _plot_residuals(self, y, yhat, _id):
"""
Plot the residuals
"""
try:
assert self.model_fit is not None
except AssertionError:
self._uvts_cls_logger.exception("Model has to be fitted first! Please call ts_fit(...)")
fig, axes = plt.subplots(2, 1, figsize=(20, 5), sharex=True)
axes[0].plot( | pd.Series(yhat, index=self._train_dt.index) | pandas.Series |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from typing import Optional, Dict, Any, Union, Tuple, List
import pandas as pd
from pm4py.statistics.traces.generic.common import case_duration as case_duration_commons
from pm4py.util import exec_utils, constants, pandas_utils
from pm4py.util import variants_util
from pm4py.util import xes_constants as xes
from pm4py.util.business_hours import soj_time_business_hours_diff
from pm4py.util.constants import CASE_CONCEPT_NAME
from pm4py.util.xes_constants import DEFAULT_TIMESTAMP_KEY
class Parameters(Enum):
ATTRIBUTE_KEY = constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY
ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_TIMESTAMP_KEY
CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
MAX_VARIANTS_TO_RETURN = "max_variants_to_return"
VARIANTS_DF = "variants_df"
ENABLE_SORT = "enable_sort"
SORT_BY_COLUMN = "sort_by_column"
SORT_ASCENDING = "sort_ascending"
MAX_RET_CASES = "max_ret_cases"
BUSINESS_HOURS = "business_hours"
WORKTIMING = "worktiming"
WEEKENDS = "weekends"
def get_variant_statistics(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Union[
List[Dict[str, int]], List[Dict[List[str], int]]]:
"""
Get variants from a Pandas dataframe
Parameters
-----------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Parameters.MAX_VARIANTS_TO_RETURN -> Maximum number of variants to return
variants_df -> If provided, avoid recalculation of the variants dataframe
Returns
-----------
variants_list
List of variants inside the Pandas dataframe
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
max_variants_to_return = exec_utils.get_param_value(Parameters.MAX_VARIANTS_TO_RETURN, parameters, None)
variants_df = exec_utils.get_param_value(Parameters.VARIANTS_DF, parameters, get_variants_df(df,
parameters=parameters))
variants_df = variants_df.reset_index()
variants_list = pandas_utils.to_dict_records(variants_df.groupby("variant").agg("count").reset_index())
variants_list = sorted(variants_list, key=lambda x: (x[case_id_glue], x["variant"]), reverse=True)
if max_variants_to_return:
variants_list = variants_list[:min(len(variants_list), max_variants_to_return)]
return variants_list
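# A minimal usage sketch, assuming a toy event log with the default column names:
#
#   import pandas as pd
#   log = pd.DataFrame({
#       "case:concept:name": ["c1", "c1", "c2", "c2"],
#       "concept:name": ["A", "B", "A", "B"],
#       "time:timestamp": pd.to_datetime(["2021-01-01", "2021-01-02",
#                                         "2021-01-03", "2021-01-04"]),
#   })
#   get_variant_statistics(log)
#   # -> roughly [{'variant': 'A,B', 'case:concept:name': 2}]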
def get_variants_df_and_list(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Tuple[
pd.DataFrame, Union[List[Dict[str, int]], List[Dict[List[str], int]]]]:
"""
(Technical method) Provides variants_df and variants_list out of the box
Parameters
------------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Returns
------------
variants_df
Variants dataframe
variants_list
List of variants sorted by their count
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
variants_df = get_variants_df(df, parameters=parameters)
variants_stats = get_variant_statistics(df, parameters=parameters)
variants_list = []
for vd in variants_stats:
variant = vd["variant"]
count = vd[case_id_glue]
variants_list.append([variant, count])
variants_list = sorted(variants_list, key=lambda x: (x[1], x[0]), reverse=True)
return variants_df, variants_list
def get_cases_description(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Dict[
str, Dict[str, Any]]:
"""
Get a description of traces present in the Pandas dataframe
Parameters
-----------
df
Pandas dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that identifies the case ID
Parameters.TIMESTAMP_KEY -> Column that identifies the timestamp
enable_sort -> Enable sorting of traces
Parameters.SORT_BY_COLUMN -> Sort traces inside the dataframe using the specified column.
Admitted values: startTime, endTime, caseDuration
Parameters.SORT_ASCENDING -> Set sort direction (boolean; it true then the sort direction is ascending,
otherwise descending)
Parameters.MAX_RET_CASES -> Set the maximum number of returned traces
Returns
-----------
ret
Dictionary of traces associated to their start timestamp, their end timestamp and their duration
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
enable_sort = exec_utils.get_param_value(Parameters.ENABLE_SORT, parameters, True)
sort_by_column = exec_utils.get_param_value(Parameters.SORT_BY_COLUMN, parameters, "startTime")
sort_ascending = exec_utils.get_param_value(Parameters.SORT_ASCENDING, parameters, True)
max_ret_cases = exec_utils.get_param_value(Parameters.MAX_RET_CASES, parameters, None)
business_hours = exec_utils.get_param_value(Parameters.BUSINESS_HOURS, parameters, False)
worktiming = exec_utils.get_param_value(Parameters.WORKTIMING, parameters, [7, 17])
weekends = exec_utils.get_param_value(Parameters.WEEKENDS, parameters, [6, 7])
grouped_df = df[[case_id_glue, timestamp_key]].groupby(df[case_id_glue])
first_eve_df = grouped_df.first()
last_eve_df = grouped_df.last()
del grouped_df
last_eve_df.columns = [str(col) + '_2' for col in first_eve_df.columns]
stacked_df = pd.concat([first_eve_df, last_eve_df], axis=1)
del first_eve_df
del last_eve_df
del stacked_df[case_id_glue]
del stacked_df[case_id_glue + "_2"]
stacked_df['caseDuration'] = stacked_df[timestamp_key + "_2"] - stacked_df[timestamp_key]
stacked_df['caseDuration'] = stacked_df['caseDuration'].astype('timedelta64[s]')
if business_hours:
stacked_df['caseDuration'] = stacked_df.apply(
lambda x: soj_time_business_hours_diff(x[timestamp_key], x[timestamp_key + "_2"], worktiming,
weekends), axis=1)
else:
stacked_df['caseDuration'] = stacked_df[timestamp_key + "_2"] - stacked_df[timestamp_key]
stacked_df['caseDuration'] = stacked_df['caseDuration'].astype('timedelta64[s]')
stacked_df[timestamp_key + "_2"] = stacked_df[timestamp_key + "_2"].astype('int64') // 10 ** 9
stacked_df[timestamp_key] = stacked_df[timestamp_key].astype('int64') // 10 ** 9
stacked_df = stacked_df.rename(columns={timestamp_key: 'startTime', timestamp_key + "_2": 'endTime'})
if enable_sort:
stacked_df = stacked_df.sort_values(sort_by_column, ascending=sort_ascending)
if max_ret_cases is not None:
stacked_df = stacked_df.head(n=min(max_ret_cases, len(stacked_df)))
ret = pandas_utils.to_dict_index(stacked_df)
return ret
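# A minimal usage sketch, assuming the same kind of toy event log as above:
#
#   cases = get_cases_description(log)
#   # cases is a dict keyed by case id, e.g.
#   # {'c1': {'startTime': 1609459200, 'endTime': 1609545600, 'caseDuration': 86400.0}, ...}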
def get_variants_df(df, parameters=None):
"""
Get variants dataframe from a Pandas dataframe
Parameters
-----------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Returns
-----------
variants_df
Variants dataframe
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes.DEFAULT_NAME_KEY)
if variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.STRING:
new_df = df.groupby(case_id_glue)[activity_key].agg(lambda col: constants.DEFAULT_VARIANT_SEP.join( | pd.Series.to_list(col) | pandas.Series.to_list |
'''
example of loading FinMind api
'''
from Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
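# Each block below follows the same request pattern: POST form data with a
# 'dataset' key (plus optional 'stock_id'/'date' filters) and read the JSON
# payload's 'data' field into a DataFrame, e.g.
#
#   res = requests.post(url, verify=True, data={'dataset': 'TaiwanStockInfo'})
#   data = pd.DataFrame(res.json()['data'])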
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset': 'TaiwanStockInfo'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Taiwan Stock Dividend Result----------------'''
form_data = {'dataset': 'StockDividendResult'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TotalMarginPurchaseShortSale----------------'''
form_data = {'dataset': 'StockDividendResult',
'stock_id': '2330',
'date': '2010-10-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockNews----------------'''
form_data = {'dataset': 'TaiwanStockNews',
'date': '2019-10-10',
'stock_id': '2317'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset': 'TaiwanStockPrice',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset': 'TaiwanStockPriceMinute',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset': 'FinancialStatements',
'stock_id': '2317',
'date': '2019-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset': 'TaiwanCashFlowsStatement',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset': 'TaiwanStockStockDividend',
'stock_id': '2317',
'date': '2018-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3.8.8 64-bit ('cam')
# language: python
# name: python388jvsc74a57bd0acafb728b15233fa3654ff8b422c21865df0ca42ea3b74670e1f2f098ebd61ca
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Python: Pandas Data Frames 1</h1>
# <h2 style="text-align:center;">Coding Akademie München GmbH</h2>
# <br/>
# <div style="text-align:center;">Dr. <NAME></div>
# <div style="text-align:center;"><NAME></div>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Data Frames
#
# Data frames are the most commonly used data structure in Pandas.
#
# They make it easy to read in, process, and store data.
#
# Conceptually, a data frame consists of several `Series` instances that share a common index (a small illustration follows right after the imports below).
# %% slideshow={"slide_type": "-"}
import numpy as np
import pandas as pd
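# %% [markdown]
# A short illustration of the idea above: a data frame assembled from two `Series`
# objects that share a common index.
# %%
s1 = pd.Series([1, 2, 3], index=["a", "b", "c"], name="x")
s2 = pd.Series([4.0, 5.0, 6.0], index=["a", "b", "c"], name="y")
pd.concat([s1, s2], axis=1)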
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Creating a Data Frame
# %% [markdown]
# ### From a NumPy Array
# %%
def create_data_frame():
rng = np.random.default_rng(42)
array = rng.normal(size=(5, 4), scale=5.0)
index = 'A B C D E'.split()
columns = 'w x y z'.split()
return pd.DataFrame(array, index=index, columns=columns)
# %% slideshow={"slide_type": "subslide"}
df = create_data_frame()
df
# %%
type(df)
# %% [markdown]
# ### From a CSV File
# %%
df_csv = pd.read_csv("example_data.csv")
# %%
df_csv
# %%
df_csv = | pd.read_csv("example_data.csv", index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 15:18:57 2018
@author: Denny.Lehman
"""
import pandas as pd
import numpy as np
import datetime
import time
from pandas.tseries.offsets import MonthEnd
def npv(rate, df):
value = 0
for i in range(0, df.size):
value += df.iloc[i] / (1 + rate) ** (i + 1)
return value
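# A minimal sanity check for npv(): a three-period cash flow of 100 discounted at
# 10% per period (values here are illustrative only).
#
#   cash_flows = pd.Series([100.0, 100.0, 100.0])
#   npv(0.10, cash_flows)  # = 100/1.1 + 100/1.1**2 + 100/1.1**3 ≈ 248.69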
# use this function if finance wants to use the faster CSV version. This version requires proper data cleaning steps
def get_datatape_csv(filepath):
list_of_column_names_required = ['System Project: Sunnova System ID', 'Committed Capital', 'Quote: Recurring Payment', 'Quote: Contract Type' ,'Quote: Payment Escalator', 'InService Date', 'Asset Portfolio - Partner', 'Location']
rename_list = {'System Project: Sunnova System ID':'ID','Committed Capital':'Committed Capital', 'Quote: Contract Type' :'Contract Type', 'Quote: Recurring Payment':'Recurring Payment','Quote: Payment Escalator':'Escalator', 'Location' : 'State'}
d_types = {'System Project: Sunnova System ID':str, 'Committed Capital':np.float64, 'Quote: Recurring Payment': np.float64, 'Quote: Contract Type':str, 'Quote: Payment Escalator':str, 'Asset Portfolio - Partner':str }
parse_dates = ['InService Date']
names=['ID', 'CC', 'RP', 'CT', 'PE', 'InS', 'AP']
df3 = pd.read_csv(filepath, sep=',', skiprows=0, header=2) #, usecols=list_of_column_names_required, thousands=',', dtype=d_types) #, parse_dates=parse_dates) # , nrows=10)
df3 = df3.rename(columns=rename_list)
return df3
def get_datatape(filepath):
start = time.time()
# Import S1 datatape to Pandas DataFrame
df1 = pd.read_excel(filepath, sheetname='Active', header=4, index_col=None, na_values=['N/A'], parse_cols="C:F,J:L,P,V,AI,N,AY") #, converters={'Interconnected': pd.to_datetime})
print(time.time()-start)
# Rename Columns
df1 = df1.rename(columns= {'Interconnected':'InService Date', 'Type':'Contract Type', 'Capital ($)':'Committed Capital', 'Estimate':'Annual Attribute', '$/kW':'Power Rate', '%':'Escalator', 'Year 1':'Recurring Payment', 'Location':'State', 'Quote: RIS CoverPg Monthly Pmt Without PPMT':'Monthly Pmt Without PPMT'})
return df1
def get_S1_datatape(filepath):
start = time.time()
# df1 = pd.read_excel(filepath, sheetname = 'Active header1', index_col=None, na_values=['NA'], parse_cols="A:D,J,N,T,AG")
df1 = pd.read_excel(filepath, sheetname='Active', header=2, index_col=None, na_values=['N/A'], parse_cols="D:F,I,L,O,U,AH,P,Q,AY")
print(time.time()-start)
# Rename the columns in the dataframe to be more concise and descriptive
df1 = df1.rename(columns= {'Asset Portfolio - Customer':'Asset Portfolio - Customer','System Project: Sunnova System ID':'ID', 'Committed Capital':'Committed Capital', 'Quote: Contract Type':'Contract Type','InService Date':'InService Date', 'Quote: Expected Production - Change Order':'Annual Attribute', 'Quote: Solar Rate':'Power Rate', 'Quote: Payment Escalator':'Escalator', 'Quote: Recurring Payment':'Recurring Payment', 'Quote: Installation State':'State', 'Quote: RIS CoverPg Monthly Pmt Without PPMT':'Monthly Pmt Without PPMT'})
return df1
def get_contract_type(df1, target_contracts):
# Make contracts not null
# targets = ['Lease', 'Loan', 'EZ Own']
# df1['Contract Type'].apply(lambda sentence: any(word in sentence for word in targets))
df1 = df1[df1['Contract Type'].str.contains('|'.join(target_contracts), na=False)]
return df1
def remove_non_inService_Systems(df):
#df['InService Date'] = pd.to_datetime(df['InService Date'])
remove = []
for i in range(0, df['InService Date'].size):
if type(df['InService Date'].iloc[i]) == pd._libs.tslib.NaTType:
remove.append(i)
df = df.drop(df.index[remove])
return df
#---------------------------------------------------------------------------
def remove_blank_inservice_date(df):
remove = []
for i in range(1, df['InService Date'].size):
if df['InService Date'].iloc[i] == datetime.time(0,0):
remove.append(i)
if type(df['InService Date'].iloc[i]) == pd._libs.tslib.NaTType:
remove.append(i)
df = df.drop(df.index[remove])
df['InService Date'] = pd.to_datetime(df['InService Date'])
return df
def convert_date_to_EOM(ser):
ser = ser + MonthEnd(1)
return ser
def create_first_production_first_payment_and_last_payment_date(df):
# initialize new columns of dataframe
df['First Payment Date'] = pd.to_datetime(0)
df['Last Payment Date'] = pd.to_datetime(0)
# Make first production date - the end of month one month after inservice date
df['First Production Date'] = | pd.to_datetime(df['InService Date']) | pandas.to_datetime |
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_get_matches_two_dataframes(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2).fit()
left_side = ['foo', 'bar']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_single(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
sg = StringGrouper(test_series_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_index = [0, 3, 1, 2, 0, 3]
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_1_series_1_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2', 'A3'])
sg = StringGrouper(test_series_1, master_id=test_series_id_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_side_id = ['A0', 'A3', 'A1', 'A2', 'A0', 'A3']
left_index = [0, 3, 1, 2, 0, 3]
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side_id = ['A0', 'A0', 'A1', 'A2', 'A3', 'A3']
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_2_series_2_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
sg = StringGrouper(test_series_1, test_series_2, duplicates_id=test_series_id_2,
master_id=test_series_id_1).fit()
left_side = ['foo', 'bar']
left_side_id = ['A0', 'A1']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_side_id = ['B0', 'B1']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_raises_exception_if_unexpected_options_given(self):
# When the input id data does not correspond with its string data:
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
bad_test_series_id_1 = pd.Series(['A0', 'A1'])
good_test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
bad_test_series_id_2 = pd.Series(['B0', 'B1'])
good_test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=bad_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, duplicates_id=bad_test_series_id_2,
master_id=good_test_series_id_1)
# When the input data is ok but the option combinations are invalid:
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, master_id=good_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, ignore_index=True, replace_na=True)
# Here we force an exception by making the number of index-levels of duplicates different from master:
# and setting replace_na=True
test_series_2.index = pd.MultiIndex.from_tuples(list(zip(list('ABC'), [0, 1, 2])))
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, replace_na=True)
def test_get_groups_single_df_group_rep_default(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
sg = StringGrouper(customers_df['Customer Name'])
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
sg.group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
def test_get_groups_single_valued_series(self):
"""This test ensures that get_groups() returns a single-valued DataFrame or Series object
since the input-series is also single-valued. This test was created in response to a bug discovered
by <NAME>"""
pd.testing.assert_frame_equal(
pd.DataFrame([(0, "hello")], columns=['group_rep_index', 'group_rep']),
group_similar_strings(
pd.Series(["hello"]),
min_similarity=0.6
)
)
pd.testing.assert_series_equal(
pd.Series(["hello"], name='group_rep'),
group_similar_strings(
| pd.Series(["hello"]) | pandas.Series |
# -*- coding: utf-8 -*-
# run in py3 !!
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
sys.path.append('.')
from hist_figure import his_figures
if len(sys.argv) > 1:
prefix = sys.argv[1]
else:
prefix = time.time()
DATAPATH = '5fold/'
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
scaler = None # tmp, for fit_transform
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print(path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['error'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
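# For reference: helper() expands a one-hot string such as '[0, 1, 0]' into
# {'3-0': 0, '3-1': 1, '3-2': 0}, so .apply(helper).apply(pd.Series) turns the
# encoded column into separate numeric columns.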
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
# print(df_empty)
df_super = df_empty.iloc[:, [0]]
df_com_date = df_empty.iloc[:, [1]]
df_error = df_empty.iloc[:, [2]]
df_numbers = df_empty.iloc[:, [3]]
X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train_ = df_error.as_matrix()
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.iloc[:, [0]])
df_super = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.iloc[:, [1]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.iloc[:, [3]])
df_numbers = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.iloc[:, [2]])
df_error = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = | pd.DataFrame(df_month) | pandas.DataFrame |
from minder_utils.configurations import feature_config, config
import numpy as np
from .calculation import entropy_rate_from_sequence
from .TimeFunctions import rp_location_delta
from .util import *
from minder_utils.util.util import PBar
import pandas as pd
from typing import Union
import sys
def get_moving_average(df:pd.DataFrame, name, w:int = 3):
'''
This function calculates the moving average of the values in the ```'value'```
column. It will return the dataframe with the moving average in the column
```'value'```.
Arguments
---------
- df: pandas.DataFrame:
A dataframe containing at least a column called ```'value'```.
- name: string:
This string is the name that will be given in the column ```'location'```.
If this column does not already exist, it will be added.
- w: int:
The size of the moving window when calculating the moving average.
Defaults to ```3```.
Returns
--------
- df: pandas.Dataframe :
The original dataframe, with a new column containing the moving average. There
will be missing values in the first ```w-1``` rows, caused by the lack of values
to calculate a mean using the moving window.
'''
values = df['value'].values
if values.shape[0]<w:
df['value'] = pd.NA
df['location'] = name
return df
# moving average over a window of size w
values_ma = np.convolve(values, np.ones(w), 'valid')/w
df['value'] = pd.NA
df.loc[df.index[w-1:],'value'] = values_ma
df['location'] = name
return df
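# A minimal usage sketch (toy data, default window w=3):
#
#   df = pd.DataFrame({'value': [1.0, 2.0, 3.0, 4.0]})
#   get_moving_average(df.copy(), name='bathroom1', w=3)
#   # 'value' becomes [NA, NA, 2.0, 3.0] and 'location' is set to 'bathroom1'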
def get_value_delta(df:pd.DataFrame, name):
'''
This function calculates the delta of the values in the ```'value'```
column (each value divided by the previous one). It will return the dataframe
with the new values in the column ```'value'```.
Arguments
---------
- df: pandas.DataFrame:
A dataframe containing at least a column called ```'value'```.
- name: string:
This string is the name that will be given in the column ```'location'```.
If this column does not already exist, it will be added.
Returns
--------
- df: pandas.Dataframe :
The original dataframe, with a new column containing the delta values. There
will be a missing value in the first row, since delta can not be calculated here.
'''
values = df['value'].values
if values.shape[0]<2:
df['value'] = pd.NA
df['location'] = name
return df
values_delta = values[1:]/values[:-1]
df['value'] = pd.NA
df.loc[df.index[1:],'value'] = values_delta
df['location'] = name
return df
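# A minimal usage sketch (toy data):
#
#   df = pd.DataFrame({'value': [1.0, 2.0, 4.0]})
#   get_value_delta(df.copy(), name='bathroom1')
#   # 'value' becomes [NA, 2.0, 2.0] (each value divided by the previous one)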
def get_location_activity(data, location, time_range=None, name=None):
data = data[data.location == location][['id', 'time', 'value']]
data.time = pd.to_datetime(data.time)
if time_range is None:
data = data.set_index('time').reset_index()
else:
data = data.set_index('time').between_time(*time_range).reset_index()
data.time = data.time.dt.date
data = data.groupby(['id', 'time'])['value'].sum().reset_index()
data['week'] = compute_week_number(data.time)
data['location'] = name if not name is None else location
return data
def get_bathroom_activity(data, time_range=None, name=None):
data = get_location_activity(data=data,
location='bathroom1',
time_range=time_range,
name=name)
'''
data = data[data.location == 'bathroom1'][['id', 'time', 'value']]
data.time = pd.to_datetime(data.time)
data = data.set_index('time').between_time(*time_range).reset_index()
data.time = data.time.dt.date
data = data.groupby(['id', 'time'])['value'].sum().reset_index()
data['week'] = compute_week_number(data.time)
data['location'] = name
'''
return data
def get_bedroom_activity(data, time_range=None, name=None):
return get_location_activity(data=data,
location='bedroom1',
time_range=time_range,
name=name)
def get_body_temperature(data):
data = data[data.location == 'body_temperature'][['id', 'time', 'value']]
data.time = pd.to_datetime(data.time).dt.date
data = data.groupby(['id', 'time'])['value'].mean().reset_index()
data['week'] = compute_week_number(data.time)
data['location'] = 'body_temperature'
return data
def get_bathroom_delta(data, func, name):
def func_group_by(x):
x = func(input_df=x, single_location='bathroom1',
recall_value=feature_config['bathroom_urgent']['recall_value'])
return x
out = data.groupby(by=['id'])[['time', 'location']].apply(
func_group_by).reset_index()
out.columns = ['id', 'value']
out_rp = (pd.DataFrame(out.value.values.tolist())
.stack()
.reset_index(level=1)
.rename(columns={0: 'val', 'level_1': 'key'}))
out = out.drop(columns='value').join(out_rp).reset_index(drop=True).dropna()
out.columns = ['id', 'time', 'value']
out['week'] = compute_week_number(out.time)
out['location'] = name
return out
def get_bathroom_delta_v1(data, func, name):
data.time = pd.to_datetime(data.time).dt.date
results = {}
for p_id in data.id.unique():
p_data = func(data[data.id == p_id].copy(), single_location='bathroom1',
recall_value=feature_config['bathroom_urgent']['recall_value'])
if len(p_data) > 0:
results[p_id] = p_data
results = pd.DataFrame([(i, j, results[i][j].astype(float)) for i in results for j in results[i]],
columns=['id', 'time', 'value'])
results['week'] = compute_week_number(results.time)
results['location'] = name
return results
def get_weekly_activity_data(data):
data.time = pd.to_datetime(data.time).dt.date
data = data.groupby(['id', 'time', 'location'])['value'].sum().reset_index()
data['week'] = compute_week_number(data.time)
data = data[data.location.isin(config['activity']['sensors'])]
data = data.pivot_table(index=['id', 'week'], columns='location',
values='value').reset_index().replace(np.nan, 0)
return data
def get_outlier_freq(data, func, name):
data.time = | pd.to_datetime(data.time) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
        self.recordings = []
        if list is not None:
            self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
    0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
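# Usage sketch (hypothetical recording name): the remidx file holds one
# <state, annotation-flag> pair per time bin (typically 2.5 s), so e.g.
#   M, K = load_stateidx(ppath, 'M1_010120n1')
#   rem_fraction = np.mean(M == 1)   # 1=REM, 2=Wake, 3=NREM, 0=undefined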
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
    (list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
A line is either control or experiments; Control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
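# Example listing (hypothetical recording names and doses); whitespace-separated,
# one recording per line, '#' lines are ignored:
#   C   M1_010120n1
#   E   M2_010120n1   0.25
#   E   M3_010120n1   0.5
# load_dose_recordings would return
#   (['M1_010120n1'], {'0.25': ['M2_010120n1'], '0.5': ['M3_010120n1']})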
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
    if the mouse was stimulated for 2 min every 20 min with 20 Hz, return the
    start and end index of each 2 min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
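# Worked toy example (using SR=1 and intval=5 purely for illustration):
#   laser = np.zeros(30); laser[2:5] = 1; laser[20:23] = 1
#   laser_start_end(laser, SR=1, intval=5)   # -> (array([ 2, 20]), array([ 4, 22]))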
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
    inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
Only if there's no conversion factor in the info file
specified, the conversion will be executed
    :param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
    :return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
    iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
    The nice thing about this filter is that it has zero phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
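# Example: for a signal x sampled at 1 kHz (hypothetical), a 20 Hz low-pass
# corresponds to w0 = 2*20/1000 = 0.04:
#   y = my_lpfilter(x, 2 * 20.0 / 1000.0)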
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
    downsample the vector x by replacing nbin consecutive
    bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
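# Example: averaging pairs of samples halves the length,
#   downsample_vec(np.arange(6.0), 2)   # -> array([0.5, 2.5, 4.5])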
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
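# Note: the Gaussian kernel is normalized to unit sum, so smoothing preserves
# the overall signal level; e.g. a unit impulse is spread into a bump that
# still sums to ~1:
#   x = np.zeros(101); x[50] = 1.0
#   smooth_data(x, 5).sum()   # ~1.0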
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
    f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hann', nperseg=int(length), noverlap=int(length/2))
return pxx, f
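# Quick sanity check of the variance relation mentioned in the docstring
# (zero-mean white noise, hypothetical sampling rate of 1 kHz):
#   sr = 1000.0
#   x = np.random.randn(100000)
#   pxx, f = power_spectrum(x, 1000, 1.0 / sr)
#   np.var(x), np.sum(pxx) * (f[1] - f[0])   # the two numbers should roughly agree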
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
    then the first window goes from 0 s to 5 s, the second window from 2.5 to 7.5 s, ...
The last window ends at ceil(len(data)/length)*5s
    Another example: assume we have 13 s of data with 5 s windows; then the power density is calculated for the following
time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at 2*3-2 * (5/2) = 10 s
    Note: in this example the returned time axis goes from 0 to 10 s in 2.5 s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
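# Usage sketch (hypothetical 13 s EEG trace at sampling rate sr): with 5 s
# windows the spectrogram has 2*ceil(13/5)-1 = 5 columns spaced 2.5 s apart,
#   Pow, f, t = spectral_density(eeg, length=int(5 * sr), nfft=int(sr), dt=1.0 / sr)
#   Pow.shape[1] == len(t) == 5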
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
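# Usage sketch (hypothetical recording name): compute and store the spectrograms
# once, before running sleep_state() or any of the downstream analyses:
#   Pxx, Qxx, f, t = calculate_spectrum(ppath, 'M1_010120n1', fres=0.5)
#   # writes sp_M1_010120n1.mat and msp_M1_010120n1.mat with 2.5 s time bins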
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
    Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM - EEG, EMG spectrogram
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
    theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
    sdt          time bin for brain state, typically 2.5s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum( np.where(pow_mu[sstart:i]>thr_mu)[0] ) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
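# Usage sketch (hypothetical recordings of mouse 'M1'): running with psave=True
# stores the fitted thresholds in $ppath/M1_rem.txt, which
# recursive_sleepstate_rem_control() below reads back in:
#   recursive_sleepstate_rem(ppath, ['M1_010120n1', 'M1_010220n1'], psave=True)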
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
    algorithm running laser control for REM sleep dependent activation/inhibition.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
the laser stops, but we keep track of the missing time. The next time is laser turns on again,
it stays on for the duration of the most recent REM period + the remaining time.
    The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as,
theta/delta is above the soft threshold (and EMG power stays low) REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
    sdt        time bin for brain state, typically 2.5s
    delay      time to wait after a REM sleep period ends until the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by &recursive_sleepstate_rem || &recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
    predict NREM periods based only on EEG/EMG history; the same algorithm is also used for
closed-loop NREM sleep manipulation.
The algorithm uses for NREM sleep detection thresholds for delta power, EMG power, and theta/delta power.
    For delta I use two thresholds: a hard (larger) threshold and a soft (lower) threshold. Initially,
    delta power has to cross the hard threshold to initiate a NREM period. Then, as long as delta power
    is above the soft threshold (and EMG power stays low), NREM sleep continues.
The values for hard and soft threshold are fitted using a Gaussian mixture model
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
    :param alpha: smoothing factor along time dimension
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
    # to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
    plt.hist(pow_delta_fit, 100, density=True, histtype='stepfilled', alpha=0.4)
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
    # by nrem_idx; if pnrem_hidden == 1, then all threshold criteria, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
            # Delta power larger than the high threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
                # we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
    ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
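# Usage sketch (hypothetical recordings of mouse 'M1'): with psave=True the
# fitted delta/EMG thresholds are written to $ppath/M1_nrem.txt:
#   recursive_sleepstate_nrem(ppath, ['M1_010120n1', 'M1_010220n1'], psave=True)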
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
    :param recordings: list of strings, recordings
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
    :param overlap: float between 0 and 100; specifies percentage by which the online detected REM period has to
            overlap with real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
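# Usage sketch (hypothetical recording list): require at least 50% overlap
# between online-detected and annotated REM periods,
#   df = rem_online_analysis(ppath, ['M1_010120n1', 'M2_010120n1'], overlap=50)
#   df.mean()   # mean REM duration without ('ctr') and with ('exp') laser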
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
:param overlap: percentage (number between 0 and 100). Defines the percentage
how much a true (offline annotated) REM period should overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
    :param ma_thr: microarousal threshold (in seconds); wake episodes lasting at most ma_thr are re-assigned to NREM
:return: df, if single_mode == True $df is a pandas DataFrame:
REM iREM laser
mouse - mouse ID
REM - REM duration
iREM - inter REM duration after REM periods with laser
laser - 'y' or 'n'; depending on whether laser was on during REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, mouse is the data frame index
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
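# Usage sketch (hypothetical recording list): inter-REM durations restricted to
# NREM only (mode=3), treating wake bouts <= 20 s as microarousals:
#   df = online_homeostasis(ppath, recordings, mode=3, ma_thr=20)
#   df.groupby('laser')[['REM', 'iREM']].mean()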
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
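# Worked example: consecutive indices are grouped, and gaps larger than ibreak split,
#   get_sequences(np.array([1, 2, 3, 7, 8]))   # -> [array([1, 2, 3]), array([7, 8])]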
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
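# Worked example: indices where data >= 1 are merged with no gaps allowed (ibreak=0)
# and kept only if at least ilen=2 samples long; the result is a list of (start, end) pairs:
#   threshold_crossing(np.array([0, 1, 1, 0, 1, 1, 1, 0]), 1, 2, 0, 1)   # -> [(1, 2), (4, 6)]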
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
tmp = seq-i;
d = np.where(tmp<0)[0]
if len(d)>0:
id = seq[d[-1]];
else:
id = 0;
return id
def write_remidx(M, K, ppath, name, mode=1) :
"""
    write_remidx(M, K, ppath, name, mode=1)
    write the sleep-state sequence M and the annotation flags K to the remidx
    file of recording $name (mode=0: remidx_$name.txt, otherwise remidx_$name_corr.txt)
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: use also sigma band: that's very helpful to classify pre-REM periods
as NREM; otherwise they tend to be classified as wake.
Gamma peaks nicely pick up during microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
    :param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std * std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
    :param pspec_norm: boolean, if True, normalize EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency Bands/Ranges for delta, theta, and, gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]),
duration[1,0], duration[1,1], 1)
seq['high_delta'] = threshold_crossing(p_delta, np.nanmean(p_delta[use_idx]), duration[2,0], duration[2,1], 1)
seq['high_sigma'] = threshold_crossing(p_sigma, np.nanmean(p_sigma[use_idx]), duration[3,0], duration[3,1], 1)
seq['high_gamma'] = threshold_crossing(p_gamma, np.nanmean(p_gamma[use_idx]), duration[4,0], duration[4,1], 1)
# Sleep-State Rules
idx = {}
for k in seq:
tmp = [list(range(i,j+1)) for (i,j) in seq[k]]
        # no idea why this works to flatten a list
# idx[k] = sum(tmp, [])
# alternative that I understand:
if len(tmp) == 0:
idx[k] = np.array([])
else:
idx[k] = np.array(reduce(lambda x,y: x+y, tmp))
idx['low_emg'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_emg']))
idx['low_delta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_delta']))
idx['low_theta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_theta']))
#REM Sleep: thdel up, emg down, delta down
a = np.intersect1d(idx['high_theta'], idx['low_delta'])
# non high_emg phases
b = np.setdiff1d(a, idx['high_emg'])
rem = get_sequences(b, duration[0,1])
rem_idx = reduce(lambda x,y: np.concatenate((x,y)), rem)
# SWS Sleep
# delta high, no theta, no emg
a = np.setdiff1d(idx['high_delta'], idx['high_emg']) # no emg activation
b = np.setdiff1d(a, idx['high_theta']) # no theta;
sws = get_sequences(b)
sws_idx = reduce(lambda x,y: np.concatenate((x,y)), sws)
#print a
# Wake
# low delta + high emg and not rem
a = np.unique(np.union1d(idx['low_delta'], idx['high_emg']))
b = np.setdiff1d(a, rem_idx)
wake = get_sequences(b)
wake_idx = reduce(lambda x,y: np.concatenate((x,y)), wake)
# sequences with low delta, high sigma and low emg are NREM
a = np.intersect1d(np.intersect1d(idx['high_sigma'], idx['low_delta']), idx['low_emg'])
a = np.setdiff1d(a, rem_idx)
sws_idx = np.unique(np.union1d(a, sws_idx))
wake_idx = np.setdiff1d(wake_idx, a)
#NREM sequences with high gamma are wake
a = np.intersect1d(sws_idx, idx['high_gamma'])
sws_idx = np.setdiff1d(sws_idx, a)
wake_idx = np.unique(np.union1d(wake_idx,a))
# Wake and Theta
wake_motion_idx = np.intersect1d(wake_idx, idx['high_theta'])
# Wake w/o Theta
wake_nomotion_idx = np.setdiff1d(wake_idx, idx['high_theta'])
# Are there overlapping sequences?
a = np.intersect1d(np.intersect1d(rem_idx, wake_idx), sws_idx)
# Are there undefined sequences?
undef_idx = np.setdiff1d(np.setdiff1d(np.setdiff1d(np.arange(0,N), rem_idx), wake_idx), sws_idx)
# Wake wins over SWS
sws_idx = np.setdiff1d(sws_idx, wake_idx)
# Special rules
# if there's a REM sequence directly following a short wake sequence (PRE_WAKE_REM),
# this wake sequence goes to SWS
# NREM to REM transitions are sometimes mistaken as quiet wake periods
for rem_seq in rem:
if len(rem_seq) > 0:
irem_start = rem_seq[0]
# is there wake in the preceding bin?
if irem_start-1 in wake_idx:
# get the closest sws bin in the preceding history
isws_end = closest_precessor(sws_idx, irem_start)
if (irem_start - isws_end)*dt < PRE_WAKE_REM:
new_rem = np.arange(isws_end+1,irem_start)
rem_idx = np.union1d(rem_idx, new_rem)
wake_idx = np.setdiff1d(wake_idx, new_rem)
else:
new_wake = rem_seq
wake_idx = np.union1d(wake_idx, new_wake)
rem_idx = np.setdiff1d(rem_idx, new_wake)
# two different representations for the results:
S = {}
S['rem'] = rem_idx
S['nrem'] = sws_idx
S['wake'] = wake_idx
S['awake'] = wake_motion_idx
S['qwake'] = wake_nomotion_idx
M = np.zeros((N,))
if len(rem_idx) > 0:
M[rem_idx] = 1
if len(wake_idx) > 0:
M[wake_idx] = 2
if len(sws_idx) > 0:
M[sws_idx] = 3
if len(undef_idx) > 0:
M[undef_idx] = 0
# write sleep annotation to file
if pwrite:
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
print("writing annotation to %s" % outfile)
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M,np.zeros((N,)))]
f.writelines(s)
f.close()
# nice plotting
plt.ion()
if pplot:
plt.figure(figsize=(18,9))
axes1=plt.axes([0.1, 0.9, 0.8, 0.05])
A = np.zeros((1,len(M)))
A[0,:] = M
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,0,0], [0,1,1],[0.5,0,1], [0.8, 0.8, 0.8]], 4)
#tmp = axes1.imshow(A, vmin=0, vmax=3)
tmp = axes1.pcolorfast(t, [0,1], A, vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes1.axis('tight')
tmp.axes.get_xaxis().set_visible(False)
tmp.axes.get_yaxis().set_visible(False)
box_off(axes1)
# show spectrogram
axes2=plt.axes([0.1, 0.75, 0.8, 0.1], sharex=axes1)
ifreq = np.where(freq <= 30)[0]
med = np.median(SPEEG.max(axis=0))
if pspec_norm:
ifreq = np.where(freq <= 80)[0]
filt = np.ones((6, 1))
filt = filt / np.sum(filt)
SPEEG = scipy.signal.convolve2d(SPEEG, filt, mode='same')
spec_mean = SPEEG.mean(axis=1)
SPEEG = np.divide(SPEEG, np.repeat([spec_mean], SPEEG.shape[1], axis=0).T)
med = np.median(SPEEG.max(axis=0))
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax = med*vmax, cmap='jet')
else:
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax=med * vmax, cmap='jet')
axes2.axis('tight')
plt.ylabel('Freq (Hz)')
box_off(axes2)
# show delta power
axes3=plt.axes([0.1, 0.6, 0.8, 0.1], sharex=axes2)
axes3.plot(t,p_delta, color='gray')
plt.ylabel('Delta (a.u.)')
plt.xlim((t[0], t[-1]))
seq = get_sequences(S['nrem'])
#for s in seq:
# plt.plot(t[s],p_delta[s], color='red')
s = idx['high_delta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_delta[s], color='red')
box_off(axes3)
axes4=plt.axes([0.1, 0.45, 0.8, 0.1], sharex=axes3)
axes4.plot(t,p_sigma, color='gray')
plt.ylabel('Sigma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_sigma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_sigma[s], color='red')
box_off(axes4)
axes5=plt.axes([0.1, 0.31, 0.8, 0.1], sharex=axes4)
axes5.plot(t,th_delta, color='gray')
plt.ylabel('Th/Delta (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_theta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],th_delta[s], color='red')
box_off(axes5)
axes6=plt.axes([0.1, 0.17, 0.8, 0.1], sharex=axes5)
axes6.plot(t,p_gamma, color='gray')
plt.ylabel('Gamma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_gamma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_gamma[s], color='red')
box_off(axes6)
axes7=plt.axes([0.1, 0.03, 0.8, 0.1], sharex=axes6)
axes7.plot(t,p_mu, color='gray')
plt.xlabel('Time (s)')
plt.ylabel('EMG (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_emg']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_mu[s], color='red')
box_off(axes7)
plt.show()
# 2nd figure showing distribution of different bands
plt.figure(figsize=(20,3))
axes1 = plt.axes([0.05, 0.1, 0.13, 0.8])
plt.hist(p_delta, bins=100)
plt.plot(np.nanmean(p_delta), 10, 'ro')
plt.title('delta')
plt.ylabel('# Occurrences')
box_off(axes1)
axes1 = plt.axes([0.25, 0.1, 0.13, 0.8])
plt.hist(th_delta, bins=100)
plt.plot(np.nanmean(th_delta)+th_delta_std*np.nanstd(th_delta), 10, 'ro')
plt.title('theta/delta')
box_off(axes1)
axes1 = plt.axes([0.45, 0.1, 0.13, 0.8])
plt.hist(p_sigma, bins=100)
plt.plot(np.nanmean(p_sigma), 10, 'ro')
plt.title('sigma')
box_off(axes1)
axes1 = plt.axes([0.65, 0.1, 0.13, 0.8])
plt.hist(p_gamma, bins=100)
plt.plot(np.nanmean(p_gamma), 10, 'ro')
plt.title('gamma')
box_off(axes1)
axes1 = plt.axes([0.85, 0.1, 0.13, 0.8])
plt.hist(p_mu, bins=100)
plt.plot(np.nanmean(p_mu)+np.nanstd(p_mu), 10, 'ro')
plt.title('EMG')
plt.show(block=False)
box_off(axes1)
plt.show()
return M,S
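# Hedged usage sketch: the classifier above returns (M, S), where M is a per-bin
# annotation (0 undefined, 1 REM, 2 Wake, 3 NREM) and S a dict of index arrays.
# The helper below only summarizes M; the 2.5 s default bin size is an assumption
# borrowed from the other routines in this module, not a fixed requirement.
def _example_state_summary(M, dt=2.5):
    """Return total time (in seconds) spent in each vigilance state."""
    import numpy as np
    labels = {0: 'Undef', 1: 'REM', 2: 'Wake', 3: 'NREM'}
    M = np.asarray(M)
    return {labels[s]: float(np.sum(M == s) * dt) for s in labels}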
def plot_hypnograms(ppath, recordings, tbin=0, unit='h', ma_thr=20, title='', tstart=0, tend=-1):
"""
plot all hypnograms specified in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param tbin: tbin for xticks
:param unit: time unit; h - hour, min - minute, s - second
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param tstart: float, start time point (in seconds!) of hypnograms
:param tend: float, last shown time point (in seconds!)
:param title: optional title for figure
"""
recordings = recordings[::-1]
sr = get_snr(ppath, recordings[0])
nbin = int(np.round(sr) * 2.5)
dt_sec = (1.0 / sr) * nbin
istart = int(np.round(tstart/dt_sec))
dt = dt_sec
if unit == 'h':
dt /= 3600
elif unit == 'min':
dt /= 60
rec_len = dict()
irec = 0
ny = (1.0-0.2) / len(recordings)
dy = ny * 0.75
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.5, 0, 1], [0.8, 0.8, 0.8]], 4)
plt.ion()
plt.figure(figsize=(9,4))
axes = []
for rec in recordings:
M,K = load_stateidx(ppath, rec)
#kcut = np.where(K<0)[0]
#M = M[kcut]
#M[kcut] = 0
if tend == -1:
iend = len(M)
else:
iend = int(tend/dt_sec)
M = M[istart:iend]
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt_sec <= ma_thr:
M[s] = 3
rec_len[rec] = len(M)*dt
t = np.arange(0, len(M))*dt
ax = plt.axes([0.05, ny*irec+0.15, 0.75, dy])
tmp = ax.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3, cmap=my_map)
box_off(ax)
ax.axis('tight')
tmp.axes.get_yaxis().set_visible(False)
if irec > 0:
tmp.axes.get_xaxis().set_visible(False)
if irec == 0:
plt.xlabel('Time (%s)' % unit)
irec += 1
axes.append(ax)
if len(title) > 0:
plt.title(title)
max_dur = max(rec_len.values())
if tbin > 0:
xtick = np.arange(0, max_dur, tbin)
for (ax, rec) in zip(axes, recordings):
ax.set_xlim([0, max_dur])
if tbin > 0:
ax.set_xticks(xtick)
ax.text(max_dur+max_dur*0.01, 0.5, rec)
plt.show()
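# Hedged example call for plot_hypnograms(); the recording names below are
# placeholders for folders under ppath, not actual data:
#
#   recs = ['M1_010121n1', 'M2_010121n1']
#   plot_hypnograms(ppath, recs, tbin=1, unit='h', ma_thr=20, title='hypnograms')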
def plot_swa(ppath, name, delta_win, alpha, band=[0.5, 4.5], swa_yrange=[]):
"""
plot slow wave (delta) activity during NREM
The top plot shows the hypnogram.
The middle plot shows the delta power (irrespective of brain state) as line plot
The bottom plot shows, for consecutive $delta_win second-long bins, the
median delta power (SWA) during NREM, if the ratio of NREM within the
corresponding bin is >= $alpha
Example call:
dm=plot_swa(ppath, name, 30, 0.5, swa_yrange=[0, 0.012])
:param ppath, name: basefolder, recording name
:param delta_win: plot median swa value for each consecutive $delta_win seconds long window, if
:param alpha: the ratio of NREM in this window is larger than alpha (value between 0 and 1)
:param swa_yrange: tuple, minimum and maximum value of yrange for SWA
:return df: pd.DataFrame with SWA time points and corresponding median SWA values
"""
r_delta = band
sr = get_snr(ppath, name)
nbin = int(np.round(2.5*sr))
dt = nbin*(1.0/sr)
M,_ = load_stateidx(ppath, name)
t = np.arange(0, len(M))*dt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SP = P['SP']
freq = P['freq']
df = freq[1]-freq[0]
idelta = np.where((freq>=r_delta[0]) & (freq<=r_delta[1]))[0]
pow_delta = SP[idelta,:].sum(axis=0)*df
# get NREM sequences contributing points for fitting
iwin = int(delta_win/dt)
#seq = get_sequences(nrem_idx, ibreak=int((delta_win/dt)*0.1))
delta_med = []
for j in range(0, len(M)-iwin, iwin):
s = range(j, j+iwin)
sc = j+int(iwin/2)
Mcut = M[s]
if (1.0*len(np.where(Mcut==3)[0])) / len(s) >= alpha:
i = np.where(Mcut==3)[0]
i = i+s[0]
a = np.median(pow_delta[i])
delta_med.append((t[sc],a))
df = pd.DataFrame(columns=['time', 'pow'], data=delta_med)
# generate figure
# show brainstate
plt.ion()
plt.figure(figsize=(10, 4))
axes_brs = plt.axes([0.1, 0.85, 0.8, 0.1])
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
tmp = axes_brs.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
axes_brs.axes.get_xaxis().set_visible(False)
axes_brs.axes.get_yaxis().set_visible(False)
axes_brs.spines["top"].set_visible(False)
axes_brs.spines["right"].set_visible(False)
axes_brs.spines["bottom"].set_visible(False)
axes_brs.spines["left"].set_visible(False)
# plot delta power as function of time
c = 1000**2
axes_tdelta = plt.axes([0.1, 0.55, 0.8, 0.2], sharex=axes_brs)
plt.plot(t, pow_delta/c, 'k')
box_off(axes_tdelta)
axes_tdelta.axes.get_xaxis().set_visible(False)
axes_tdelta.spines["bottom"].set_visible(False)
plt.ylabel('SWA (mV$\mathrm{^2}$)')
# plot delta power medians
axes_delta = plt.axes([0.1, 0.12, 0.8, 0.35], sharex=axes_brs)
for (s,delta) in delta_med:
plt.plot(s, delta/c, 'ko')
plt.xlim((t[0], t[-1]))
box_off(axes_delta)
plt.xlabel('Time (s)')
plt.ylabel('NREM SWA (mV$\mathrm{^2}$)')
if swa_yrange == []:
ymax = df['pow'].max()/c
plt.ylim([0, ymax+0.1*ymax])
else:
plt.ylim(swa_yrange)
plt.show()
return df
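# Hedged follow-up sketch: plot_swa() returns a DataFrame with columns 'time' (s)
# and 'pow' (median NREM delta power per window). A simple linear trend across the
# recording could be estimated as below; the helper is illustrative only.
def _example_swa_trend(df):
    """Fit a linear trend to NREM SWA over time; returns (slope, intercept)."""
    import numpy as np
    slope, intercept = np.polyfit(df['time'].values, df['pow'].values, 1)
    return slope, intercept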
def laser_triggered_eeg(ppath, name, pre, post, f_max, pnorm=2, pplot=False, psave=False, tstart=0, tend=-1,
peeg2=False, vm=2.5, prune_trials=True, mu=[10, 200], trig_state=0, harmcs=0, iplt_level=1):
"""
calculate laser triggered, averaged EEG and EMG spectrum
:param ppath: base folder containing mouse recordings
:param name: recording
:param pre: time before laser
:param post: time after laser
:param f_max: calculate/plot frequencies up to frequency f_max
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param vm: float to set saturation level of colormap
:param pplot: plot figure yes=True, no=False
:param psave: save the figure, yes=True, no = False
:param tstart: float, starting time point. Only laser trials after tstart will be considered
:param tend: float, only laser trials up to tend will be considered; if tend==-1, use whole recording
:param peeg2: if True, use EEG channel 2
:param prune_trials: if True, throw out trials with EEG or EMG artifacts
:param mu: tuple; range used for EMG amplitude calculation
:param trig_state: int; if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
"""
def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
df = freq[2]-freq[1]
for h in np.arange(harmcs, f_max, harmcs):
i = np.argmin(np.abs(freq - h))
if np.abs(freq[i] - h) < df and h != 60:
if iplt_level == 2:
SP[i,:] = (SP[i-2:i,:] + SP[i+1:i+3,:]).mean(axis=0) * 0.5
else:
SP[i,:] = (SP[i-1,:] + SP[i+1,:]) * 0.5
return SP
SR = get_snr(ppath, name)
NBIN = np.round(2.5*SR)
lsr = load_laser(ppath, name)
idxs, idxe = laser_start_end(lsr)
laser_dur = np.mean((idxe-idxs)/SR)
print('%s: Average laser duration: %f; Number of trials %d' % (name, laser_dur, len(idxs)))
# downsample EEG time to spectrogram time
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
#load EEG and EMG
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
if not peeg2:
SPEEG = np.squeeze(P['SP'])
else:
SPEEG = np.squeeze(P['SP2'])
SPEMG = np.squeeze(Q['mSP'])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
ifreq = np.where(freq<=f_max)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
speeg_mean = SPEEG.mean(axis=1)
spemg_mean = SPEMG.mean(axis=1)
# interpolate frequencies corresponding to harmonics of $harmcs
if harmcs > 0:
SPEEG = _interpolate_harmonics(SPEEG, freq, f_max, harmcs, iplt_level)
SPEMG = _interpolate_harmonics(SPEMG, freq, f_max, harmcs, iplt_level)
if tend > -1:
i = np.where((np.array(idxs)*dt >= tstart) & (np.array(idxs)*dt <= tend))[0]
else:
i = np.where(np.array(idxs)*dt >= tstart)[0]
idxs = [idxs[j] for j in i]
idxe = [idxe[j] for j in i]
skips = []
skipe = []
if prune_trials:
for (i,j) in zip(idxs, idxe):
A = SPEEG[0,i-ipre:i+ipost+1] / speeg_mean[0]
B = SPEMG[0,i-ipre:i+ipost+1] / spemg_mean[0]
k = np.where(A >= np.median(A)*50)[0]
l = np.where(B >= np.median(B)*500)[0]
if len(k) > 0 or len(l) > 0:
skips.append(i)
skipe.append(j)
print("%s: kicking out %d trials" % (name, len(skips)))
idxs_new = []
idxe_new = []
for (i,j) in zip(idxs, idxe):
if not i in skips:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# select trials where brain state is right before laser in trig_state
if trig_state > 0:
idxs_new = []
idxe_new = []
M = load_stateidx(ppath, name)[0]
for (i,j) in zip(idxs, idxe):
if i < len(M) and M[i] == trig_state:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# Spectrogram for EEG and EMG normalized by average power in each frequency band
if pnorm == 1:
SPEEG = np.divide(SPEEG, np.repeat(speeg_mean, len(t)).reshape(len(speeg_mean), len(t)))
SPEMG = np.divide(SPEMG, np.repeat(spemg_mean, len(t)).reshape(len(spemg_mean), len(t)))
speeg_parts = []
spemg_parts = []
for (i,j) in zip(idxs, idxe):
if i>=ipre and j+ipost < len(t):
eeg_part = SPEEG[ifreq,i-ipre:i+ipost+1]
speeg_parts.append(eeg_part)
spemg_parts.append(SPEMG[ifreq,i-ipre:i+ipost+1])
EEGLsr = np.array(speeg_parts).mean(axis=0)
EMGLsr = np.array(spemg_parts).mean(axis=0)
# smooth spectrogram
nfilt = 3
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
EEGLsr = scipy.signal.convolve2d(EEGLsr, filt, boundary='symm', mode='same')
if pnorm == 2:
for i in range(EEGLsr.shape[0]):
EEGLsr[i,:] = np.divide(EEGLsr[i,:], np.sum(np.abs(EEGLsr[i,0:ipre]))/(1.0*ipre))
EMGLsr[i,:] = np.divide(EMGLsr[i,:], np.sum(np.abs(EMGLsr[i,0:ipre]))/(1.0*ipre))
# get time axis
dt = (1.0/SR)*NBIN
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
f = freq[ifreq]
if pplot:
# get rid of boxes around matplotlib plots
def box_off(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.ion()
plt.figure(figsize=(10,8))
ax = plt.axes([0.1, 0.55, 0.4, 0.35])
plt.pcolormesh(t,f,EEGLsr, vmin=0, vmax=np.median(EEGLsr)*vm, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.62, 0.55, 0.35, 0.35])
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,0:ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.1, 0.4, 0.35])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power (uV^2s)')
ax = plt.axes([0.62, 0.1, 0.35, 0.35])
mf = np.where((f>=mu[0]) & (f <= mu[1]))[0]
df = f[1]-f[0]
# amplitude is square root of (integral over each frequency)
avg_emg = np.sqrt(EMGLsr[mf,:].sum(axis=0)*df)
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
if psave:
img_file = os.path.join(ppath, name, 'fig_'+name+'_spec.png')
save_figure(img_file)
return EEGLsr, EMGLsr, freq[ifreq], t
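# Hedged example call for laser_triggered_eeg(); the recording name is a
# placeholder and the parameter values are illustrative, not prescriptive:
#
#   EEGLsr, EMGLsr, freq, t = laser_triggered_eeg(ppath, 'M1_010121n1',
#                                                 pre=60, post=180, f_max=30,
#                                                 pnorm=2, pplot=True)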
def laser_triggered_eeg_avg(ppath, recordings, pre, post, f_max, laser_dur, pnorm=1, pplot=1, tstart=0, tend=-1,
vm=[], cb_ticks=[], mu=[10, 100], trig_state=0, harmcs=0, iplt_level=1, peeg2=False, fig_file=''):
"""
calculate average spectrogram for all recordings listed in @recordings; for averaging take
mouse identity into account
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser onset
:param post: time after laser onset
:param f_max: maximum frequency shown for EEG spectrogram
:param laser_dur: duration of laser stimulation
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param pplot: pplot==0 - no figure;
pplot==1 - conventional figure;
pplot==2 - pretty figure showing EEG spectrogram
along with EMG amplitude
note: the errorbar for the EMG amplitude is the S.E.M.
:param tstart: only consider laser trials with laser onset after tstart seconds
:param tend: only consider laser trials with laser onset before tend seconds
:param vm: saturation of heatmap for EEG spectrogram
:param cb_ticks: ticks for colorbar (only applies for pplot==2)
:param mu: frequencies for EMG amplitude calculation
:param trig_state: if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param peeg2: if True, use EEG2 instead of EEG
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
:param fig_file: if specified, save figure to given file
:return:
t, f, EEGSpec, EMGSpec, EEGLsr
t - time axis
f - frequency axis
EEGSpec - dict with mouse id -> 2D np.array(frequency x time)
EMGSpec - dict with mouse id -> 2D np.array(frequency x time)
EEGLsr - 2D np.array(frequency x time)
"""
EEGSpec = {}
EMGSpec = {}
mice = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not(idf in mice):
mice.append(idf)
EEGSpec[idf] = []
EMGSpec[idf] = []
for rec in recordings:
idf = re.split('_', rec)[0]
EEG, EMG, f, t = laser_triggered_eeg(ppath, rec, pre, post, f_max, mu=mu, pnorm=pnorm, pplot=False,
psave=False, tstart=tstart, tend=tend, trig_state=trig_state,
peeg2=peeg2, harmcs=harmcs, iplt_level=iplt_level)
EEGSpec[idf].append(EEG)
EMGSpec[idf].append(EMG)
for idf in mice:
EEGSpec[idf] = np.array(EEGSpec[idf]).mean(axis=0)
EMGSpec[idf] = np.array(EMGSpec[idf]).mean(axis=0)
EEGLsr = np.array([EEGSpec[k] for k in mice]).mean(axis=0)
EMGLsr = np.array([EMGSpec[k] for k in mice]).mean(axis=0)
mf = np.where((f >= mu[0]) & (f <= mu[1]))[0]
if harmcs > 0:
harm_freq = np.arange(0, f.max(), harmcs)
for h in harm_freq:
mf = np.setdiff1d(mf, mf[np.where(f[mf]==h)[0]])
df = f[1] - f[0]
EMGAmpl = np.zeros((len(mice), EEGLsr.shape[1]))
i=0
for idf in mice:
# amplitude is square root of (integral over each frequency)
if harmcs == 0:
EMGAmpl[i,:] = np.sqrt(EMGSpec[idf][mf,:].sum(axis=0)*df)
else:
tmp = 0
for qf in mf:
tmp += EMGSpec[idf][qf,:] * (f[qf] - f[qf-1])
EMGAmpl[i,:] = np.sqrt(tmp)
i += 1
avg_emg = EMGAmpl.mean(axis=0)
sem_emg = EMGAmpl.std(axis=0) / np.sqrt(len(mice))
if pplot==1:
plt.ion()
plt.figure(figsize=(12,10))
ax = plt.axes([0.1, 0.55, 0.4, 0.4])
if len(vm) == 2:
plt.pcolormesh(t,f,EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
plt.pcolormesh(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.55, 0.3, 0.4])
ipre = np.where(t<0)[0]
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.05, 0.4, 0.4])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.05, 0.3, 0.4])
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
elif pplot==2:
# pretty figure
plt.figure()
if len(vm) > 0:
cb_ticks = vm
# plot EEG spectrogram
axes_cbar = plt.axes([0.8, 0.75, 0.1, 0.2])
ax = plt.axes([0.1, 0.55, 0.75, 0.4])
if len(vm) == 2:
im=ax.pcolorfast(t, f, EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
im = ax.pcolorfast(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
# colorbar for EEG spectrogram
cb = plt.colorbar(im, ax=axes_cbar, pad=0.0, aspect=10.0, orientation='vertical')
if pnorm > 0:
cb.set_label('Rel. Power')
else:
cb.set_label('Power (uV^2s)')
cb.ax.xaxis.set_ticks_position("bottom")
cb.ax.xaxis.set_label_position('top')
if len(cb_ticks) > 0:
cb.set_ticks(cb_ticks)
axes_cbar.set_alpha(0.0)
axes_cbar.spines["top"].set_visible(False)
axes_cbar.spines["right"].set_visible(False)
axes_cbar.spines["bottom"].set_visible(False)
axes_cbar.spines["left"].set_visible(False)
axes_cbar.axes.get_xaxis().set_visible(False)
axes_cbar.axes.get_yaxis().set_visible(False)
# EMG amplitude
ax = plt.axes([0.1, 0.1, 0.75, 0.3])
m = np.max(avg_emg) * 1.5
ax.add_patch(patches.Rectangle((0, 0), laser_dur, np.max(avg_emg)*1.5, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim((t[0], t[-1]))
plt.ylim((0, m))
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg, color='gray', zorder=2)
plt.plot(t, avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. ($\mathrm{\mu V}$)')
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return t, f, EEGSpec, EMGSpec, EEGLsr
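# Hedged example call for laser_triggered_eeg_avg(); recordings and laser_dur are
# placeholders chosen for illustration only:
#
#   recs = ['M1_010121n1', 'M2_010121n1']
#   t, f, EEGSpec, EMGSpec, EEGLsr = laser_triggered_eeg_avg(ppath, recs, pre=60,
#                                        post=180, f_max=30, laser_dur=120,
#                                        pnorm=2, pplot=1)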
def laser_brainstate(ppath, recordings, pre, post, pplot=True, fig_file='', start_time=0, end_time=-1,
ma_thr=0, edge=0, sf=0, cond=0, single_mode=False, ci=95, backup='', csv_file=''):
"""
calculate laser triggered probability of REM, Wake, NREM
ppath - base folder holding all recording
recordings - list of recording
pre - time before laser onset
post - time after laser onset
@Optional:
pplot - pplot==True: plot figure
fig_file - specify filename including ending, if you wish to save figure
start_time - in [s], only consider laser onsets starting after $start_time
end_time - in [s], only consider laser onsets starting before $end_time
sf - smoothing factor for Gaussian kernel; if sf=0, no filtering
edge - only use if $sf > 0: to avoid smoothing artifacts, set edge to a value > 0, e.g. 20
ma_thr - if > 0, smooth out microarousals with duration < $ma_thr
cond - cond==0: consider all trials; cond==[1,2,3] only plot trials,
where mouse was in REM, Wake, or NREM as laser turned on
single_mode - if True, plot every single mouse
backup - optional backup folder; if specified each single recording folder can be either on $ppath or $backup;
if it's on both folders, the version in ppath is used
ci - string; possible values: 'sem', 'sd', or a value between 0 and 100, corresponding
to the bootstrapped confidence interval. The default is ci=95
csv_file - if filename (without or including full file path) is provided,
save pd.DataFrame df (see @Return) to csv file
@Return:
df_timecourse: pd.DataFrame with columns: mouse, time, perc, state.
df: pd.DataFrame with columns mouse_id, REM, NREM, Wake, Lsr
Lsr has three values: 0 - before laser, 1 - during laser, 2 - after laser
if laser was on for laser_dur s, then
df[df['Lsr'] == 1]['REM'] is the average % of REM sleep during laser stimulation for each mouse
df[df['Lsr'] == 0]['REM'] is the average % of REM sleep
during the laser_dur s long time interval preceding laser onset.
df[df['Lsr'] == 2]['REM'] is the average during the time interval of duration laser_dur that
directly follows laser stimulation
"""
if type(recordings) != list:
recordings = [recordings]
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
pre += edge
post += edge
BrainstateDict = {}
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
if not idf in mouse_order:
mouse_order.append(idf)
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1.0/SR
istart_time = int(np.round(start_time / dt))
M = load_stateidx(ppath, rec)[0]
if end_time == -1:
iend_time = len(M)
else:
iend_time = int(np.round(end_time / dt))
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
(idxs, idxe) = laser_start_end(load_laser(ppath, rec))
idf = re.split('_', rec)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
laser_dur = np.mean((np.array(idxe) - np.array(idxs))) * dt
for (i,j) in zip(idxs, idxe):
if i>=ipre and i+ipost<=len(M)-1 and i>istart_time and i < iend_time:
bs = M[i-ipre:i+ipost+1]
BrainstateDict[idf].append(bs)
# I assume here that every recording has same dt
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
# first time point where the laser was fully on (during the complete bin).
izero = np.where(t>0)[0][0]
# the first time bin overlapping with laser is then
izero -= 1
# @BS: mouse x time x state
BS = np.zeros((nmice, len(t), 3))
Trials = []
imouse = 0
for mouse in mouse_order:
if cond==0:
M = np.array(BrainstateDict[mouse])
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
if cond>0:
M = BrainstateDict[mouse]
Msel = []
for trial in M:
if trial[izero] == cond:
Msel.append(trial)
M = np.array(Msel)
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
imouse += 1
# flatten Trials
Trials = reduce(lambda x,y: np.concatenate((x,y), axis=0), Trials)
BS = BS*100
if sf > 0:
for state in [2, 1, 0]:
for i in range(nmice):
BS[i, :, state] = smooth_data(BS[i, :, state], sf)
df_timecourse = pd.DataFrame()
state_map = {1: 'REM', 2: 'Wake', 3: 'NREM'}
for s in state_map:
df = nparray2df(BS[:, :, s - 1], mouse_order, t, 'perc', 'mouse', 'time')
df['state'] = state_map[s]
df_timecourse = pd.concat([df_timecourse, df], ignore_index=True)
nmice = imouse
if pplot:
state_label = {0:'REM', 1:'Wake', 2:'NREM'}
it = np.where((t >= -pre + edge) & (t <= post - edge))[0]
plt.ion()
if not single_mode:
plt.figure()
ax = plt.axes([0.15, 0.15, 0.6, 0.7])
colors = [[0, 1, 1 ],[0.5, 0, 1],[0.6, 0.6, 0.6]]
if ci == 'sem':
for state in [2,1,0]:
tmp = BS[:, :, state].mean(axis=0)
plt.plot(t[it], tmp[it], color=colors[state], lw=3, label=state_label[state])
if nmice > 1:
smp = BS[:,:,state].std(axis=0) / np.sqrt(nmice)
plt.fill_between(t[it], tmp[it]-smp[it], tmp[it]+smp[it], color=colors[state], alpha=0.4, zorder=3)
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
#plt.legend(bbox_to_anchor=(0., 1.02, 0.5, .102), loc=3, ncol=3, borderaxespad=0.)
plt.draw()
else:
bs_colors = {'REM': [0, 1, 1], 'Wake': [0.5, 0, 1], 'NREM': [0.6, 0.6, 0.6]}
dfm = df_timecourse.groupby(['mouse', 'state', 'time']).mean().reset_index()
for s in [3, 2, 1]:
sns.lineplot(data=dfm[dfm.state == state_map[s]], ci=ci, x='time', y='perc',
color=bs_colors[state_map[s]], err_kws={'alpha': 0.8, 'zorder': 3})
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
else:
plt.figure(figsize=(7,7))
clrs = sns.color_palette("husl", nmice)
for state in [2,1,0]:
ax = plt.subplot(3, 1, 3-state)
for i in range(nmice):
plt.plot(t[it], BS[i,it,state], color=clrs[i], label=mouse_order[i])
ax.add_patch(patches.Rectangle((0, 0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1], alpha=0.8))
plt.xlim((t[it][0], t[it][-1]))
plt.ylim((0,100))
plt.ylabel('% ' + state_label[state])
if state==0:
plt.xlabel('Time (s)')
else:
ax.set_xticklabels([])
if state==2:
ax.legend(mouse_order, bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order),
frameon=False)
box_off(ax)
# figure showing all trials
plt.figure(figsize=(4,6))
set_fontarial()
plt.ion()
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,1,1],[0.5,0,1], [0.6, 0.6, 0.6]], 3)
x = list(range(Trials.shape[0]))
plt.pcolormesh(t,np.array(x), np.flipud(Trials), cmap=my_map, vmin=1, vmax=3)
plt.plot([0,0], [0, len(x)-1], color='white')
plt.plot([laser_dur,laser_dur], [0, len(x)-1], color='white')
ax.axis('tight')
plt.draw()
plt.xlabel('Time (s)')
plt.ylabel('Trial No.')
box_off(ax)
plt.show()
if len(fig_file)>0:
plt.savefig(fig_file)
# compile dataframe with all baseline and laser values
ilsr = np.where((t>=0) & (t<=laser_dur))[0]
ibase = np.where((t>=-laser_dur) & (t<0))[0]
iafter = np.where((t>=laser_dur) & (t<laser_dur*2))[0]
df = pd.DataFrame(columns = ['Mouse', 'REM', 'NREM', 'Wake', 'Lsr'])
mice = mouse_order + mouse_order + mouse_order
lsr = np.concatenate((np.ones((nmice,), dtype='int'), np.zeros((nmice,), dtype='int'), np.ones((nmice,), dtype='int')*2))
df['Mouse'] = mice
df['Lsr'] = lsr
df['REM'] = np.concatenate((BS[:,ilsr,0].mean(axis=1), BS[:,ibase,0].mean(axis=1), BS[:,iafter,0].mean(axis=1)))
df['NREM'] = np.concatenate((BS[:,ilsr,2].mean(axis=1), BS[:,ibase,2].mean(axis=1), BS[:,iafter,2].mean(axis=1)))
df['Wake'] = np.concatenate((BS[:,ilsr,1].mean(axis=1), BS[:,ibase,1].mean(axis=1), BS[:,iafter,1].mean(axis=1)))
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return df_timecourse, df, Trials
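# Hedged follow-up sketch: the second DataFrame returned by laser_brainstate()
# has columns 'Mouse', 'REM', 'NREM', 'Wake', 'Lsr' (0 pre-laser, 1 laser, 2 post).
# A paired comparison of REM percentage before vs. during laser could look like
# this; the use of scipy.stats here is an assumption about the available tooling.
def _example_rem_laser_test(df):
    """Paired t-test of REM percentage, baseline (Lsr==0) vs. laser (Lsr==1)."""
    from scipy import stats
    base = df[df.Lsr == 0].sort_values('Mouse')['REM'].values
    lsr = df[df.Lsr == 1].sort_values('Mouse')['REM'].values
    return stats.ttest_rel(base, lsr)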
def laser_brainstate_bootstrap(ppath, recordings, pre, post, edge=0, sf=0, nboots=1000, alpha=0.05, backup='',
start_time=0, ma_thr=20, bootstrap_mode=0, fig_file=''):
"""
Align brain state with laser stimulation and calculate two-sided 1-$alpha confidence intervals using
bootstrapping.
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser
:param post: time after laser onset
:param edge: add $edge seconds at the beginning and end (that are not shown in the plot) to avoid filtering artifacts
:param sf: smoothing factor for Gaussian filter; better left at 0 (no smoothing)
:param nboots: int, how many times the whole data set is resampled for boot-strapping
:param alpha: plot shows 1-$alpha confidence interval
:param backup: optional backup folder where recordings are stored
:param start_time: start time of recording used for analysis
:param ma_thr: wake periods <= $ma_thr s are counted as microarousals and set to NREM
:param bootstrap_mode: default=0
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different numbers of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
randomly select (with replacement) n mice; then select from each chosen mouse one random trial.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account. That is,
bootstrapping models the variance expected when redoing the experiment with exactly the same mice.
:param fig_file, if file name is specified, the figure will be saved
:return: P - p-values for NREM, REM, Wake
Mod - by how much the percentage of NREM, REM, Wake is increased compared to baseline
"""
pre += edge
post += edge
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
# dict: mouse_id --> laser trials, R W N sequence
BrainstateDict = {}
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
mice = list(BrainstateDict.keys())
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5 * SR)
dt = NBIN * 1 / SR
istart_time = int(np.round(start_time / dt))
M = load_stateidx(ppath, rec)[0]
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
M[s] = 3
(idxs, idxe) = laser_start_end(load_laser(ppath, rec))
idf = re.split('_', rec)[0]
#SR = get_snr(ppath, rec)
#NBIN = np.round(2.5 * SR)
ipre = int(np.round(pre / dt))
ipost = int(np.round(post / dt))
idxs = [int(i / NBIN) for i in idxs]
idxe = [int(i / NBIN) for i in idxe]
laser_dur = np.mean((np.array(idxe) - np.array(idxs))) * dt
for (i, j) in zip(idxs, idxe):
if i >= ipre and j + ipost <= len(M) - 1 and i > istart_time:
bs = M[i - ipre:i + ipost + 1]
BrainstateDict[idf].append(bs)
for mouse in mice:
BrainstateDict[mouse] = np.array(BrainstateDict[mouse])
# I assume here that every recording has same dt
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
Trials = dict()
for mouse in BrainstateDict:
Trials[mouse] = np.zeros((BrainstateDict[mouse].shape[0], len(t), 3))
# total number of trials:
ntrials = 0
for mouse in BrainstateDict:
M = np.array(BrainstateDict[mouse])
for state in range(1, 4):
C = np.zeros(M.shape)
C[np.where(M == state)] = 100.
Trials[mouse][:,:,state-1] = C
ntrials += Trials[mouse].shape[0]
Prob = np.zeros((nboots, len(t), 3))
if bootstrap_mode == 1:
for b in range(nboots):
# average brain state percentage for each mouse during iteration b
mouse_mean_state = np.zeros((nmice, len(t), 3))
i = 0
for mouse in mice:
mmouse = Trials[mouse].shape[0]
iselect = rand.randint(0, mmouse, (mmouse,))
for s in [1,2,3]:
#bBS[s][offset:offset+mmouse,:] = Trials[mouse][iselect,:,s-1]
mouse_mean_state[i,:,s-1] = Trials[mouse][iselect,:,s-1].mean(axis=0)
i += 1
for s in [1,2,3]:
Prob[b,:,s-1] = mouse_mean_state[:,:,s-1].mean(axis=0)
else:
mx_iter = np.zeros((ntrials, len(t), 3))
for b in range(nboots):
# for each iteration, randomly select ntrials mice (with replacement)
irand_mice = rand.randint(0, nmice, ntrials)
# average brain state percentage for each mouse during iteration b
# mouse_mean_state = np.zeros((nmice, len(t), 3))
i = 0
# loop over the ntrials randomly selected mice
for j in irand_mice:
mouse = mice[j]
# number of trials available for the current mouse
mmouse = Trials[mouse].shape[0]
# select one random trial from the current mouse
iselect = rand.randint(0, mmouse)
for s in [1, 2, 3]:
mx_iter[i,:,s-1] = Trials[mouse][iselect,:,s-1]
i += 1
# mx_iter is the resampled data set for bootstrap iteration b
# now we calculate the statistic we're interested in, which is the mean
for s in [1, 2, 3]:
Prob[b,:,s-1] = mx_iter[:,:,s-1].mean(axis=0)
# simple average for each brainstate across mice (without bootstrapping)
Prob_mean = np.zeros((nmice, len(t), 3))
for s in [1,2,3]:
i = 0
for mouse in mice:
Prob_mean[i,:,s-1] = Trials[mouse][:,:,s-1].mean(axis=0)
i += 1
usProb = Prob.copy()
Prob = np.sort(Prob, axis=0)
Bounds = np.zeros((2, len(t), 3))
a = int((nboots * alpha) / 2.0)
for s in [1,2,3]:
Bounds[0,:,s-1] = Prob[a,:,s-1]
Bounds[1,:,s-1] = Prob[-a,:, s-1]
# smooth_data
if sf > 0:
for s in range(3):
Bounds[0, :, s] = smooth_data(Bounds[0, :, s], sf)
Bounds[1, :, s] = smooth_data(Bounds[1, :, s], sf)
for i in range(nmice):
for s in range(3):
Prob_mean[i, :, s] = smooth_data(Prob_mean[i,:,s], sf)
# plot figure
colors = np.array([[0, 1, 1], [0.5, 0, 1], [0.6, 0.6, 0.6]])
br_states = {1:'REM', 2:'Wake', 3:'NREM'}
#colors = np.array([[55,255,255], [153,255,153],[153,153,153]])/255.
it = np.where((t>=-pre+edge) & (t<=post-edge))[0]
plt.ion()
plt.figure()
ax = plt.axes([0.15, 0.15, 0.6, 0.7])
for s in [3,2,1]:
ax.fill_between(t[it], Bounds[0,it,s-1], Bounds[1,it,s-1], color=colors[s-1,:], alpha=0.8, zorder=3, edgecolor=None)
ax.plot(t[it], Prob_mean[:, it, s-1].mean(axis=0), color=colors[s-1,:], label=br_states[s])
ax.add_patch(patches.Rectangle((0, 0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
plt.xlabel('Time (s)')
plt.ylabel('Brain state (%)')
plt.legend(bbox_to_anchor = (1.0, 0.7, 1., .102), loc = 3, mode = 'expand', ncol = 1, frameon = False)
box_off(ax)
plt.draw()
# statistics
# OLD VERSION
# ibase = np.where((t>=-laser_dur) & (t<0))[0]
# ilsr = np.where((t>=0) & (t<laser_dur))[0]
# P = np.zeros((3,))
# Mod = np.zeros((3,))
# for istate in [1,2,3]:
# basel = usProb[:,ibase,istate-1].mean(axis=1)
# laser = usProb[:,ilsr, istate-1].mean(axis=1)
# d = laser - basel
# if np.mean(d) >= 0:
# # now we want all values be larger than 0
# p = len(np.where(d>0)[0]) / (1.0*nboots)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboots
# Mod[istate-1] = (np.mean(laser) / np.mean(basel) - 1) * 100
# else:
# p = len(np.where(d<0)[0]) / (1.0*nboots)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboots
# Mod[istate-1] = -(1 - np.mean(laser) / np.mean(basel)) * 100
# P[istate-1] = sig
# NEW VERSION
ibase = np.where((t>=-laser_dur) & (t<0))[0]
ilsr = np.where((t>=0) & (t<laser_dur))[0]
P = np.zeros((3,))
Mod = np.zeros((3,))
for istate in [1,2,3]:
basel = usProb[:,ibase,istate-1].mean(axis=1)
laser = usProb[:,ilsr, istate-1].mean(axis=1)
d = laser - basel
p = 2 * np.min([len(np.where(d > 0)[0]) / nboots, len(np.where(d <= 0)[0]) / nboots])
if np.mean(d) >= 0:
# now we want all values be larger than 0
#p = len(np.where(d>0)[0]) / (1.0*nboots)
sig = p
if sig == 0:
sig = 1.0/nboots
Mod[istate-1] = (np.mean(laser) / np.mean(basel) - 1) * 100
else:
# p = len(np.where(d<0)[0]) / (1.0*nboots)
sig = p
if sig == 0:
sig = 1.0/nboots
Mod[istate-1] = -(1 - np.mean(laser) / np.mean(basel)) * 100
P[istate-1] = sig
labels = {1:'REM', 2:'Wake', 3:'NREM'}
for s in [1,2,3]:
print('%s is changed by %f perc.; P = %f, bootstrap' % (labels[s], Mod[s-1], P[s-1]))
print("n = %d mice" % len(mice))
if len(fig_file) > 0:
plt.savefig(fig_file, bbox_inches="tight")
return P, Mod
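# Hedged example call for laser_brainstate_bootstrap(); recording names are
# placeholders, and nboots/alpha are illustrative values:
#
#   P, Mod = laser_brainstate_bootstrap(ppath, ['M1_010121n1', 'M2_010121n1'],
#                                       pre=120, post=240, nboots=1000, alpha=0.05)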
def _despine_axes(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
def sleep_example(ppath, name, tlegend, tstart, tend, fmax=30, fig_file='', vm=[], ma_thr=10,
fontsize=12, cb_ticks=[], emg_ticks=[], r_mu = [10, 100],
fw_color=True, pemg_ampl=False, raw_ex = [], eegemg_legend=[], eegemg_max=[]):
"""
plot sleep example
:param ppath: base folder
:param name: recording name
:param tstart: start (in seconds) of shown example interval
:param tend: end of example interval
:param tlegend: length of time legend
:param fmax: maximum frequency shown for EEG spectrogram
:param fig_file: file name where figure will be saved
:param vm: saturation of EEG spectrogram
:param fontsize: fontsize
:param cb_ticks: ticks for colorbar
:param emg_ticks: ticks for EMG amplitude axis (uV)
:param r_mu: range of frequencies for EMG amplitude
:param fw_color: if True, use standard color scheme for brainstate (gray - NREM, violet - Wake, cyan - REM);
otherwise use Shinjae's color scheme
:param pemg_ampl: if True, plot EMG amplitude; otherwise plot the raw EMG trace
:param raw_ex: list of tuples; e.g. if you wish to show 2 raw examples of length t s at time point i and j s,
set raw_ex = [(i,t), (j,t)].
If raw_ex == [], no raw traces are plotted
The raw examples are labeled by gray rectangles
:param eegemg_legend: list with 2 floats: scale bar (in micro Volts) for EEG and EMG raw example
:param eegemg_max: list of 2 floats, the y range (ylim) for EEG and EMG raw examples (in micro Volts)
goes from -eegemg_max[0] to eegemg_max[0] (for EEG) and
from -eegemg_max[1] to eegemg_max[1] (for EMG)
Example call including EEG/EMG raw traces:
sleepy.sleep_example(ppath, name2, 300, 1000, 4000, raw_ex=[(2140, 5), (3000, 5)],
eegemg_legend=[200, 200], eegemg_max=[200, 200],
fig_file='/Users/tortugar/Desktop/example.png')
"""
set_fontarial()
set_fontsize(fontsize)
# True, if laser exists, otherwise set to False
plaser = True
sr = get_snr(ppath, name)
nbin = np.round(2.5 * sr)
dt = nbin * 1 / sr
ddt = 1.0/sr
istart = int(np.round(tstart/dt))
iend = int(np.round(tend/dt))
dur = (iend-istart+1)*dt
istart_emg = int(istart*nbin)
iend_emg = int((iend+1)*nbin)
M,K = load_stateidx(ppath, name)
#kcut = np.where(K>=0)[0]
#M = M[kcut]
if tend==-1:
iend = len(M)
M = M[istart:iend]
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
t = np.arange(0, len(M))*dt
t_emg = np.arange(0, iend_emg-istart_emg)*ddt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SPEEG = P['SP']
# calculate median for choosing right saturation for heatmap
med = np.median(SPEEG.max(axis=0))
if len(vm) == 0:
vm = [0, med*2.5]
#t = np.squeeze(P['t'])
freq = P['freq']
if pemg_ampl:
P = so.loadmat(os.path.join(ppath, name, 'msp_%s.mat' % name), squeeze_me=True)
SPEMG = P['mSP']#/1000000.0
else:
emg = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
# load laser
if not os.path.isfile(os.path.join(ppath, name, 'laser_%s.mat' % name)):
plaser = False
if plaser:
laser = load_laser(ppath, name)
idxs, idxe = laser_start_end(laser, SR=sr)
idxs = [int(i / nbin) for i in idxs]
idxe = [int(i / nbin) for i in idxe]
# laser
if plaser:
laser_start = []
laser_end = []
for (i,j) in zip(idxs, idxe):
if i>=istart and j <= iend:
laser_start.append(i-istart)
laser_end.append(j-istart)
# create figure
plt.ion()
plt.figure(figsize=(8,4))
# axis in the background to draw laser patches
axes_back = plt.axes([0.1, .4, 0.8, 0.52])
_despine_axes(axes_back)
if plaser:
for (i,j) in zip(laser_start, laser_end):
axes_back.add_patch(patches.Rectangle((i*dt, 0), (j-i+1)*dt, 1, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
axes_back.text(laser_end[0] * dt + dur * 0.01, 0.94, 'Laser', color=[0.6, 0.6, 1])
plt.ylim((0,1))
plt.xlim([t[0], t[-1]])
# show brainstate
axes_brs = plt.axes([0.1, 0.4, 0.8, 0.05])
cmap = plt.cm.jet
if fw_color:
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
else:
my_map = cmap.from_list('brs', [[0, 0, 0], [153 / 255.0, 76 / 255.0, 9 / 255.0],
[120 / 255.0, 120 / 255.0, 120 / 255.0], [1, 0.75, 0]], 4)
tmp = axes_brs.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
_despine_axes(axes_brs)
axes_legend = plt.axes([0.1, 0.33, 0.8, 0.05])
plt.ylim((0,1.1))
plt.xlim([t[0], t[-1]])
plt.plot([0, tlegend], [1, 1], color='black', lw=1)
plt.text(tlegend/4.0, 0.1, str(tlegend) + ' s')
_despine_axes(axes_legend)
# show spectrogram
ifreq = np.where(freq <= fmax)[0]
# axes for colorbar
axes_cbar = plt.axes([0.82, 0.68, 0.1, 0.2])
# axes for EEG spectrogram
axes_spec = plt.axes([0.1, 0.68, 0.8, 0.2], sharex=axes_brs)
im = axes_spec.pcolorfast(t, freq[ifreq], SPEEG[ifreq, istart:iend], cmap='jet', vmin=vm[0], vmax=vm[1])
axes_spec.axis('tight')
axes_spec.set_xticklabels([])
axes_spec.set_xticks([])
axes_spec.spines["bottom"].set_visible(False)
plt.ylabel('Freq (Hz)')
box_off(axes_spec)
plt.xlim([t[0], t[-1]])
# colorbar for EEG spectrogram
cb = plt.colorbar(im, ax=axes_cbar, pad=0.0, aspect=10.0)
cb.set_label('Power ($\mathrm{\mu}$V$^2$s)')
if len(cb_ticks) > 0:
cb.set_ticks(cb_ticks)
axes_cbar.set_alpha(0.0)
_despine_axes(axes_cbar)
# show EMG
axes_emg = plt.axes([0.1, 0.5, 0.8, 0.1], sharex=axes_spec)
if pemg_ampl:
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
p_mu = np.sqrt(SPEMG[i_mu, :].sum(axis=0) * (freq[1] - freq[0])) #* 1000.0 # back to muV
axes_emg.plot(t, p_mu[istart:iend], color='black')
# * 1000: to go from mV to uV
if len(emg_ticks) > 0:
axes_emg.set_yticks(emg_ticks)
plt.ylabel('Ampl. ' + '$\mathrm{(\mu V)}$')
plt.xlim((t[0], t[-1] + 1))
else:
axes_emg.plot(t_emg, emg[istart_emg:iend_emg], color='black', lw=0.2)
plt.xlim((t_emg[0], t_emg[-1] + 1))
box_off(axes_emg)
axes_emg.patch.set_alpha(0.0)
axes_emg.spines["bottom"].set_visible(False)
# axis for raw data example
if len(raw_ex) > 0:
axes_raw_ex = plt.axes([0.1, .39, 0.8, 0.51])
axes_raw_ex.patch.set_alpha(0)
_despine_axes(axes_raw_ex)
for (ta, tlen) in raw_ex:
ta = ta-tstart
axes_raw_ex.add_patch(patches.Rectangle((ta, 0), tlen, 1, fill=False, edgecolor=[0.4, 0.4, 0.4], lw=0.3))
plt.ylim((0,1))
plt.xlim([t[0], t[-1]])
eeg = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)['EEG']
emg = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
# axes to label EEG EMG
ax_eeg_label = plt.axes([0.04, 0.18, 0.05, 0.1])
ax_eeg_label.set_xlim([0, 1])
ax_eeg_label.set_ylim([0, 1])
ax_eeg_label.text(0, 0.5, 'EEG', verticalalignment='center')
_despine_axes(ax_eeg_label)
ax_emg_label = plt.axes([0.04, 0.05, 0.05, 0.1])
ax_emg_label.set_xlim([0, 1])
ax_emg_label.set_ylim([0, 1])
ax_emg_label.text(0, 0.5, 'EMG', verticalalignment='center')
_despine_axes(ax_emg_label)
# axes for legend
ax_eeg_legend = plt.axes([0.92, 0.05, 0.05, 0.1])
ax_emg_legend = plt.axes([0.92, 0.18, 0.05, 0.1])
ax_eeg_legend.set_xlim([0, 1])
ax_emg_legend.set_xlim([0, 1])
ax_eeg_legend.set_ylim([-eegemg_max[0], eegemg_max[0]])
ax_emg_legend.set_ylim([-eegemg_max[1], eegemg_max[1]])
ax_eeg_legend.plot([0., 0.], [-eegemg_legend[0]/2, eegemg_legend[0]/2], color='black')
ax_emg_legend.plot([0., 0.], [-eegemg_legend[1]/2, eegemg_legend[1]/2], color='black')
ax_eeg_legend.text(0.1, -eegemg_legend[0]/2, str(eegemg_legend[0]/1000) + 'mV', rotation=90, fontsize=8)
ax_emg_legend.text(0.1, -eegemg_legend[1]/2, str(eegemg_legend[1]/1000) + 'mV', rotation=90, fontsize=8)
_despine_axes(ax_eeg_legend)
_despine_axes(ax_emg_legend)
nraw_ex = len(raw_ex)
raw_axes_eeg = []
raw_axes_emg = []
len_x = 0.8/nraw_ex-0.02
start_x = np.linspace(0.1+len_x, 0.9, nraw_ex) - len_x
for i in range(nraw_ex):
a = plt.axes([start_x[i], 0.05, (0.8/nraw_ex)-0.02, .1])
raw_axes_emg.append(a)
a = plt.axes([start_x[i], 0.18, (0.8/nraw_ex)-0.02, .1])
raw_axes_eeg.append(a)
for (ax, i) in zip(raw_axes_eeg, range(len(raw_ex))):
ta, tlen = raw_ex[i]
idx = range(int(ta*sr), int(ta*sr+tlen*sr))
t_eeg = np.arange(0, len(idx))*ddt
ax.plot(t_eeg, eeg[idx], color='k', lw=0.5)
ax.set_xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(ax)
ax.set_ylim([-eegemg_max[0], eegemg_max[0]])
for (ax, i) in zip(raw_axes_emg, range(len(raw_ex))):
ta, tlen = raw_ex[i]
idx = range(int(ta*sr), int(ta*sr+tlen*sr))
t_eeg = np.arange(0, len(idx))*ddt
ax.plot(t_eeg, emg[idx], color='k', lw=0.5)
ax.set_xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(ax)
ax.set_ylim([-eegemg_max[1], eegemg_max[1]])
axes_raw_time = plt.axes([0.1, 0.03, len_x, 0.02])
plt.plot([0, tlen/10], [0,0], color='black', lw=0.8)
plt.ylim([-1,1])
plt.xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(axes_raw_time)
if len(fig_file) > 0:
save_figure(fig_file)
plt.show()
def sleep_stats(ppath, recordings, ma_thr=10.0, tstart=0, tend=-1, pplot=True, csv_file=''):
"""
Calculate average percentage of each brain state,
average duration and average frequency
plot histograms for REM, NREM, and Wake durations
@PARAMETERS:
ppath - base folder
recordings - single string specifying recording or list of recordings
@OPTIONAL:
ma_thr - threshold for wake periods to be considered as microarousals
tstart - only consider recorded data starting from time tstart, default 0s
tend - only consider data recorded up to tend s, default -1, i.e. everything till the end
pplot - generate plot in the end; True or False
csv_file - file where data should be saved as csv file (e.g. csv_file = '/home/Users/Franz/Documents/my_data.csv')
@RETURN:
ndarray of percentages (# mice x [REM,Wake,NREM])
ndarray of state durations
ndarray of transition frequency / hour
pd.DataFrame df with columns mouse, state, Perc, Dur, Freq
"""
if type(recordings) != list:
recordings = [recordings]
Percentage = {}
Duration = {}
Frequency = {}
mice = []
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mice:
mice.append(idf)
Percentage[idf] = {1:[], 2:[], 3:[]}
Duration[idf] = {1:[], 2:[], 3:[]}
Frequency[idf] = {1:[], 2:[], 3:[]}
nmice = len(Frequency)
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1/SR
# load brain state
M, K = load_stateidx(ppath, rec)
kcut = np.where(K >= 0)[0]
M = M[kcut]
istart = int(np.round((1.0 * tstart) / dt))
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M[np.where(M==5)] = 2
# polish out microarousals
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if np.round(len(s)*dt) <= ma_thr:
M[s] = 3
midx = np.arange(istart,iend+1)
Mcut = M[midx]
nm = len(Mcut)*1.0
# get percentage of each state
for s in [1,2,3]:
Percentage[idf][s].append(len(np.where(Mcut==s)[0]) / nm)
# get frequency of each state
for s in [1,2,3]:
Frequency[idf][s].append( len(get_sequences(np.where(Mcut==s)[0])) * (3600. / (nm*dt)) )
# get average duration for each state
for s in [1,2,3]:
seq = get_sequences(np.where(Mcut==s)[0])
Duration[idf][s] += [len(i)*dt for i in seq]
PercMx = np.zeros((nmice,3))
i=0
for k in mice:
for s in [1,2,3]:
PercMx[i,s-1] = np.array(Percentage[k][s]).mean()
i += 1
PercMx *= 100
FreqMx = np.zeros((nmice,3))
i = 0
for k in mice:
for s in [1,2,3]:
FreqMx[i,s-1] = np.array(Frequency[k][s]).mean()
i += 1
DurMx = np.zeros((nmice,3))
i = 0
for k in mice:
for s in [1,2,3]:
DurMx[i,s-1] = np.array(Duration[k][s]).mean()
i += 1
DurHist = {1:[], 2:[], 3:[]}
for s in [1,2,3]:
DurHist[s] = np.squeeze(np.array(reduce(lambda x,y: x+y, [Duration[k][s] for k in Duration])))
if pplot:
clrs = sns.color_palette("husl", nmice)
plt.ion()
# plot bars summarizing results - Figure 1
plt.figure(figsize=(10, 5))
ax = plt.axes([0.1, 0.15, 0.2, 0.8])
plt.bar([1,2,3], PercMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1,2,3], PercMx[i,:], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Percentage (%)')
plt.legend(fontsize=9)
plt.xlim([0.2, 3.8])
box_off(ax)
ax = plt.axes([0.4, 0.15, 0.2, 0.8])
plt.bar([1,2,3], DurMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1, 2, 3], DurMx[i, :], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Duration (s)')
plt.xlim([0.2, 3.8])
box_off(ax)
ax = plt.axes([0.7, 0.15, 0.2, 0.8])
plt.bar([1,2,3], FreqMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1, 2, 3], FreqMx[i, :], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Frequency (1/h)')
plt.xlim([0.2, 3.8])
box_off(ax)
plt.show(block=False)
# plot histograms - Figure 2
plt.figure(figsize=(5, 10))
ax = plt.axes([0.2,0.1, 0.7, 0.2])
h, edges = np.histogram(DurHist[1], bins=40, range=(0, 300), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=5)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. REM')
box_off(ax)
ax = plt.axes([0.2,0.4, 0.7, 0.2])
h, edges = np.histogram(DurHist[2], bins=40, range=(0, 1200), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=20)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. Wake')
box_off(ax)
ax = plt.axes([0.2,0.7, 0.7, 0.2])
h, edges = np.histogram(DurHist[3], bins=40, range=(0, 1200), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=20)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. NREM')
box_off(ax)
plt.show()
mouse_list = [[m]*3 for m in mice]
mouse_list = sum(mouse_list, [])
state_list = ['REM', 'Wake', 'NREM']*nmice
df = pd.DataFrame({'mouse':mouse_list, 'state':state_list, 'Perc':PercMx.flatten(), 'Dur':DurMx.flatten(), 'Freq':FreqMx.flatten()})
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return PercMx, DurMx, FreqMx, df
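# Hedged follow-up sketch: the DataFrame returned by sleep_stats() is in long
# format (mouse, state, Perc, Dur, Freq), so per-state summary statistics can be
# pulled out directly; nothing below depends on a particular data set.
def _example_state_means(df):
    """Return mean and s.d. of percentage, duration and frequency per state."""
    return df.groupby('state')[['Perc', 'Dur', 'Freq']].agg(['mean', 'std'])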
def sleep_timecourse_list(ppath, recordings, tbin, n, tstart=0, tend=-1, ma_thr=-1, pplot=True, single_mode=False, csv_file=''):
"""
simplified version of sleep_timecourse
plot sleep timecourse for a list of recordings
The function does not distinguish between control and experimental mice.
It computes/plots how the percentage and frequency (1/h) of brain states and the duration
of brain state episodes evolve over time.
See also sleep_timecourse
@Parameters:
ppath Base folder with recordings
recordings list of recordings as e.g. generated by &load_recordings
tbin duration of single time bin in seconds
n number of time bins
@Optional:
tstart start time of first bin in seconds
tend end time of last bin; end of recording if tend==-1
ma_thr set microarousals (wake periods <= ma_thr seconds) to NREM
if ma_thr==-1, don't do anything
pplot plot figures summarizing results
single_mode if True, plot each single mouse
csv_file string, if non-empty, write data into file $ppath/$csv_file .csv
to load csv file: data = pd.read_csv($csv_file.csv, header=[0,1,2])
@Return:
TimeMx, DurMx, FreqMx, df Dict[state][time_bin x mouse_id]
df is a pandas DataFrame of the format
Perc DUR
REM Wake NREM REM Wake etc.
bin1 ... binn bin1 ... binn bin1 ... bin2 bin1 ... bin2 etc.
mouse1
.
.
.
mousen
"""
if type(recordings) != list:
recordings = [recordings]
Mice = {}
mouse_order = []
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mouse_order:
mouse_order.append(idf)
Mice[idf] = 1
Mice = list(Mice.keys())
TimeCourse = {}
FreqCourse = {}
DurCourse = {}
for rec in recordings:
idf = re.split('_', rec)[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
# time bin in Fourier time
dt = NBIN * 1/SR
M,K = load_stateidx(ppath, rec)
kcut = np.where(K>=0)[0]
#kidx = np.setdiff1d(np.arange(0, M.shape[0]), kcut)
M = M[kcut]
        M[M==5] = 2
# polish out microarousals
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M = M[0:iend+1]
istart = int(np.round((1.0*tstart) / dt))
ibin = int(np.round(tbin / dt))
# how brain state percentage changes over time
perc_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
#M_cut = M[np.arange(istart+i*ibin, istart+(i+1)*ibin)]
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
perc = []
for s in [1,2,3]:
perc.append( len(np.where(M_cut==s)[0]) / (1.0*len(M_cut)) )
perc_time.append(perc)
perc_vec = np.zeros((n,3))
for i in range(3):
perc_vec[:,i] = np.array([v[i] for v in perc_time])
TimeCourse[rec] = perc_vec
# how frequency of sleep stage changes over time
freq_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
#M_cut = M[np.arange(istart+i*ibin, istart+(i+1)*ibin)]
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
freq = []
for s in [1,2,3]:
tmp = len(get_sequences(np.where(M_cut==s)[0])) * (3600. / (len(M_cut)*dt))
freq.append(tmp)
freq_time.append(freq)
freq_vec = np.zeros((n,3))
for i in range(3):
freq_vec[:,i] = np.array([v[i] for v in freq_time])
FreqCourse[rec] = freq_vec
# how duration of sleep stage changes over time
dur_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
dur = []
for s in [1,2,3]:
tmp = get_sequences(np.where(M_cut==s)[0])
tmp = np.array([len(j)*dt for j in tmp]).mean()
dur.append(tmp)
dur_time.append(dur)
dur_vec = np.zeros((n,3))
for i in range(3):
dur_vec[:,i] = np.array([v[i] for v in dur_time])
DurCourse[rec] = dur_vec
# collect all recordings belonging to a Control mouse
TimeCourseMouse = {}
DurCourseMouse = {}
FreqCourseMouse = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in Mice:
TimeCourseMouse[mouse] = []
DurCourseMouse[mouse] = []
FreqCourseMouse[mouse] = []
for rec in recordings:
idf = re.split('_', rec)[0]
TimeCourseMouse[idf].append(TimeCourse[rec])
DurCourseMouse[idf].append(DurCourse[rec])
FreqCourseMouse[idf].append(FreqCourse[rec])
mx = np.zeros((n, len(Mice)))
TimeMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
mx = np.zeros((n, len(Mice)))
DurMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
mx = np.zeros((n, len(Mice)))
FreqMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
# Dict[R|W|N][time_bin x mouse_id]
i = 0
for k in mouse_order:
for s in range(1,4):
tmp = np.array(TimeCourseMouse[k]).mean(axis=0)
TimeMx[s][:,i] = tmp[:,s-1]
tmp = np.array(DurCourseMouse[k]).mean(axis=0)
DurMx[s][:,i] = tmp[:,s-1]
tmp = np.array(FreqCourseMouse[k]).mean(axis=0)
FreqMx[s][:,i] = tmp[:,s-1]
i += 1
if pplot:
clrs = sns.color_palette("husl", len(mouse_order))
label = {1:'REM', 2:'Wake', 3:'NREM'}
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
# plot percentage of brain state as function of time
plt.ion()
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(TimeMx[s],axis=1), yerr = np.nanstd(TimeMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, TimeMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
if s==1:
#plt.ylim([0, 0.2])
pass
else:
plt.ylim([0, 1.0])
plt.ylabel('Perc ' + label[s] + '(%)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s == 1:
plt.xlabel('Time (h)')
plt.draw()
# plot duration as function of time
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(DurMx[s],axis=1), yerr = np.nanstd(DurMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, DurMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel('Dur ' + label[s] + '(s)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s==1:
plt.xlabel('Time (h)')
plt.draw()
# plot frequency as function of time
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(FreqMx[s],axis=1), yerr = np.nanstd(FreqMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, FreqMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel('Freq ' + label[s] + '(1/h)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s==1:
plt.xlabel('Time (h)')
plt.draw()
# write data into dataframe and csv file
bins = ['bin' + str(i+1) for i in range(n)]
columns = pd.MultiIndex.from_product([['Perc', 'Dur', 'Freq'], ['REM', 'Wake', 'NREM'], bins],
names=['stats', 'state', 'bin'])
D = np.concatenate((TimeMx[1].T, TimeMx[2].T, TimeMx[3].T, DurMx[1].T, DurMx[2].T, DurMx[3].T, FreqMx[1].T, FreqMx[2].T, FreqMx[3].T), axis=1)
df = pd.DataFrame(D, index=mouse_order, columns=columns)
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return TimeMx, DurMx, FreqMx, df
def ma_timecourse_list(ppath, recordings, tbin, n, tstart=0, tend=-1, ma_thr=20, pplot=True, single_mode=False, csv_file=''):
"""
Calculate percentage, duration, and frequency of microarousals
:param ppath: base folder
:param recordings: single recording or list of recordings
:param tbin: time bin in seconds
:param n: number of time bins
    :param tstart: start time in recording(s) for analysis
:param tend: end time for analysis
:param ma_thr: microarousal threshold; any wake period shorter than $ma_thr will be considered as microarousal
:param pplot: if True, plot figure
:param single_mode: if True, plot each single mouse with different color
:param csv_file: string, if non-empty, write data into file "csv_file";
file name should end with ".csv"
:return: TimeMX, DurMX, FreqMx, df - np.array(# time bins x mice)
TimeMX, DurMX, FreqMx: arrays with shape "time bins x mice"
df: DataFrame with columns: mouse, perc, dur, freq, bin
For example, to get the first time bins of perc, dur, freq of mouse M1 type
df[(df.mouse == 'M1') & (df.bin == 't0')]
"""
if type(recordings) != list:
recordings = [recordings]
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
TimeCourse = {}
FreqCourse = {}
DurCourse = {}
for rec in recordings:
SR = get_snr(ppath, rec)
NBIN = np.round(2.5 * SR)
# time bin in Fourier time
dt = NBIN * 1 / SR
M, K = load_stateidx(ppath, rec)
kcut = np.where(K >= 0)[0]
# kidx = np.setdiff1d(np.arange(0, M.shape[0]), kcut)
M = M[kcut]
Mnew = np.zeros(M.shape)
        Mnew[np.where(M == 5)] = 1
# polish out microarousals
if ma_thr > 0:
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
Mnew[s] = 1
M = Mnew
if tend == -1:
iend = len(M) - 1
else:
iend = int(np.round((1.0 * tend) / dt))
M = M[0:iend + 1]
istart = int(np.round((1.0 * tstart) / dt))
ibin = int(np.round(tbin / dt))
# how brain state percentage changes over time
perc_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
perc = len(np.where(M_cut == 1)[0]) / (1.0 * len(M_cut))
perc_time.append(perc)
TimeCourse[rec] = np.array(perc_time)
# how frequency of sleep stage changes over time
freq_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
s = 1
freq = len(get_sequences(np.where(M_cut == s)[0])) * (3600. / (len(M_cut) * dt))
freq_time.append(freq)
FreqCourse[rec] = np.array(freq_time)
# how duration of microarousals changes over time
dur_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
s = 1
tmp = get_sequences(np.where(M_cut == s)[0])
dur = np.array([len(j) * dt for j in tmp]).mean()
dur_time.append(dur)
DurCourse[rec] = np.array(dur_time)
# collect all recordings belonging to a Control mouse
TimeCourseMouse = {}
DurCourseMouse = {}
FreqCourseMouse = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in mouse_order:
TimeCourseMouse[mouse] = []
DurCourseMouse[mouse] = []
FreqCourseMouse[mouse] = []
for rec in recordings:
idf = re.split('_', rec)[0]
TimeCourseMouse[idf].append(TimeCourse[rec])
DurCourseMouse[idf].append(DurCourse[rec])
FreqCourseMouse[idf].append(FreqCourse[rec])
# np.array(time x mouse_id)
TimeMx = np.zeros((n, len(mouse_order)))
DurMx = np.zeros((n, len(mouse_order)))
FreqMx = np.zeros((n, len(mouse_order)))
i = 0
for k in mouse_order:
tmp = np.array(TimeCourseMouse[k]).mean(axis=0)
TimeMx[:,i] = tmp
tmp = np.array(DurCourseMouse[k]).mean(axis=0)
DurMx[:,i] = tmp
tmp = np.array(FreqCourseMouse[k]).mean(axis=0)
FreqMx[:,i] = tmp
i += 1
# plotting
if pplot:
plot_dict = {0:TimeMx, 1:DurMx, 2:FreqMx}
clrs = sns.color_palette("husl", len(mouse_order))
ylabel = {0:'Perc (%)', 1:'Dur (s)', 2:'Freq ($h^{-1}$)'}
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
# plot percentage of brain state as function of time
plt.ion()
plt.figure()
for s in range(0, 3):
ax = plt.axes([0.15, s*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(plot_dict[s],axis=1), yerr = np.nanstd(plot_dict[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, plot_dict[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==2:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel(ylabel[s])
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s == 0:
plt.xlabel('Time (h)')
plt.draw()
bins = [['t' + str(i)]*len(mouse_order) for i in range(n)]
bins = sum(bins, [])
cols = ['mouse', 'perc', 'dur', 'freq', 'bin']
mice = mouse_order*n
df = pd.DataFrame(columns=cols)
df['mouse'] = mice
df['bin'] = bins
df['perc'] = TimeMx.flatten()
df['dur'] = DurMx.flatten()
df['freq'] = FreqMx.flatten()
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return TimeMx, DurMx, FreqMx, df
def transition_timecourse_list(ppath, recordings, tbin, n, tdown=10, tstart=0, tend=-1, ma_thr=-1, pplot=True, single_mode=False, csv_file=''):
if type(recordings) != list:
recordings = [recordings]
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
Recordings = {idf:[] for idf in mouse_order}
SR = get_snr(ppath, recordings[0])
NBIN = np.round(2.5 * SR)
# time bin in Fourier time
dt = NBIN * 1 / SR
istart = int(np.round((1.0 * tstart) / dt))
ibin = int(np.round(tbin / dt))
idown = int(tdown/dt)
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(ppath, rec)[0]
# polish out microarousals
if ma_thr > 0:
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
M[s] = 1
if tend == -1:
iend = len(M) - 1
else:
iend = int(np.round((1.0 * tend) / dt))
M = M[0:iend + 1]
# how brain state percentage changes over time
Recordings[idf].append(M)
MX = {idf:[] for idf in mouse_order}
for i in range(n):
for idf in mouse_order:
recs = Recordings[idf]
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
recs = [downsample_states(rec[midx], idown) for rec in recs]
recs = np.array(recs, dtype='int')
#recs = downsample_states(recs, idown)
pmx = complete_transition_matrix(recs, np.array(range(recs.shape[1])))
MX[idf].append(pmx)
#transform MX to a DataFrame
trans_map = {'11':'RR', '12':'RW', '13':'RN',
'21':'WR', '22':'WW', '23':'WN',
'31':'NR', '32':'NW', '33':'NN'}
data = []
for i in range(n):
for si in [1,2,3]:
for sj in [1,2,3]:
for idf in mouse_order:
trans = trans_map[str(si)+str(sj)]
data += [[idf, 't'+str(i), MX[idf][i][si-1, sj-1], trans]]
df = pd.DataFrame(data=data, columns=['mouse', 'time', 'prob', 'trans'])
return df
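# Illustrative usage sketch for transition_timecourse_list (names and parameters are placeholders):
#   df = transition_timecourse_list(ppath, recs, tbin=3600, n=12, tdown=10)
#   # average NREM->REM transition probability per time bin:
#   df[df.trans == 'NR'].groupby('time').prob.mean()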
def sleep_through_days(ppath, recordings, tstart=0, tend=-1, stats=0, xticks=[], ma_thr=20, min_dur=[0,0,0], single_mode=True, csv_file = ''):
"""
Follow sleep quantity (percentage, bout duration, or frequency / hour) over multiple days
:param ppath: base folder
    :param recordings: list of lists of recordings, for example [[F1_010118n1, F2_010118n1], [F1_010218n1, F2_010218n1]]
specifies the recordings of F1 and F2 for two days
    :param tstart: float, quantification of sleep starts at $tstart s
:param tend: float, quantification of sleep ends at $tend s
:param stats: Measured sleep variable (statistics):
           0 - percentage, 1 - episode duration, 2 - episode frequency, 3 - latency to first occurrence of REM, Wake, and NREM
:param xticks: list of string, specifying the xticks
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param min_dur: list with 3 floats, specifying the minimum duration of the first REM, Wake, and NREM period,
only relevant if $stats == 3
:param single_mode: if True, plot each single mouse in different color
:param csv_file: string, save pd.DataFrame to file; the actual file name will be "$csv_file + stats + $stats .csv" and
           will be saved in the folder $ppath
:return: np.array, mice x [REM,Wake,NREM] x days AND pd.DataFrame
the pd.DataFrame has the following format:
state REM Wake NREM
day Day1 ... Dayn Day1 ... Dayn Day1 ... Dayn
mouse1
.
.
.
mousen
"""
states = {1:'REM', 2:'Wake', 3:'NREM'}
stats_label = {0:'(%)', 1:'Dur (s)', 2: 'Freq. (1/h)', 3: 'Lat. (min)'}
mice_per_day = {}
iday = 0
for day in recordings:
mice = []
for rec in day:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mice:
mice.append(idf)
mice_per_day[iday] = mice
iday += 1
ndays = len(mice_per_day)
nmice = len(mice_per_day[0])
for i in range(ndays):
for j in range(i+1, ndays):
if mice_per_day[i] != mice_per_day[j]:
print("ERROR: mice on day %d and %d not consistent" % (i+1, j+1))
return
#DayResults: mice x [R|W|N] x days
DayResults = np.zeros((nmice, 3, ndays))
for day in range(ndays):
if stats<=2:
res = sleep_stats(ppath, recordings[day], tstart=tstart, tend=tend, pplot=False, ma_thr=ma_thr)[stats]
else:
res = np.zeros((nmice, 3))
for s in range(1,4):
res[:,s-1] = state_onset(ppath, recordings[day], s, min_dur=min_dur[s-1], tstart=tstart, tend=tend, pplot=False)
DayResults[:,:,day] = res
plt.ion()
clrs = sns.color_palette("husl", nmice)
plt.figure(figsize=(10,6))
for s in range(1, 4):
ax = plt.axes([0.1, (s - 1) * 0.3 + 0.1, 0.8, 0.2])
if single_mode:
for i in range(nmice):
plt.plot(list(range(1,ndays+1)), DayResults[i,s-1,:], 'o-', color=clrs[i], label=mice[i])
else:
plt.errorbar(list(range(1, ndays+1)), DayResults[:, s-1, :].mean(axis=0), yerr=DayResults[:, s-1, :].std(axis=0),
color='gray', label='avg', linewidth=2)
if s == 1:
if len(xticks) == 0:
plt.xticks(list(range(1,ndays+1)))
else:
                plt.xticks(list(range(1, ndays + 1)), xticks)
else:
plt.xticks(list(range(1, ndays + 1)))
ax.set_xticklabels([])
if s == 3:
ax.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=nmice,
frameon=False)
box_off(ax)
if s == 1:
plt.xlabel('Day')
plt.ylabel(states[s] + ' ' + stats_label[stats])
if len(xticks) > 0:
col = xticks
else:
col = ['Day' + str(i+1) for i in range(ndays)]
# put data into pandas dataframe
columns = pd.MultiIndex.from_product([['REM', 'Wake', 'NREM'], col], names=['state', 'day'])
D = np.concatenate((DayResults[:,0,:], DayResults[:,1,:], DayResults[:,2,:]), axis=1)
df = pd.DataFrame(D, index=mice, columns=columns)
if len(csv_file) > 0:
csv_file += '_stats' + str(stats) + '.csv'
df.to_csv(os.path.join(ppath, csv_file), index=False)
return DayResults, df
def sleep_timecourse(ppath, trace_file, tbin, n, tstart=0, tend=-1, pplot=True, stats='perc', csv_file='', ma_thr=0):
"""
plot how percentage of REM,Wake,NREM changes over time;
compares control with experimental data; experimental recordings can have different "doses"
a simpler version is sleep_timecourse_list
@Parameters
trace_file - text file, specifies control and experimental recordings,
                 the syntax for the file is the same as required for load_dose_recordings
                 Example: with one control and two experimental groups (dose 1 or 2 in the third column)
# Comments
#Mouse Recording dose
C B1_01012020n1
C B2_01012020n1
# no dose value for controls
E B1_01022020n1 1
E B2_01022020n1 1
E B1_01032020n1 2
E B2_01032020n1 2
tbin - size of time bin in seconds
n - number of time bins
@Optional:
tstart - beginning of recording (time <tstart is thrown away)
tend - end of recording (time >tend is thrown away)
pplot - plot figure if True
stats - statistics;
stats = 'perc': compute percentage of each brain state in each time bin;
stats = 'freq': compute frequency of each state for each time bin
stats = 'dur': compute average duration of each state sequence
for each time bin
@Return:
TimeMxCtr - Dict[R|W|N][time_bin x mouse_id]
TimeMxExp - Dict[R|W|N][dose][time_bin x mouse_id]
df - pandas.DataFrame with columns ['mouse', 'dose', 'state', 'time', $stats]
How to run 2way anova with repeated measures?
to determine with effects for different doses on REM:
# extract all REM values from DataFrame
df_rem = df[df.state == 'REM']
the within factors are 'time' and 'dose'; the dependent variable is 'perc'
using pingouin the anova can be calculated using
pg.rm_anova(data=df_rem, dv='perc', within=['time', 'dose'], subject='mouse', correction=True)
"""
(ctr_rec, exp_rec) = load_dose_recordings(ppath, trace_file)
Recordings = []
Recordings += ctr_rec
for k in exp_rec.keys():
Recordings += exp_rec[k]
CMice = []
for mouse in ctr_rec:
idf = re.split('_', mouse)[0]
if not idf in CMice:
CMice.append(idf)
EMice = {}
for d in exp_rec:
mice = exp_rec[d]
EMice[d] = []
for mouse in mice:
idf = re.split('_', mouse)[0]
if not idf in EMice[d]:
EMice[d].append(idf)
TimeCourse = {}
for rec in Recordings:
idf = re.split('_', rec)[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
# time bin in Fourier time
dt = NBIN * 1/SR
M = load_stateidx(ppath, rec)[0]
        M[M==5] = 2
if ma_thr > 0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M = M[0:iend+1]
istart = int(np.round((1.0*tstart) / dt))
ibin = int(np.round(tbin / dt))
perc_time = []
for i in range(n):
            # return something even if istart+(i+1)*ibin >= len(M)
M_cut = M[istart+i*ibin:istart+(i+1)*ibin]
#midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
perc = []
for s in [1,2,3]:
if stats == 'perc':
perc.append( 100 * len(np.where(M_cut==s)[0]) / (1.0*len(M_cut)) )
elif stats == 'freq':
tmp = len(get_sequences(np.where(M_cut==s)[0])) * (3600. / (len(M_cut)*dt))
perc.append(tmp)
else:
tmp = get_sequences(np.where(M_cut==s)[0])
tmp = np.array([len(j)*dt for j in tmp]).mean()
perc.append(tmp)
perc_time.append(perc)
# number of time bins x [REM|Wake|NREM]
perc_vec = np.zeros((n,3))
for i in range(3):
# for each time bin we have a list of 3 elements for each state.
# take from each of these triplets the i-th state, forming a column vector
perc_vec[:,i] = np.array([v[i] for v in perc_time])
TimeCourse[rec] = perc_vec
# define data frame containing all data
#bins = ['t' + str(i) for i in range(n)]
cols = ['mouse', 'dose', 'state', 'time', stats]
df = pd.DataFrame(columns=cols)
state_map = {1: 'REM', 2:'Wake', 3:'NREM'}
# collect all recordings belonging to a Control mouse
TimeCourseCtr = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in CMice:
TimeCourseCtr[mouse] = []
for rec in Recordings:
idf = re.split('_', rec)[0]
if rec in ctr_rec:
TimeCourseCtr[idf].append(TimeCourse[rec])
mx = np.zeros((n, len(CMice)))
TimeMxCtr = {1:mx, 2:mx.copy(), 3:mx.copy()}
# Dict[R|W|N][time_bin x mouse_id]
i = 0
for k in TimeCourseCtr:
for s in range(1,4):
# [time_bin x br_state]
tmp = np.array(TimeCourseCtr[k]).mean(axis=0)
TimeMxCtr[s][:,i] = tmp[:,s-1]
#for j in range(n):
# df = df.append(pd.Series([k, '0', state_map[s], 't'+str(j), tmp[j,s-1]], index=cols), ignore_index=True)
#pdb.set_trace()
for j in range(n):
for r in range(len(TimeCourseCtr[k])):
df = df.append(pd.Series([k, '0', state_map[s], 't'+str(j), TimeCourseCtr[k][r][j,s-1]], index=cols), ignore_index=True)
i += 1
# collect all recording belonging to one Exp mouse with a specific dose
TimeCourseExp = {}
# Dict[dose][mouse_id][time_bin x br_state]
for d in EMice:
TimeCourseExp[d]={}
for mouse in EMice[d]:
TimeCourseExp[d][mouse] = []
for rec in Recordings:
idf = re.split('_', rec)[0]
for d in exp_rec:
if rec in exp_rec[d]:
TimeCourseExp[d][idf].append(TimeCourse[rec])
# dummy dictionary to initialize TimeMxExp
# Dict[R|W|N][dose][time_bin x mouse_id]
TimeMxExp = {1:{}, 2:{}, 3:{}}
for s in [1,2,3]:
TimeMxExp[s] = {}
for d in EMice:
TimeMxExp[s][d] = np.zeros((n, len(EMice[d])))
for d in TimeCourseExp:
i = 0
for k in TimeCourseExp[d]:
print(k)
tmp = np.array(TimeCourseExp[d][k]).mean(axis=0)
for s in [1,2,3]:
# [time_bin x br_state] for mouse k
#tmp = sum(TimeCourseExp[d][k]) / (1.0*len(TimeCourseExp[d][k]))
TimeMxExp[s][d][:,i] = tmp[:,s-1]
#for j in range(n):
# df = df.append(pd.Series([k, d, state_map[s], 't'+str(j), tmp[j, s-1]], index=cols), ignore_index=True)
for j in range(n):
for r in range(len(TimeCourseExp[d][k])):
df = df.append(pd.Series([k, d, state_map[s], 't'+str(j), TimeCourseExp[d][k][r][j,s-1]], index=cols), ignore_index=True)
i += 1
if pplot:
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
plt.ion()
plt.figure()
ndose = len(EMice)
ax = plt.axes([0.1, 0.7, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[1].mean(axis=1), yerr = TimeMxCtr[1].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% REM')
elif stats == 'freq':
plt.ylabel('Freq. REM (1/h)')
else:
plt.ylabel('Dur. REM (s)')
i = 1
for d in TimeMxExp[1]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[1][d].mean(axis=1), yerr = TimeMxExp[1][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
i += 1
ax = plt.axes([0.1, 0.4, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[2].mean(axis=1), yerr = TimeMxCtr[2].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% Wake')
elif stats == 'freq':
plt.ylabel('Freq. Wake (1/h)')
else:
plt.ylabel('Dur. Wake (s)')
i = 1
for d in TimeMxExp[2]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[2][d].mean(axis=1), yerr = TimeMxExp[2][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
i += 1
ax = plt.axes([0.1, 0.1, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[3].mean(axis=1), yerr = TimeMxCtr[3].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% NREM')
elif stats == 'freq':
plt.ylabel('Freq. NREM (1/h)')
else:
plt.ylabel('Dur. NREM (s)')
plt.xlabel('Time (h)')
plt.show()
i = 1
        for d in TimeMxExp[3]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[3][d].mean(axis=1), yerr = TimeMxExp[3][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2, label=d)
i += 1
plt.legend()
if len(csv_file) > 0:
df.to_csv(os.path.join(csv_file), index=False)
return TimeMxCtr, TimeMxExp, df
def state_onset(ppath, recordings, istate, min_dur, iseq=0, ma_thr=10, tstart=0, tend=-1, pplot=True):
"""
    calculate time point of first occurrence of state $istate in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param istate: 1 = REM, 2 = Wake, 3 = NREM
    :param min_dur: minimum duration in [s] to be counted as first occurrence
    :param iseq: calculate the $iseq-th occurrence of state $istate
    :param ma_thr: microarousal threshold
    :param tstart: float, quantification of sleep starts at $tstart s
    :param tend: float, quantification of sleep ends at $tend s
    :return: np.array, latency (in minutes) for each mouse. If one mouse contributes several recordings,
             the maximum latency across its recordings is taken (see np.nanmax below)
"""
if type(recordings) != list:
recordings = [recordings]
# get all mice in recordings
mice = []
dt = 2.5
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice.append(idf)
latency = {m:[] for m in mice}
for rec in recordings:
SR = get_snr(ppath, rec)
# Number of EEG bins / brainstate bin
NBIN = np.round(2.5 * SR)
# Precise time bin duration of each brain state:
dt = NBIN * 1.0 / SR
idf = re.split('_', rec)[0]
M,K = load_stateidx(ppath, rec)
# flatten out microarousals
if istate == 3 and ma_thr > 0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
kcut = np.where(K>=0)[0]
M = M[kcut]
istart = int(np.round(tstart/dt))
iend = int(np.round(tend/dt))
if tend == -1:
iend = len(M)
M = M[istart:iend]
seq = get_sequences(np.where(M==istate)[0])
seq = [s for s in seq if len(s) * dt >= min_dur]
#ifirst = seq[seq[iseq][0]]
ifirst = seq[iseq][0]
latency[idf].append(ifirst*dt)
for m in mice:
latency[m] = np.nanmax(np.array(latency[m]))
values = np.array([latency[m]/60. for m in mice])
clrs = sns.color_palette("husl", len(mice))
if pplot:
# print latencies
for m in mice:
print("%s - %.2f min" % (m, latency[m] / 60.))
plt.ion()
plt.figure()
ax = plt.subplot(111)
set_fontsize(14)
#plt.bar(range(0, len(values)), values, color='gray')
for i in range(len(mice)):
            plt.plot(i, values[i], 'o', color=clrs[i], label=mice[i])
#plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mice), frameon=False)
plt.xticks(list(range(0, len(values))), mice)
plt.ylabel('Onset Latency (min)')
box_off(ax)
plt.show()
return values
def sleep_spectrum(ppath, recordings, istate=1, pmode=1, fres=1/3, ma_thr=20.0, f_max=30, pplot=True, sig_type='EEG', mu=[10, 100],
tstart=0, tend=-1, sthres=np.inf, peeg2=False, pnorm=False, single_mode=False, conv=1.0, fig_file='', laser_color='blue', ci='sd'):
"""
calculate power spectrum for brain state i state for the given recordings.
The function first calculates for each mouse the powerspectrum for each
istate sequence, and then averages across all sequences.
Note: If recordings with different sampling rates are combined, set f_max to a
frequency value, which exists for all recordings.
@Param:
ppath - folder containing all recordings
recordings - single recording (string) or list of recordings
@Optional:
istate - state for which to calculate power spectrum; 1=REM, 2=Wake, 3=NREM
fres - resolution of frequency axis; i.e. fres = F[i] - F[i-1]
ma_thr - short wake periods <= $ma_thr are considered as sleep
f_max - maximal frequency, if f_max==-1: f_max is maximally possible frequency
pplot - if True, plot figure showing result
pmode - mode:
pmode == 1, compare state during laser with baseline outside laser interval
pmode == 0, just plot power spectrum for state istate and don't care about laser
pmode == 2, compare periods of state if they overlap with laser and if the laser precedes the state
               with state periods w/o laser. That is, we are looking here at "laser induced" periods; the period itself
               can be longer than the laser stimulation (as long as it follows laser onset).
tstart - use EEG starting from time point tstart [seconds]
sig_type - string, if 'EMG' calculate EMG amplitude (from the EMG spectrum). E.g.,
sleepy.sleep_spectrum(ppath, E, istate=2, f_max=30, sig_type='EMG')
mu - tuple, lower and upper range for EMG frequencies used for amplitude calculation
tend - use data up to tend [seconds], if tend == -1, use data till end
sthres - maximum length of bout duration of state $istate used for calculation. If bout duration > $sthres, only
use the bout up to $sthres seconds after bout onset.
peeg2 - if True, use EEG2 channel for spectrum analysis
pnorm - if True, normalize powerspectrum by dividing each frequency through each average power
over the whole EEG recording
single_mode - if True, plot each single mouse
fig_file - if specified save to given file
ci - parameter for seaborn.lineplot, if ci=='sd' plot standard deviation, for int values plot
confidence interval (e.g. ci=95 will plot the 95% confidence interval). Only works, if there
are more than one mouse!
errorbars: If it's multiple mice make errorbars over mice; if it's multiple
recordings of ONE mouse, show errorbars across recordings;
               if it's just one recording, show no errorbars
@Return:
        Pow     - Dict[No laser = 0|Laser = 1][array], where array: mice x frequencies, if more than one mouse;
otherwise, array: recordings x frequencies
F - Frequencies
"""
if type(recordings) != list:
recordings = [recordings]
Mice = {}
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in Mice:
Mice[idf] = Mouse(idf, rec, 'E')
else:
Mice[idf].add(rec)
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
# Spectra: Dict[mouse_id][laser_on|laser_off][list of powerspectrum_arrays]
Spectra = {}
Ids = list(Mice.keys())
for i in Ids:
        Spectra[i] = {0:[], 1:[]}
for idf in mouse_order:
for rec in Mice[idf].recordings:
# load EEG
if sig_type =='EEG':
if not peeg2:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EEG.mat'))['EEG']).astype('float')*conv
else:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EEG2.mat'))['EEG2']).astype('float')*conv
elif sig_type == 'EMG':
if not peeg2:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EMG.mat'))['EMG']).astype('float')*conv
else:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EMG2.mat'))['EMG2']).astype('float')*conv
else:
pass
# load brain state
M,K = load_stateidx(ppath, rec)
# set brain states where K<0 to zero;
            # this way they are effectively discarded
M[K<0] = 0
sr = get_snr(ppath, rec)
# calculate time window
#twin = int(np.round(sr * (1/fres))) * (1/sr)
twin = sr * (1/fres) * (1/sr)
# number of time bins for each time bin in spectrogram
nbin = int(np.round(sr) * 2.5)
# duration of time bin in spectrogram / brainstate
dt = nbin * 1/sr
nwin = np.round(twin*sr)
istart = int(np.round(tstart/dt))
if tend==-1:
iend = M.shape[0]
else:
iend = int(np.round(tend/dt))
istart_eeg = istart*nbin
iend_eeg = (iend-1)*nbin+1
M[np.where(M==5)]=2
# flatten out microarousals
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
# get all sequences of state $istate
M = M[istart:iend]
seq = get_sequences(np.where(M==istate)[0])
EEG = EEG[istart_eeg:iend_eeg]
if pnorm:
pow_norm = power_spectrum(EEG, nwin, 1 / sr)[0]
if pmode == 1 or pmode == 2:
laser = load_laser(ppath, rec)[istart_eeg:iend_eeg]
(idxs, idxe) = laser_start_end(laser, SR=sr)
# downsample EEG time to spectrogram time
idxs = [int(i/nbin) for i in idxs]
idxe = [int(i/nbin) for i in idxe]
laser_idx = []
for (i,j) in zip(idxs, idxe):
laser_idx += list(range(i,j+1))
laser_idx = np.array(laser_idx)
if pmode == 1 or pmode == 2:
# first analyze frequencies not overlapping with laser
seq_nolsr = []
for s in seq:
s = np.setdiff1d(s, laser_idx)
if len(s) > 0:
q = get_sequences(s)
seq_nolsr += q
for s in seq_nolsr:
if len(s)*nbin >= nwin:
drn = (s[-1]-s[0])*dt
if drn > sthres:
# b is the end of segment used for power spectrum calculation;
# that is, the last index (in raw EEG) of the segment
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][0].append(Pow)
# now analyze sequences overlapping with laser
seq_lsr = []
for s in seq:
if pmode == 1:
s = np.intersect1d(s, laser_idx)
if len(s) > 0:
q = get_sequences(s)
seq_lsr += q
if pmode == 2:
r = np.intersect1d(s, laser_idx)
if len(r) > 0 and s[0] in laser_idx:
seq_lsr += [s]
for s in seq_lsr:
# should not be necessary any more...
#if pmode == 1:
# s = np.intersect1d(s, laser_idx)
if len(s)*nbin >= nwin:
# calculate power spectrum
# upsample indices
# brain state time 0 1 2
# EEG time 0-999 1000-1999 2000-2999
drn = (s[-1]-s[0])*dt
if drn > sthres:
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
# changed line on 02/08/2019
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][1].append(Pow)
# don't care about laser
if pmode == 0:
for s in seq:
if len(s)*nbin >= nwin:
drn = (s[-1]-s[0])*dt
if drn > sthres:
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
# changed line on 02/08/2019
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][0].append(Pow)
mF = F.copy()
if sig_type == 'EEG':
if f_max > -1:
ifreq = np.where(F<=f_max)[0]
F = F[ifreq]
else:
f_max = F[-1]
else:
f_max = F[-1]
ifreq = range(0, F.shape[0])
Pow = {0:[], 1:[]}
if len(Ids)==1:
# only one mouse
#Pow[0] = np.array(Spectra[Ids[0]][0])
#Pow[1] = np.array(Spectra[Ids[0]][1])
Pow[0] = np.array([s[ifreq] for s in Spectra[Ids[0]][0]])
Pow[1] = np.array([s[ifreq] for s in Spectra[Ids[0]][1]])
else:
# several mice
Pow[0] = np.zeros((len(Ids),len(F)))
Pow[1] = np.zeros((len(Ids),len(F)))
i = 0
for m in Ids:
#Pow[0][i,:] = np.array(Spectra[m][0]).mean(axis=0)
tmp = [s[ifreq] for s in Spectra[m][0]]
Pow[0][i,:] = np.array(tmp).mean(axis=0)
if pmode == 1 or pmode == 2:
#Pow[1][i,:] = np.array(Spectra[m][1]).mean(axis=0)
tmp = [s[ifreq] for s in Spectra[m][1]]
Pow[1][i,:] = np.array(tmp).mean(axis=0)
i += 1
if pplot:
plt.ion()
plt.figure()
if sig_type == 'EEG':
ax = plt.axes([0.2, 0.15, 0.6, 0.7])
n = Pow[0].shape[0]
clrs = sns.color_palette("husl", len(mouse_order))
if pmode==1 or pmode==2:
if not single_mode:
a = Pow[1].mean(axis=0) - Pow[1].std(axis=0) / np.sqrt(n)
b = Pow[1].mean(axis=0) + Pow[1].std(axis=0) / np.sqrt(n)
plt.fill_between(F, a, b, alpha=0.5, color=laser_color)
plt.plot(F, Pow[1].mean(axis=0), color=laser_color, lw=2, label='With laser')
else:
for i in range(len(mouse_order)):
plt.plot(F, Pow[1][i,:], '--', color=clrs[i])
if not single_mode:
a = Pow[0].mean(axis=0)-Pow[0].std(axis=0)/np.sqrt(n)
b = Pow[0].mean(axis=0)+Pow[0].std(axis=0)/np.sqrt(n)
plt.fill_between(F, a, b, alpha=0.5, color='gray')
plt.plot(F, Pow[0].mean(axis=0), color='gray', lw=2, alpha=0.5, label='W/o laser')
else:
for i in range(len(mouse_order)):
plt.plot(F, Pow[0][i, :], label=mouse_order[i], color=clrs[i])
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
if pmode>=1 and not single_mode:
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', frameon=False)
box_off(ax)
plt.xlim([0, f_max])
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power ($\mathrm{\mu V^2}$)')
plt.show()
else:
# plot EMG amplitude
nmice = len(mouse_order)
clrs = sns.color_palette("husl", nmice)
# plot EMG
Ampl = {0:[], 1:[]}
# range of frequencies
mfreq = np.where((mF >= mu[0]) & (mF <= mu[1]))[0]
df = mF[1] - mF[0]
if pmode>=1:
for i in [0, 1]:
Ampl[i] = np.sqrt(Pow[i][:,mfreq].sum(axis=1)*df)
else:
Ampl[0] = np.sqrt(Pow[0][:,mfreq].sum(axis=1)*df)
if pmode>=1:
ax = plt.axes([0.2, 0.15, 0.4, 0.7])
ax.bar([0], Ampl[0].mean(), color='gray', label='w/o laser')
ax.bar([1], Ampl[1].mean(), color='blue', label='laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', frameon=False)
if len(Ids) == 1:
i=0
plt.plot([0,1], [np.mean(Ampl[0]), np.mean(Ampl[1])], color=clrs[i], label=mouse_order[i])
else:
for i in range(nmice):
plt.plot([0,1], [Ampl[0][i], Ampl[1][i]], color=clrs[i], label=mouse_order[i])
box_off(ax)
plt.ylabel('EMG Ampl. ($\mathrm{\mu V}$)')
ax.set_xticks([0,1])
ax.set_xticklabels(['', ''])
# some basic stats
#[tstats, p] = stats.ttest_rel(Ampl[0], Ampl[1])
#print("Stats for EMG amplitude: t-statistics: %.3f, p-value: %.3f" % (tstats, p))
if len(fig_file) > 0:
save_figure(fig_file)
# Use seaborn to plot powerspectra with confidence intervals across mice:
# At some point I will make this the standard code
vals = []
if len(mouse_order) > 0:
mi = 0
for m in mouse_order:
for i in range(len(F)):
if len(mouse_order) > 1:
vals.append([0, m, Pow[0][mi][i], F[i]])
else:
vals.append([0, m, Pow[0][:,i].mean(), F[i]])
if pmode >= 1:
if len(mouse_order) > 1:
vals.append([1, m, Pow[1][mi][i], F[i]])
else:
vals.append([1, m, Pow[1][:,i].mean(), F[i]])
mi += 1
df = pd.DataFrame(columns=['Lsr', 'Idf', 'Pow', 'Freq'], data=vals)
if pplot:
plt.figure()
ax = plt.subplot(111)
if pmode >= 1:
sns.lineplot(x='Freq', y='Pow', hue='Lsr', data=df, ci=ci, palette={0:'gray', 1:'blue'})
else:
sns.lineplot(x='Freq', y='Pow', data=df, ci=ci, palette=['gray'])
box_off(ax)
plt.xlim([0, f_max])
return Pow, F, df
def set_awake(M, MSP, freq, mu=[10, 100]):
imu = np.where((freq>=mu[0]) & (freq<=mu[1]))[0]
df = freq[1]-freq[0]
widx = np.where(M==2)[0]
ampl = np.sqrt(MSP[imu, :].sum(axis=0)*df)
wampl = ampl[widx]
thr = wampl.mean() + 1*wampl.std()
awk_idx = widx[np.where(wampl>thr)[0]]
#qwk_idx = np.setdiff1d(widx, awk_idx)
M[awk_idx] = 5
return M
def sleep_spectrum_simple(ppath, recordings, istate=1, tstart=0, tend=-1, fmax=-1,
mu=[10,100], ci='sd', pmode=1, pnorm = False, pplot=True,
harmcs=0, harmcs_mode='iplt', iplt_level=0, peeg2=False,
pemg2=False, exclusive_mode=0, csv_files=[]):
"""
    calculate EEG power spectrum using the pre-calculated spectrogram saved in ppath/sp_"name".mat
:param ppath: base folder
:param recordings: list of recordings
:param istate: brain state for which power spectrum is computed.
1-REM, 2-Wake, 3-NREM, 5-"active wake"
:param tstart: use EEG/EMG starting from time point tstart [seconds]
:param tend: use EEG/EMG up to time point tend [seconds]; if tend=-1, use EEG/EMG till the end
:param fmax: maximum frequency shown on x-axis
    :param ci: 'sd' | int between 0 and 100 specifying the confidence interval
:param pmode: mode:
pmode == 0, just plot power spectrum for state istate and don't care about laser
pmode == 1, compare state during laser with baseline outside laser interval
:param pnorm: if True, normalize spectrogram by dividing each frequency band by its average power
:param pplot: if True, plot figure
    # What to do with episodes partially overlapping with laser
:param exclusive_mode: if > 0, apply some exception for episodes of state $istate,
that overlap with laser. Say there's a REM period that only partially overlaps with laser.
If $exclusive_mode == 1, then do not use the part w/o laser for the 'no laser' condition;
This can be relevant for closed-loop stimulation: The laser turns on after the
spontaneous onset of REM sleep. So, the initial part of REM sleep would be interpreted
as 'no laser', potentially inducing a bias, because at the very beginning the REM spectrum
looks different than later on.
If $exclusive_mode == 2, then add the part w/o laser to the 'with laser' condition.
        If $exclusive_mode == 0, then interpret the part w/o laser as 'w/o laser'.
# interpolating/discarding harmonics
:param harmcs, harmcs_mode, iplt_level: if $harmcs > 0 and $harmcs_mode == 'emg',
remove all harmonics of base frequency $harmcs, from the frequencies used
for EMG amplitude calculation; do nothing for harmonics in EEG
if $harmcs > 0 and $harmcs_mode == 'iplt', interpolate all harmonics by substituting the power
at the harmonic by a sum of the neighboring frequencies. If $iplt_level == 1, only
take one neighboring frequency below and above the harmonic,
if $iplt_level == 2, use the two neighboring frequencies above and below for the
interpolation
    :param peeg2: if True, use EEG2.mat instead of EEG.mat for EEG powerspectrum calculation
    :param pemg2: if True, use EMG2 for EMG amplitude calculation
:param csv_files: if two file names are provided, the results for EEG power spectrum
and EMG amplitude are saved to the csv files. The EEG powerspectrum is
saved to the first file.
:return (ps_mx, freq, df, df_amp)
ps_mx: dict: 0|1 -> np.array(no. mice x frequencies)
freq: vector with frequencies
df: DataFrame with EEG powerspectrum; columns: 'Idf', 'Freq', 'Pow', 'Lsr'
df_amp: DataFrame with EMG amplitude; columns: 'Idf', 'Amp', 'Lsr'
"""
def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
df = freq[2]-freq[1]
for h in np.arange(harmcs, f_max, harmcs):
i = np.argmin(np.abs(freq - h))
if np.abs(freq[i] - h) < df and h != 60:
if iplt_level == 2:
SP[i,:] = (SP[i-2:i,:] + SP[i+1:i+3,:]).mean(axis=0) * 0.5
elif iplt_level == 1:
SP[i,:] = (SP[i-1,:] + SP[i+1,:]) * 0.5
else:
pass
return SP
# def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
# df = freq[2]-freq[1]
# for h in np.arange(harmcs, f_max, harmcs):
# i = np.argmin(np.abs(freq - h))
# if np.abs(freq[i] - h) < df and h != 60:
# SP[i,:] = (SP[i-iplt_level:i,:] + SP[i+1:i+1+iplt_level,:]).mean(axis=0) * 0.5
# return SP
mice = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice.append(idf)
ps_mice = {0: {m:[] for m in mice}, 1: {m:[] for m in mice}}
amp_mice = {0: {m:0 for m in mice}, 1: {m:0 for m in mice}}
count_mice = {0: {m:0 for m in mice}, 1: {m:0 for m in mice}}
data = []
for rec in recordings:
print(rec)
emg_loaded = False
# load brain state
idf = re.split('_', rec)[0]
M = load_stateidx(ppath, rec)[0]
sr = get_snr(ppath, rec)
# number of time bins for each time bin in spectrogram
nbin = int(np.round(sr) * 2.5)
dt = nbin * (1/sr)
# determine start and end of time frame used for calculation
istart = int(np.round(tstart / dt))
if tend > -1:
iend = int(np.round(tend / dt))
else:
iend = len(M)
istart_eeg = istart*nbin
#iend_eeg = (iend-1)*nbin+1
iend_eeg = iend*nbin
M = M[istart:iend]
if istate == 5:
tmp = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)
if not pemg2:
MSP = tmp['mSP'][:,istart:iend]
freq_emg = tmp['freq']
else:
MSP = tmp['mSP2'][:,istart:iend]
freq_emg = tmp['freq']
emg_loaded = True
            # M and MSP are already restricted to [istart:iend] above
            M = set_awake(M, MSP, freq_emg, mu=mu)
if type(istate) == int:
idx = np.where(M==istate)[0]
else:
idx = np.array([], dtype='int')
for s in istate:
idx = np.concatenate((idx, np.where(M==s)[0]))
# load laser
if pmode == 1:
lsr = load_laser(ppath, rec)
idxs, idxe = laser_start_end(lsr[istart_eeg:iend_eeg])
# downsample EEG time to spectrogram time
idxs = [int(i/nbin) for i in idxs]
idxe = [int(i/nbin) for i in idxe]
laser_idx = []
for (i,j) in zip(idxs, idxe):
laser_idx += range(i,j+1)
laser_idx = np.array(laser_idx)
idx_lsr = np.intersect1d(idx, laser_idx)
idx_nolsr = np.setdiff1d(idx, laser_idx)
if exclusive_mode > 0 and exclusive_mode < 3:
#rm_idx = []
rem_seq = get_sequences(np.where(M==1)[0])
for s in rem_seq:
d = np.intersect1d(s, idx_lsr)
if len(d) > 0:
# that's the part of the REM period with laser
# that does not overlap with laser
drm = np.setdiff1d(s, d)
idx_nolsr = np.setdiff1d(idx_nolsr, drm)
if exclusive_mode == 2:
idx_lsr = np.union1d(idx_lsr, drm)
if exclusive_mode == 3:
rem_trig = so.loadmat(os.path.join(ppath, rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
trig_idx = np.where(rem_trig==1)[0]
idx_lsr = np.intersect1d(trig_idx, idx_lsr)
idx_nolsr = np.intersect1d(trig_idx, idx_nolsr)
######################################################################
# load EEG spectrogram
tmp = so.loadmat(os.path.join(ppath, rec, 'sp_%s.mat' % rec), squeeze_me=True)
if not peeg2:
SP = tmp['SP'][:,istart:iend]
else:
SP = tmp['SP2'][:, istart:iend]
if pnorm:
sp_mean = np.mean(SP, axis=1)
SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T)
freq = tmp['freq']
df = freq[1]-freq[0]
if fmax > -1:
ifreq = np.where(freq <= fmax)[0]
freq = freq[ifreq]
SP = SP[ifreq,:]
# load EMG spectrogram
if not emg_loaded:
tmp = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)
if not pemg2:
MSP = tmp['mSP'][:,istart:iend]
freq_emg = tmp['freq']
else:
MSP = tmp['mSP2'][:,istart:iend]
freq_emg = tmp['freq']
imu = np.where((freq_emg>=mu[0]) & (freq_emg<=mu[-1]))[0]
if harmcs > 0 and harmcs_mode == 'iplt':
SP = _interpolate_harmonics(SP, freq, fmax, harmcs, iplt_level)
MSP = _interpolate_harmonics(MSP, freq, fmax, harmcs, iplt_level)
if harmcs > 0 and harmcs_mode == 'emg':
harm_freq = np.arange(0, freq_emg.max(), harmcs)
for h in harm_freq:
imu = np.setdiff1d(imu, imu[np.where(np.round(freq_emg[imu], decimals=1)==h)[0]])
tmp = 0
for i in imu:
tmp += MSP[i,:] * (freq_emg[i]-freq_emg[i-1])
emg_ampl = np.sqrt(tmp)
else:
emg_ampl = np.sqrt(MSP[imu,:].sum(axis=0)*df)
###################################################
if pmode == 1:
count_mice[0][idf] += len(idx_nolsr)
count_mice[1][idf] += len(idx_lsr)
ps_lsr = SP[:,idx_lsr].sum(axis=1)
ps_nolsr = SP[:,idx_nolsr].sum(axis=1)
ps_mice[1][idf].append(ps_lsr)
ps_mice[0][idf].append(ps_nolsr)
amp_mice[1][idf] += emg_ampl[idx_lsr].sum()
amp_mice[0][idf] += emg_ampl[idx_nolsr].sum()
else:
count_mice[0][idf] += len(idx)
ps_nolsr = SP[:,idx].sum(axis=1)
ps_mice[0][idf].append(ps_nolsr)
amp_mice[0][idf] += emg_ampl[idx].sum()
lsr_cond = []
if pmode == 0:
lsr_cond = [0]
else:
lsr_cond = [0,1]
ps_mx = {0:[], 1:[]}
amp_mx = {0:[], 1:[]}
for l in lsr_cond:
mx = np.zeros((len(mice), len(freq)))
amp = np.zeros((len(mice),))
for (i,idf) in zip(range(len(mice)), mice):
mx[i,:] = np.array(ps_mice[l][idf]).sum(axis=0) / count_mice[l][idf]
amp[i] = amp_mice[l][idf] / count_mice[l][idf]
ps_mx[l] = mx
amp_mx[l] = amp
# transform data arrays to pandas dataframe
data_nolsr = list(np.reshape(ps_mx[0], (len(mice)*len(freq),)))
amp_freq = list(freq)*len(mice)
amp_idf = reduce(lambda x,y: x+y, [[b]*len(freq) for b in mice])
if pmode == 1:
data_lsr = list(np.reshape(ps_mx[1], (len(mice)*len(freq),)))
list_lsr = ['yes']*len(freq)*len(mice) + ['no']*len(freq)*len(mice)
data = [[a,b,c,d] for (a,b,c,d) in zip(amp_idf*2, amp_freq*2, data_lsr+data_nolsr, list_lsr)]
else:
list_lsr = ['no']*len(freq)*len(mice)
data = [[a,b,c,d] for (a,b,c,d) in zip(amp_idf, amp_freq, data_nolsr, list_lsr)]
df = | pd.DataFrame(columns=['Idf', 'Freq', 'Pow', 'Lsr'], data=data) | pandas.DataFrame |
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from impetuous.convert import create_synonyms , flatten_dict
from scipy.stats import rankdata
from scipy.stats import ttest_rel , ttest_ind , mannwhitneyu
from scipy.stats.mstats import kruskalwallis as kruskwall
from sklearn.decomposition import PCA
import itertools
import typing
def subArraysOf ( Array:list,Array_:list=None ) -> list :
if Array_ == None :
Array_ = Array[:-1]
if Array == [] :
if Array_ == [] :
return ( [] )
return( subArraysOf(Array_,Array_[:-1]) )
return([Array]+subArraysOf(Array[1:],Array_))
def permuter( inputs:list , n:int ) -> list :
# permuter( inputs = ['T2D','NGT','Female','Male'] , n = 2 )
return( [p[0] for p in zip(itertools.permutations(inputs,n))] )
def grouper ( inputs, n ) :
iters = [iter(inputs)] * n
return zip ( *iters )
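# Illustrative usage sketches (the values below are placeholders, not taken from the library):
#   permuter( ['T2D','NGT','Female','Male'] , 2 )   # -> all 12 ordered pairs, e.g. ('T2D', 'NGT'), ...
#   list( grouper( range(6) , 3 ) )                 # -> [(0, 1, 2), (3, 4, 5)]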
def whiten_data ( Xdf ) :
# REMEMBER BOYS AND GIRLS THIS IS SOMETHING YOU MIGHT NOT WANT TO DO :)
mean_center = lambda x: x-np.mean(x,0)
X = Xdf.values
u , s , v = np.linalg.svd( mean_center(X),full_matrices=False )
X_white = np.dot(X,np.dot( np.diag(s**-1),np.abs(v) )) # we don't know the sign
return ( pd.DataFrame( X_white,index=Xdf.index.values,columns=Xdf.columns ) )
def threshold ( E , A ) :
if not 'pandas' in str(type(A)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
thresholds_df = pd .DataFrame ( np.dot( E,A.T ) ,
columns = A .index ,
index = E .index ) .apply ( lambda x:x/np.sum(E,1) )
return ( thresholds_df )
def solve ( C = pd.DataFrame([ [10,1],[3,5] ]) ,
E = pd.DataFrame([ [25],[31] ]) ):
if not 'pandas' in str(type(C)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
recover = lambda U,S,Vt : np.dot(U*S,Vt)
cU, cS, cVt = np.linalg.svd(C, full_matrices=False )
cST = 1/cS
    pseudo_inverse = pd.DataFrame( recover(cVt.T,cST,cU.T) , index=C.columns ,columns=C.index )
    identity = np.dot(C,pseudo_inverse)
    TOLERANCE = np.max( np.sqrt( ( identity * ( ( 1-np.eye(len(np.diag(identity)))) ) )**2 ))
    return ( np.dot( pseudo_inverse,E),TOLERANCE )
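# Illustrative worked example for solve(): with the default C and E above, the call
#   x , tol = solve()
# returns x close to [[2],[5]] (since 10*2 + 1*5 = 25 and 3*2 + 5*5 = 31) together with a
# numerical tolerance estimate for the SVD-based pseudo inverse.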
import re
def find_category_variables( istr ) :
return ( re.findall( r'C\((.*?)\)', istr ) )
def encode_categorical( G = ['Male','Male','Female'] ):
#
    # CREATES A BINARY ENCODING MATRIX FROM THE SUPPLIED LIST
# USES A PANDAS DATAFRAME AS INTERMEDIATE FOR ERROR CHECKING
# THIS PUTS THE O IN OPLS (ORTHOGONAL)
#
ugl = list(set(G)) ; n = len(ugl) ; m = len(G)
lgu = { u:j for u,j in zip(ugl,range(n)) }
enc_d = pd.DataFrame( np.zeros(m*n).reshape(-1,n),columns=ugl )
for i in range ( m ) :
j = lgu[G[i]]
enc_d.iloc[i,j] = 1
return ( enc_d )
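# Illustrative usage sketch for encode_categorical (the values are placeholders):
#   encode_categorical( ['Male','Male','Female'] )
#   # returns a 3 x 2 one-hot DataFrame with columns 'Male' and 'Female' (column order may vary)
#   # and exactly one 1 per row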
def create_encoding_journal( use_categories, journal_df ) :
encoding_df = None
for category in use_categories :
catvals = journal_df.loc[category].to_list()
cat_encoding = encode_categorical( catvals )
cat_encoding.index = journal_df.columns.values
if encoding_df is None :
encoding_df = cat_encoding.T
else :
encoding_df = pd.concat([encoding_df,cat_encoding.T])
return ( encoding_df )
def quantify_density_probability ( rpoints , cutoff = None ) :
#
# DETERMINE P VALUES
    loc_pdf = lambda X,mean,variance : [ 1./np.sqrt(2.*np.pi*variance)*np.exp(-(x-mean)**2/(2.*variance)) for x in X ]
from scipy.special import erf as erf_
loc_cdf = lambda X,mean,variance : [ 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
loc_Q = lambda X,mean,variance : [ 1. - 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
M_,Var_ = np.mean(rpoints),np.std(rpoints)**2
#
# INSTEAD OF THE PROBABILTY DENSITY WE RETURN THE FRACTIONAL RANKS
# SINCE THIS ALLOWS US TO CALCULATE RANK STATISTICS FOR THE PROJECTION
corresponding_density = rankdata (rpoints,'average') / len(rpoints) # loc_pdf( rpoints,M_,Var_ )
corresponding_pvalue = loc_Q ( rpoints,M_,Var_ )
#
# HERE WE MIGHT BE DONE
if not cutoff is None :
resolution = 10. ; nbins = 100.
#
# ONLY FOR ASSESING
h1,h2 = np.histogram(rpoints,bins=int(np.ceil(len(rpoints)/resolution)))
bin_radius = 0.5 * ( h2[1:] + h2[:-1] )
radial_density = np.cumsum( h1 )/np.sum( h1 ) # lt
#
# NOW RETRIEVE ALL DENSITIES OF THE RADII
tol = 1./nbins
corresponding_radius = np.min( bin_radius[radial_density > cutoff/nbins] )
return ( corresponding_pvalue , corresponding_density, corresponding_radius )
return ( corresponding_pvalue , corresponding_density )
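# Illustrative usage sketch for quantify_density_probability (the input is a placeholder):
#   r = np.sqrt( np.sum( np.random.randn(1000,2)**2 , 1 ) )
#   p , rho = quantify_density_probability( r )              # right-tail p values and fractional ranks
#   p , rho , r_cut = quantify_density_probability( r , cutoff = 99.8 )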
def find_category_interactions ( istr ) :
all_cats = re.findall( r'C\((.*?)\)', istr )
interacting = [ ':' in c for c in istr.split(')') ][ 0:len(all_cats) ]
interacting_categories = [ [all_cats[i-1],all_cats[i]] for i in range(1,len(interacting)) if interacting[i] ]
return ( interacting_categories )
def create_encoding_data_frame ( journal_df , formula , bVerbose = False ) :
#
# THE JOURNAL_DF IS THE COARSE GRAINED DATA (THE MODEL)
# THE FORMULA IS THE SEMANTIC DESCRIPTION OF THE PROBLEM
#
interaction_pairs = find_category_interactions ( formula.split('~')[1] )
add_pairs = []
sjdf = set(journal_df.index)
if len( interaction_pairs ) > 0 :
for pair in interaction_pairs :
cpair = [ 'C('+p+')' for p in pair ]
upair = [ pp*(pp in sjdf)+cp*(cp in sjdf and not pp in sjdf) for (pp,cp) in zip( pair,cpair) ]
journal_df.loc[ ':'.join(upair) ] = [ p[0]+'-'+p[1] for p in journal_df.loc[ upair,: ].T.values ]
add_pairs.append(':'.join(upair))
use_categories = list(set(find_category_variables(formula.split('~')[1])))
cusecats = [ 'C('+p+')' for p in use_categories ]
use_categories = [ u*( u in sjdf) + cu *( cu in sjdf ) for (u,cu) in zip(use_categories,cusecats) ]
use_categories = [ *use_categories,*add_pairs ]
#
if len( use_categories )>0 :
encoding_df = create_encoding_journal ( use_categories , journal_df ).T
else :
encoding_df = None
#
if bVerbose :
print ( [ v for v in encoding_df.columns.values ] )
print ( 'ADD IN ANY LINEAR TERMS AS THEIR OWN AXIS' )
#
# THIS TURNS THE MODEL INTO A MIXED LINEAR MODEL
add_df = journal_df.loc[ [c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C('in c],: ]
if len(add_df)>0 :
if encoding_df is None :
encoding_df = add_df.T
else :
encoding_df = pd.concat([ encoding_df.T ,
journal_df.loc[ [ c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C(' in c] , : ] ]).T
return ( encoding_df.apply(pd.to_numeric) )
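# Illustrative usage sketch for create_encoding_data_frame (journal layout and formula are placeholders):
#   journal_df : rows = descriptors (e.g. 'Group', 'Sex'), columns = sample ids
#   encoding_df = create_encoding_data_frame( journal_df , 'Expression ~ C(Group) + C(Sex)' )
#   # encoding_df : rows = samples, columns = binary indicator variables for each category level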
def interpret_problem ( analyte_df , journal_df , formula , bVerbose=False ) :
#
# THE JOURNAL_DF IS THE COARSE GRAINED DATA (THE MODEL)
# THE ANALYTE_DF IS THE FINE GRAINED DATA (THE DATA)
# THE FORMULA IS THE SEMANTIC DESCRIPTION OF THE PROBLEM
#
interaction_pairs = find_category_interactions ( formula.split('~')[1] )
add_pairs = []
if len( interaction_pairs )>0 :
for pair in interaction_pairs :
journal_df.loc[ ':'.join(pair) ] = [ p[0]+'-'+p[1] for p in journal_df.loc[ pair,: ].T.values ]
add_pairs.append(':'.join(pair))
use_categories = list(set(find_category_variables(formula.split('~')[1])))
use_categories = [u for u in use_categories if 'C('+u+')' in set(formula.replace(' ','').split('~')[1].split('+'))]
use_categories = [ *use_categories,*add_pairs ]
#
if len( use_categories )>0 :
encoding_df = create_encoding_journal ( use_categories , journal_df ).T
else :
encoding_df = None
#
if bVerbose :
print ( [ v for v in encoding_df.columns.values ] )
print ( 'ADD IN ANY LINEAR TERMS AS THEIR OWN AXIS' )
#
# THIS TURNS THE MODEL INTO A MIXED LINEAR MODEL
add_df = journal_df.loc[ [c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C('in c],: ]
if len(add_df)>0 :
if encoding_df is None :
encoding_df = add_df.T
else :
encoding_df = pd.concat([ encoding_df.T ,
journal_df.loc[ [ c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C(' in c] , : ] ]).T
return ( encoding_df )
def calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
analyte_df = None , journal_df = None ,
bVerbose = False , synonyms = None ,
blur_cutoff = 99.8 , exclude_labels_from_centroids = [''] ,
study_axii = None , owner_by = 'tesselation' ):
if bVerbose :
print ( np.shape(encoding_df) )
print ( np.shape(analyte_df) )
print ( 'qx:',np.shape(quantx) )
print ( 'qy:',np.shape(quanty) )
print ( 'sx:',np.shape(scorex) )
print ( 'WILL ASSIGN OWNER BY PROXIMITY TO CATEGORICALS' )
if analyte_df is None or journal_df is None:
print ( 'USER MUST SUPPLY ANALYTE AND JOURNAL DATA FRAMES' )
exit(1)
#
# THESE ARE THE CATEGORICAL DESCRIPTORS
use_centroid_indices = [ i for i in range(len(encoding_df.columns.values)) if (
encoding_df.columns.values[i] not in set( exclude_labels_from_centroids )
) ]
#
use_centroids = list( quanty[use_centroid_indices] )
use_labels = list( encoding_df.columns.values[use_centroid_indices] )
#
if owner_by == 'tesselation' :
transcript_owner = [ use_labels[ np.argmin([ np.sum((xw-cent)**2) for cent in use_centroids ])] for xw in quantx ]
sample_owner = [ use_labels[ np.argmin([ np.sum((yw-cent)**2) for cent in use_centroids ])] for yw in scorex ]
#
if owner_by == 'angle' :
angular_proximity = lambda B,A : 1 - np.dot(A,B) / ( np.sqrt(np.dot(A,A))*np.sqrt(np.dot(B,B)) )
transcript_owner = [ use_labels[ np.argmin([ angular_proximity(xw,cent) for cent in use_centroids ])] for xw in quantx ]
sample_owner = [ use_labels[ np.argmin([ angular_proximity(yw,cent) for cent in use_centroids ])] for yw in scorex ]
#
# print ( 'PLS WEIGHT RADIUS' )
radius = lambda vector:np.sqrt(np.sum((vector)**2)) # radii
#
# print ( 'ESTABLISH LENGTH SCALES' )
xi_l = np.max(np.abs(quantx),0)
#
rpoints = np.array( [ radius( v/xi_l ) for v in quantx ] ) # HERE WE MERGE THE AXES
xpoints = np.array( [ radius((v/xi_l)[0]) for v in quantx ] ) # HERE WE USE THE X AXES
ypoints = np.array( [ radius((v/xi_l)[1]) for v in quantx ] ) # HERE WE USE THE Y AXES
#
# print ( 'ESTABLISH PROJECTION OF THE WEIGHTS ONTO THEIR AXES' )
proj = lambda B,A : np.dot(A,B) / np.sqrt( np.dot(A,A) )
#
# ADDING IN ADDITIONAL DIRECTIONS
# THAT WE MIGHT BE INTERESTED IN
if 'list' in str( type( study_axii ) ):
for ax in study_axii :
if len( set( ax ) - set( use_labels ) ) == 0 and len(ax)==2 :
axsel = np.array([ use_centroids[i] for i in range(len(use_labels)) if use_labels[i] in set(ax) ])
axis_direction = axsel[0]-axsel[1]
use_labels .append( '-'.join(ax) )
use_centroids .append( np.array(axis_direction) )
proj_df = pd.DataFrame( [ [ np.abs(proj(P/xi_l,R/xi_l)) for P in quantx ] for R in use_centroids ] ,
index = use_labels , columns=analyte_df.index.values )
#
# print ( 'P VALUES ALIGNED TO PLS AXES' )
for idx in proj_df.index :
proj_p,proj_rho = quantify_density_probability ( proj_df.loc[idx,:].values )
proj_df = proj_df.rename( index = {idx:idx+',r'} )
proj_df.loc[idx+',p'] = proj_p
proj_df.loc[idx+',rho'] = proj_rho
#
# print ( 'THE EQUIDISTANT 1D STATS' )
corresponding_pvalue , corresponding_density , corresponding_radius = quantify_density_probability ( rpoints , cutoff = blur_cutoff )
#
# print ( 'THE TWO XY 1D STATS' )
corr_pvalue_0 , corr_density_0 = quantify_density_probability ( xpoints )
corr_pvalue_1 , corr_density_1 = quantify_density_probability ( ypoints )
#
bOrderedAlphas = False
if True :
# DO ALPHA LEVELS BASED ON DENSITY
bOrderedAlphas = True
use_points = rpoints > corresponding_radius
ordered_alphas = [ float(int(u))*0.5 + 0.01 for u in use_points ]
result_dfs = []
#
# print ( 'COMPILE RESULTS FRAME' )
for ( lookat,I_ ) in [ ( quantx , 0 ) ,
( scorex , 1 ) ] :
lookat = [ [ l[0],l[1] ] for l in lookat ]
if I_ == 1 :
aidx = journal_df.columns.values
else :
aidx = analyte_df.index.values
qdf = pd.DataFrame( [v[0] for v in lookat] , index=aidx , columns = ['x'] )
qdf['y'] = [ v[1] for v in lookat ]
names = aidx
if I_ == 0 :
qdf[ 'owner' ] = transcript_owner
qdf['Corr,p' ] = corresponding_pvalue
qdf['Corr,r' ] = corresponding_density
qdf['Corr0,p'] = corr_pvalue_0
qdf['Corr0,r'] = corr_density_0
qdf['Corr1,p'] = corr_pvalue_1
qdf['Corr1,r'] = corr_density_1
qdf = pd.concat([qdf.T,proj_df]).T
if bOrderedAlphas :
qdf[ 'alpha' ] = ordered_alphas
else :
qdf['alpha'] = [ '0.3' for a in transcript_owner ]
else :
qdf['owner'] = sample_owner # The default should be the aligned projection weight
qdf['alpha'] = [ '0.2' for n in names ]
if synonyms is None :
qdf['name'] = names
else :
qdf['name'] = [ synonyms[v] if v in synonyms else v for v in names ]
result_dfs.append(qdf.copy())
return ( result_dfs )
def run_rpls_regression ( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] , pls_components = 2,
bDeveloperTesting = False ,
study_axii = None , owner_by = 'tesselation'
) :
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
from sklearn.cross_decomposition import PLSRegression as PLS
if not bDeveloperTesting :
pls_components = 2
rpls = PLS( pls_components )
rpls_res = rpls.fit( X = analyte_df.T.values ,
Y = encoding_df .values )
quantx,quanty = rpls_res.x_weights_ , rpls_res.y_weights_
scorex = rpls_res.x_scores_
res_df = calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
journal_df = journal_df, analyte_df = analyte_df , blur_cutoff = blur_cutoff ,
bVerbose = bVerbose, exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by )
return ( res_df )
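#
# ILLUSTRATIVE USAGE SKETCH ( NOT PART OF THE LIBRARY ; ASSUMES FRAMES SHAPED
# AS IN THE interpret_problem SKETCH ABOVE ) :
# THE CALL RETURNS TWO FRAMES , ONE FOR THE ANALYTES AND ONE FOR THE SAMPLES ,
# EACH WITH x , y COORDINATES , AN OWNER LABEL AND ALIGNMENT STATISTICS
#
# analyte_results , sample_results = run_rpls_regression ( analyte_df , journal_df ,
#                                                          'Expression ~ C(Status)' )
#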
import impetuous.fit as ifit
import impetuous.clustering as icluster
def run_shape_alignment_clustering ( analyte_df , journal_df , formula, bVerbose = False ) :
NOTE_ = "This is just a kmeans in arbitrary dimensions that start out with centroids that have been shape aligned"
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
Q = encoding_df.T.apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).values
P = analyte_df .apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).values
centroids = ifit.ShapeAlignment( P, Q ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = encoding_df.columns ,
columns = encoding_df.index )
lookup_ = {i:n for n,i in zip( centroids_df.index,range(len(centroids_df.index)) ) }
labels , centroids = icluster.seeded_kmeans( P , centroids )
res_df = pd.DataFrame( [labels] , columns=analyte_df.index , index=['cluster index'] )
nam_df = pd.DataFrame( [ lookup_[l] for l in labels ] ,
columns = ['cluster name'] , index = analyte_df.index ).T
res_df = pd.concat( [ res_df , nam_df ] )
clusters_df = pd.concat( [ centroids_df, pd.DataFrame( res_df.T.groupby('cluster name').apply(len),columns=['size']) ] ,axis=1 )
return ( res_df , clusters_df )
def knn_clustering_alignment ( P, Q , bHighDim = False ) :
print ( "DOING KMEANS ALIGNMENT INSTEAD" )
return ( kmeans_clustering_alignment( P , Q , bHighDim = bHighDim ) )
def kmeans_clustering_alignment( P , Q , bHighDim=False ) :
NOTE_ = "This is just a standard kmeans in arbitrary dimensions that start out with centroids that have been shape aligned"
ispanda = lambda P: 'pandas' in str(type(P)).lower()
BustedPanda = lambda R : R.values if ispanda(R) else R
P_ = BustedPanda ( P )
Q_ = BustedPanda ( Q )
if bHighDim :
centroids = ifit .HighDimensionalAlignment ( P_ , Q_ )
else :
centroids = ifit .ShapeAlignment ( P_ , Q_ ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
if ispanda ( Q ) :
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = Q.index ,
columns = Q.columns )
lookup_ = {i:n for n,i in zip( centroids_df.index,range(len(centroids_df.index)) ) }
labels , centroids = icluster.seeded_kmeans( P_ , centroids )
if ispanda ( Q ) and ispanda ( P ) :
#
# MORE DIAGNOSTICS
res_df = pd.DataFrame( [labels] , columns=P.index , index=['cluster index'] )
res_df .loc[ 'cluster name' ] = [ lookup_[l] for l in res_df.loc['cluster index'].values ]
print ( res_df )
return ( np.array(labels), np.array(centroids) )
def tol_check( val, TOL=1E-10 ):
if val > TOL :
print ( "WARNING: DATA ENTROPY HIGH (SNR LOW)", val )
ispanda = lambda P : 'pandas' in str(type(P)).lower()
def multifactor_solution ( analyte_df , journal_df , formula ,
bLegacy = False ) :
A , J , f = analyte_df , journal_df , formula
if bLegacy :
encoding_df = interpret_problem ( analyte_df = A , journal_df = J , formula = f ).T
else :
encoding_df = create_encoding_data_frame ( journal_df = J , formula = f ).T
solution_ = solve ( A.T, encoding_df.T )
tol_check ( solution_[1] )
beta_df = pd.DataFrame ( solution_[0] , index=A.index , columns=encoding_df.index )
U, S, VT = np.linalg.svd ( beta_df.values,full_matrices=False )
P = pd.DataFrame( U.T , index = [ 'Comp'+str(r) for r in range(len(U.T))] , columns = A.index )
W = pd.DataFrame( VT , index = [ 'Comp'+str(r) for r in range(len(U.T))] , columns = encoding_df.index )
Z = threshold ( encoding_df.T , S*W ) .T
return ( P.T , W.T , Z.T , encoding_df.T , beta_df )
def multifactor_evaluation ( analyte_df , journal_df , formula ) :
#
    # ALTHOUGH A GOOD METHOD IT IS STILL NOT SUFFICIENT
#
P, W, Z, encoding_df , beta_df = multifactor_solution ( analyte_df , journal_df , formula )
eval_df = beta_df.apply(lambda x:x**2)
all = [beta_df]
for c in eval_df.columns :
all.append ( pd.DataFrame ( quantify_density_probability ( eval_df.loc[:,c].values ),
index = [c+',p',c+',r'], columns=eval_df.index ).T)
res_df = pd.concat( all,axis=1 )
for c in res_df.columns:
if ',p' in c:
q = [ qv[0] for qv in qvalues(res_df.loc[:,c].values) ]
res_df.loc[:,c.split(',p')[0]+',q'] = q
return ( res_df )
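#
# ILLUSTRATIVE USAGE SKETCH ( NOT PART OF THE LIBRARY ; FRAME SHAPES AND THE
# 'Sick' LABEL AS IN THE interpret_problem SKETCH ABOVE ) :
# THE RESULT HOLDS ONE ROW PER ANALYTE WITH THE REGRESSION COEFFICIENTS AND ,
# PER ENCODING , COLUMNS ENDING IN ',p' , ',q' AND ',r'
#
# res_df = multifactor_evaluation ( analyte_df , journal_df , 'Expression ~ C(Status)' )
# significant = res_df.loc[ res_df.loc[:,'Sick,q'] < 0.05 , : ]
#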
def regression_assessment ( model , X , y , bLog = False ) :
desc_ = """
ALTERNATIVE NAIVE MODEL ASSESSMENT FOR A REGRESSION MODEL
!PRVT2D1701CM5487!
"""
y_ = y
coefs = model.coef_
mstat = dict()
if bLog :
X = np.array( [ [ np.log(x) for x in xx ] for xx in X ])
yp = np.exp(np.dot( coefs, X ) + model.intercept_ )
else :
yp = (np.dot( coefs, X ) + model.intercept_ )
#
n = len ( y_ ) ; p = len(coefs)
ym = np.mean( y_ ) # CRITICAL DIMENSION ...
#
# BZ FORMS
TSS = np.array([ np.sum(( y_ - ym ) ** 2, axis=0) ])[0]; dof_tss = n-1 ; mstat['TSS'] = TSS
RSS = np.array([ np.sum(( y_ - yp ) ** 2, axis=0) ])[0]; dof_rss = n-p ; mstat['RSS'] = RSS
ESS = np.array([ np.sum(( yp - ym ) ** 2, axis=0) ])[0]; dof_ess = p-1 ; mstat['ESS'] = ESS
mstat['dof_tss'] = dof_tss ; mstat['dof_rss'] = dof_rss ; mstat['dof_ess'] = dof_ess
#
TMS = TSS / dof_tss ; mstat['TMS'] = TMS
RMS = RSS / dof_rss ; mstat['RMS'] = RMS
EMS = ESS / dof_ess ; mstat['EMS'] = EMS
#
# F-TEST
    # F0 = EMS/RMS IS TESTED AGAINST AN F( dof_ess , dof_rss ) DISTRIBUTION
    dof_numerator = dof_ess
    dof_denominator = dof_rss
from scipy.stats import f
fdist = f( dof_numerator , dof_denominator )
f0 = EMS / RMS
#
mstat['dof_numerator'] = dof_numerator
mstat['dof_denominator'] = dof_denominator
mstat['p-value'] = 1 - fdist.cdf(f0)
mstat['f0'] = f0
mstat['yp'] = yp
mstat['model'] = model
#
return ( mstat )
def proj_c ( P ) :
    # P CONTAINS MUTUALLY ORTHOGONAL COMPONENTS ALONG THE COLUMNS
    # THE CS CALCULATION MIGHT SEEM STRANGE BUT FULFILS THE PURPOSE
    if not ispanda(P) : # CHECK THAT THE INPUT IS A PANDAS DATAFRAME
        print ( "FUNCTION REQUIRES A PANDAS DATA FRAME" )
CS = P.T.apply( lambda x: pd.Series( [x[0],x[1]]/np.sqrt(np.sum(x**2)),index=['cos','sin']) ).T
RHO = P.T.apply( lambda x: np.sqrt(np.sum(x**2)) )
CYL = pd.concat( [RHO*CS['cos'],RHO*CS['sin']],axis=1 )
CYL.columns = ['X','Y']
return ( CYL )
def multivariate_factorisation ( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] ,
bDeveloperTesting = False , bReturnAll = False ,
study_axii = None , owner_by = 'angle' ,
bDoRecast = False , bUseThresholds = False ) :
P, W, Z, encoding_df , beta_df = multifactor_solution ( analyte_df , journal_df , formula )
#
# USE THE INFLATION PROJECTION AS DEFAULT
if not bUseThresholds :
aA = np.linalg.svd ( analyte_df - np.mean(np.mean(analyte_df)) , full_matrices=False )
aE = np.linalg.svd ( encoding_df.T , full_matrices=False )
Z = pd.DataFrame ( np.dot( np.dot( W.T , aE[-1] ), aA[-1]) ,
columns = encoding_df.T.columns ,
index= [ 'mComp' + str(r) for r in range(len(aE[-1]))]
).T
if bDoRecast :
print ( "WARNING: THROWING AWAY INFORMATION IN ORDER TO DELIVER A" )
print ( " VISUALLY MORE PLEASING POINT CLOUD ... ")
P = proj_c( P )
W = proj_c( W )
Z = proj_c( Z )
res_df = calculate_alignment_properties ( encoding_df ,
quantx = P.values , quanty = W.values , scorex = Z.values ,
journal_df = journal_df , analyte_df = analyte_df ,
blur_cutoff = blur_cutoff , bVerbose = bVerbose ,
exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by )
if bReturnAll :
        return ( { 'Multivariate Solutions' : res_df ,
'Feature Scores' : P , 'Encoding Weights' : W ,
'Sample Scores' : Z , 'Encoding DataFrame' : encoding_df })
else :
return ( res_df )
def associations ( M , W = None , bRanked = True ) :
ispanda = lambda P : 'pandas' in str(type(P)).lower()
if not ispanda( M ) :
print ( "FUNCTION ",'recast_alignments'," REQUIRES ", 'M'," TO BE A PANDAS DATAFRAME" )
bValid = False
if not W is None :
if not len(W.columns.values) == len(M.columns.values):
W = M
else:
bValid = True
else :
W = M
if bRanked :
from scipy.stats import rankdata
M = ( M.T.apply(lambda x:rankdata(x,'average')).T-0.5 )/len(M.columns)
W = ( W.T.apply(lambda x:rankdata(x,'average')).T-0.5 )/len(W.columns)
rho1 = M.T.apply( lambda x:np.sqrt( np.dot( x,x ) ) )
rho2 = rho1
if bValid :
rho2 = W.T.apply( lambda x:np.sqrt( np.dot( x,x ) ) )
R2 = pd.DataFrame( np.array([np.array([r]) for r in rho1.values])*[rho2.values] ,
index = rho1.index, columns = rho2.index )
PQ = pd.DataFrame( np.dot( M,W.T ), index = rho1.index, columns = rho2.index )
res = PQ/R2
return ( res )
crop = lambda x,W:x[:,:W]
def run_shape_alignment_regression( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] ,
study_axii = None , owner_by = 'tesselation' ,
transform = crop ) :
print ( 'WARNING: STILL UNDER DEVELOPMENT' )
print ( 'WARNING: DEFAULT IS TO CROP ALIGNED FACTORS!!')
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
Q = encoding_df.T.apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).copy().values
P = analyte_df .apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).copy().values
centroids = ifit.ShapeAlignment( P, Q ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = encoding_df.columns ,
columns = encoding_df.index )
xws = ifit.WeightsAndScoresOf( P )
yws = ifit.WeightsAndScoresOf( centroids )
W = np.min( [*np.shape(xws[0]),*np.shape(yws[0])] )
quantx = transform( xws[0],W )
quanty = transform( yws[0],W )
scorex = transform( xws[1],W )
res_df = calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
analyte_df = analyte_df.copy() , journal_df = journal_df.copy() ,
blur_cutoff = blur_cutoff , bVerbose = bVerbose,
exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by, synonyms=synonyms )
return ( res_df )
def add_foldchanges ( df, information_df , group='', fc_type=0 , foldchange_indentifier = 'FC,') :
all_vals = list(set(information_df.loc[group].values))
pair_values = [all_vals[i] for i in range(len(all_vals)) if i<2 ]
group1 = df.iloc[:,[n in pair_values[0] for n in information_df.loc[group].values] ].T
group2 = df.iloc[:,[n in pair_values[1] for n in information_df.loc[group].values] ].T
if fc_type == 0:
FC = np.mean(group1.values,0) - np.mean(group2.values,0)
if fc_type == 1:
        FC = np.log2( np.mean(group1.values,0) / np.mean(group2.values,0) ) # LOG2 OF THE GROUP MEAN RATIO
FCdf = pd.DataFrame(FC,index=df.index,columns=[foldchange_indentifier+'-'.join(pair_values) ] )
df = pd.concat([df.T,FCdf.T]).T
return ( df )
from statsmodels.stats.multitest import multipletests
def adjust_p ( pvalue_list , method = 'fdr_bh' , alpha = 0.05,
check_r_bh = False , is_sorted = False ,
returnsorted = False
) :
""" WRAPPER FOR MULTIPLE HYPOTHESIS TESTING
pvalue_list = [0.00001,0.01,0.0002,0.00005,0.01,0.1,0.2,0.4,0.5,0.6,0.7,0.8,0.9,0.99,0.0114,0.15,0.23,0.20]
"""
available_methods = set( [ 'bonferroni' , 'sidak',
'holm-sidak' , 'holm' , 'simes-hochberg' ,
'hommel' , 'fdr_bh' , 'fdr_by' , 'fdr_tsbh' ,
'fdr_tsbky' ] )
if method not in available_methods :
print ( available_methods )
r_equiv = { 'fdr_bh':'BH' }
if check_r_bh and method in r_equiv :
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
r_stats = importr('stats')
p_adjust = r_stats.p_adjust ( FloatVector(pvalue_list), method = r_equiv[method] )
else :
p_adjust_results = multipletests ( pvalue_list, alpha=alpha, method=method,
is_sorted = is_sorted , returnsorted = returnsorted )
p_adjust = [ p_adj for p_adj in p_adjust_results[1] ]
return ( p_adjust )
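#
# ILLUSTRATIVE USAGE SKETCH ( COMMENTED OUT ; THE P-VALUES BELOW ARE MADE UP ) :
#
# ps = [ 0.00001 , 0.01 , 0.0002 , 0.00005 , 0.05 , 0.5 ]
# adjusted = adjust_p ( ps , method = 'fdr_bh' , alpha = 0.05 )
#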
def qvalues ( p_values_in , pi0 = None ) :
p_s = p_values_in
if pi0 is None :
pi0 = 1.
qs_ = []
m = float(len(p_s)) ; itype = str( type( p_s[0] ) ) ; added_info = False
if 'list' in itype or 'tuple' in itype :
added_info = True
ps = [ p[0] for p in p_s ]
else :
ps = p_s
frp_ = rankdata( ps,method='ordinal' )/m
ifrp_ = [ ( (p<=f)*f + p*(p>f) ) for p,f in zip(ps,frp_) ]
for ip in range(len(ps)) :
p_ = ps[ ip ] ; f_ = frp_[ip]
q_ = pi0 * p_ / ifrp_[ip]
qs_.append( (q_,p_) )
if added_info :
q = [ tuple( [qvs[0]]+list(pinf) ) for ( qvs,pinf ) in zip(qs_,p_s) ]
qs_ = q
return qs_
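#
# ILLUSTRATIVE USAGE SKETCH ( COMMENTED OUT ; THE P-VALUES BELOW ARE MADE UP ) :
# THE FUNCTION RETURNS ( q , p ) TUPLES IN THE INPUT ORDER
#
# qps = qvalues ( [ 0.001 , 0.02 , 0.5 , 0.04 ] )
# qs  = [ q for q,p in qps ]
#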
class Qvalues ( object ) :
def __init__( self, pvalues:np.array , method:str = "UNIQUE" , pi0:np.array = None ) :
from scipy.stats import rankdata
self.rankdata = rankdata
self.method : str = method
self.pvalues : np.array = pvalues
self.qvalues : np.array = None
self.qpres : np.array = None
if method == "FDR-BH" :
self.qpres = self.qvaluesFDRBH ( self.pvalues )
if method == "QVALS" :
self.qpres = self.qvaluesFDRBH ( self.pvalues , pi0 )
if method == "UNIQUE" :
self.qpres = self.qvaluesUNIQUE ( self.pvalues , pi0 )
def __str__ ( self ) :
return ( self.info() )
def __repr__( self ) :
return ( self.info() )
def help ( self ) :
desc__ = "\n\nRANK CORRECTION FOR P-VALUES\nVIABLE METHODS ARE method = FDR-BH , QVALS , UNIQUE\n\n EMPLOYED METHOD: " + self.method
return ( desc__ )
def info ( self ) :
desc__ = "\nMETHOD:"+self.method+"\n q-values \t p-values\n"
return ( desc__+'\n'.join( [ ' \t '.join(["%10.10e"%z for z in s]) for s in self.qpres ] ) )
def get ( self ) :
return ( self.qpres )
def qvaluesFDRBH ( self , p_values_in:np.array = None , pi0:np.array = None ) :
p_s = p_values_in
if p_s is None :
p_s = self.pvalues
m = int(len(p_s))
if pi0 is None :
pi0 = np.array([1. for i in range(m)])
qs_ = []
ps = p_s
frp_ = (self.rankdata( ps,method='ordinal' )-0.5)/m
ifrp_ = [ ( (p<=f)*f + p*(p>f) ) for p,f in zip(ps,frp_) ]
for ip,p0 in zip(range(m),pi0) :
p_ = ps[ ip ] ; f_ = frp_[ip]
q_ = p0 * p_ / ifrp_[ip]
qs_.append( (q_,p_) )
self.qvalues = np.array([q[0] for q in qs_])
return np.array(qs_)
def qvaluesUNIQUE ( self , p_values_in = None , pi0 = None ) :
p_s = p_values_in
if p_s is None :
p_s = self.pvalues
m = int(len(set(p_s)))
n = int(len(p_s))
if pi0 is None :
pi0 = np.array([1. for i in range(n)])
qs_ = []
ps = p_s
frp_ = (self.rankdata( ps,method='average' )-0.5)/m
ifrp_ = [ ( (p<=f)*f + p*(p>f) ) for p,f in zip(ps,frp_) ]
for ip,p0 in zip( range(n),pi0 ) :
p_ = ps[ ip ] ; f_ = frp_[ip]
q_ = p0 * p_ / ifrp_[ip]
qs_.append( (q_,p_) )
self.qvalues = np.array([q[0] for q in qs_])
return np.array(qs_)
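#
# ILLUSTRATIVE USAGE SKETCH ( COMMENTED OUT ; THE P-VALUES BELOW ARE MADE UP ) :
#
# import numpy as np
# qv = Qvalues ( np.array([ 0.001 , 0.02 , 0.5 , 0.04 ]) , method = 'FDR-BH' )
# print ( qv )          # TABULATES Q-VALUES NEXT TO THE INPUT P-VALUES
# qp = qv.get()         # ARRAY OF ( q , p ) ROWS
#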
class Pvalues ( object ) :
def __init__( self, data_values:np.array , method:str = "RANK DERIV E" ) :
from scipy.stats import rankdata
self.rankdata = rankdata
self.method : str = method
self.dvalues : np.array = data_values
self.pvalues : np.array = None
self.dsdrvalues : np.array = None
self.dpres : np.array = None
if method == "RANK DERIV E" :
self.dpres = self.pvalues_dsdr_e ( self.dvalues , True)
if method == "RANK DERIV N" :
self.dpres = self.pvalues_dsdr_n ( self.dvalues , True )
if method == "NORMAL" :
self.dpres = self.normal_pvalues ( self.dvalues , True )
self.pvalues = self.dpres[0]
def __str__ ( self ) :
return ( self.info() )
def __repr__( self ) :
return ( self.info() )
def help ( self ) :
#
# PVALUES FROM "RANK DERIVATIVES"
#
desc__ = "\n\nRANK DERIVATIVE P-VALUES\nVIABLE METHODS ARE method = RANK DERIV E, RANK DERIV N \n\n EMPLOYED METHOD: " + self.method
return ( desc__ )
def info ( self ) :
desc__ = "\nMETHOD:"+self.method+"\n p-values \t ds-values\n"
return ( desc__+'\n'.join( [ ' \t '.join(["%10.10e"%z for z in s]) for s in self.dpres.T ] ) )
    def get ( self ) :
        return ( self.dpres )
def sgn ( self, x:float) -> int :
return( - int(x<0) + int(x>=0) )
def nn ( self, N:int , i:int , n:int=1 )->list :
t = [(i-n)%N,(i+n)%N]
if i-n<0 :
t[0] = 0
t[1] += n-i
if i+n>=N :
t[0] -= n+i-N
t[1] = N-1
return ( t )
def normal_pvalues ( self, v:np.array , bReturnDerivatives:bool=False ) -> np.array :
ds = v # TRY TO ACT LIKE YOU ARE NORMAL ...
N = len(v)
M_ , Var_ = np.mean(ds) , np.std(ds)**2
from scipy.special import erf as erf_
loc_Q = lambda X,mean,variance : [ 1. - 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
rv = loc_Q ( ds,M_,Var_ )
if bReturnDerivatives :
rv = [*rv,*ds ]
return ( np.array(rv).reshape(-1,N) )
def pvalues_dsdr_n ( self, v:np.array ,
bReturnDerivatives:bool=False ,
bSymmetric:bool=True ) -> np.array :
#
N = len(v)
vsym = lambda a,b : a*self.sgn(a) if b else a
import scipy.stats as st
rv = st.rankdata(v,'ordinal') - 1
vr = { int(k):v for k,v in zip(rv,range(len(rv)))}
ds = []
for w,r in zip(v,rv) :
nr = self.nn(N,int(r),1)
nv = [ vr[j] for j in nr ]
s_ = [ v[j] for j in sorted(list(set( [ *[vr[int(r)]] , *nv ] )) ) ]
dsv = np.mean( np.diff(s_) )
ds.append( vsym( dsv , bSymmetric) ) # DR IS ALWAYS 1
M_,Var_ = np.mean(ds) , np.std(ds)**2
from scipy.special import erf as erf_
loc_Q = lambda X,mean,variance : [ 1. - 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
rv = loc_Q ( ds,M_,Var_ )
if bReturnDerivatives :
rv = [*rv,*ds ]
return ( np.array(rv).reshape(-1,N) )
def pvalues_dsdr_e ( self, v:np.array ,
bReturnDerivatives:bool=False ,
bSymmetric:bool=True ) -> np.array :
#
N = len(v)
vsym = lambda a,b : a*self.sgn(a) if b else a
import scipy.stats as st
rv = st.rankdata(v,'ordinal') - 1
vr = { int(k):v for k,v in zip(rv,range(len(rv)))}
ds = []
for w,r in zip(v,rv) :
nr = self.nn(N,int(r),1)
nv = [ vr[j] for j in nr ]
s_ = [ v[j] for j in sorted(list(set( [ *[vr[int(r)]] , *nv ] )) ) ]
dsv = np.mean( np.diff(s_) )
ds.append( vsym( dsv , bSymmetric) ) # DR IS ALWAYS 1
M_ = np.mean ( ds )
loc_E = lambda X,L_mle : [ np.exp(-L_mle*x) for x in X ]
        ev = loc_E ( ds,1.0/M_) # EXP DISTRIBUTION P
        rv = ev
        if bReturnDerivatives :
            rv = [*ev,*ds ]
        return ( np.array(rv).reshape(-1,N) )
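#
# ILLUSTRATIVE USAGE SKETCH ( COMMENTED OUT ; THE SCORES BELOW ARE RANDOM ) :
#
# import numpy as np
# scores = np.random.randn ( 100 )
# pv = Pvalues ( scores , method = 'RANK DERIV E' )
# p  = pv.pvalues
#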
class MultiFactorAnalysis ( object ) :
def __init__( self, analyte_df, journal_df, formula ) :
#super(MultiFactorAnalysis,self).__init__()
self.rankdata = rankdata
self.A = analyte_df
self.J = journal_df
self.f = formula
self.E = None
self.C = None
self.B = None
self.R = None
self.TOL = None
self.multifactor_evaluation ( self.A , self.J , self.f )
#print ( self.predict(self.A.iloc[:,0],'male') )
def fit(self, analyte_df, journal_df, formula) :
self.__init__( analyte_df, journal_df, formula )
def tol_check ( self, val, TOL=1E-10 ):
if val > TOL :
print ( "WARNING: DATA ENTROPY HIGH (SNR LOW)", val )
def recover ( self, U, S, Vt ):
return ( np.dot(U*S,Vt) )
def qvalues ( self, p_values_in , pi0 = None ) :
p_s = p_values_in
if pi0 is None :
pi0 = 1.
qs_ = []
m = float(len(p_s)) ; itype = str( type( p_s[0] ) ) ; added_info = False
if 'list' in itype or 'tuple' in itype :
added_info = True
ps = [ p[0] for p in p_s ]
else :
ps = p_s
frp_ = rankdata( ps,method='ordinal' )/m
ifrp_ = [ ( (p<=f)*f + p*(p>f) ) for p,f in zip(ps,frp_) ]
for ip in range(len(ps)) :
p_ = ps[ ip ] ; f_ = frp_[ip]
q_ = pi0 * p_ / ifrp_[ip]
qs_.append( (q_,p_) )
if added_info :
q = [ tuple( [qvs[0]]+list(pinf) ) for ( qvs,pinf ) in zip(qs_,p_s) ]
qs_ = q
return qs_
def solve ( self , C=None , E=None ) :
if C is None :
C = self.C
if E is None :
E = self.E
if not 'pandas' in str(type(C)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
cU, cS, cVt = np.linalg.svd(C, full_matrices=False )
cST = 1/cS
        pseudo_inverse = pd.DataFrame( self.recover(cVt.T,cST,cU.T) , index=C.columns ,columns=C.index )
        identity  = np.dot( C , pseudo_inverse )
        TOLERANCE = np.max( np.sqrt( ( identity * ( ( 1-np.eye(len(np.diag(identity)))) ) )**2 ))
        self.B    = np.dot( pseudo_inverse,E )
self.TOL = TOLERANCE
return ( self.B , self.TOL )
def encode_categorical ( self , G = ['A','B'] ):
#
        # CREATES A BINARY ENCODING MATRIX FROM THE SUPPLIED LIST
# USES A PANDAS DATAFRAME AS INTERMEDIATE FOR ERROR CHECKING
#
ugl = list(set(G)) ; n = len(ugl) ; m = len(G)
lgu = { u:j for u,j in zip(ugl,range(n)) }
enc_d = pd.DataFrame( np.zeros(m*n).reshape(-1,n),columns=ugl )
for i in range ( m ) :
j = lgu[G[i]]
enc_d.iloc[i,j] = 1
return ( enc_d )
def create_encoding_journal ( self , use_categories, journal_df ) :
encoding_df = None
for category in use_categories :
catvals = journal_df.loc[category].to_list()
cat_encoding = self.encode_categorical( catvals )
cat_encoding.index = journal_df.columns.values
if encoding_df is None :
encoding_df = cat_encoding.T
else :
encoding_df = pd.concat([encoding_df,cat_encoding.T])
return ( encoding_df )
def find_category_interactions ( self , istr ) :
all_cats = re.findall( r'C\((.*?)\)', istr )
interacting = [ ':' in c for c in istr.split(')') ][ 0:len(all_cats) ]
interacting_categories = [ [all_cats[i-1],all_cats[i]] for i in range(1,len(interacting)) if interacting[i] ]
return ( interacting_categories )
def create_encoding_data_frame ( self, journal_df=None , formula=None , bVerbose = False ) :
#
# THE JOURNAL_DF IS THE COARSE GRAINED DATA (THE MODEL)
# THE FORMULA IS THE SEMANTIC DESCRIPTION OF THE PROBLEM
#
if journal_df is None :
journal_df = self.J
if formula is None :
formula = self.f
interaction_pairs = self.find_category_interactions ( formula.split('~')[1] )
add_pairs = []
sjdf = set(journal_df.index)
if len( interaction_pairs ) > 0 :
for pair in interaction_pairs :
cpair = [ 'C('+p+')' for p in pair ]
upair = [ pp*(pp in sjdf)+cp*(cp in sjdf and not pp in sjdf) for (pp,cp) in zip( pair,cpair) ]
journal_df.loc[ ':'.join(upair) ] = [ p[0]+'-'+p[1] for p in journal_df.loc[ upair,: ].T.values ]
add_pairs.append(':'.join(upair))
use_categories = list(set(find_category_variables(formula.split('~')[1])))
cusecats = [ 'C('+p+')' for p in use_categories ]
use_categories = [ u*( u in sjdf) + cu *( cu in sjdf ) for (u,cu) in zip(use_categories,cusecats) ]
use_categories = [ *use_categories,*add_pairs ]
#
if len( use_categories ) > 0 :
encoding_df = self.create_encoding_journal ( use_categories , journal_df ).T
else :
encoding_df = None
#
        if bVerbose and not encoding_df is None :
print ( [ v for v in encoding_df.columns.values ] )
print ( 'ADD IN ANY LINEAR TERMS AS THEIR OWN AXIS' )
#
# THIS TURNS THE MODEL INTO A MIXED LINEAR MODEL
add_df = journal_df.loc[ [c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C('in c],: ]
if len(add_df)>0 :
if encoding_df is None :
encoding_df = add_df.T
else :
encoding_df = pd.concat([ encoding_df.T ,
journal_df.loc[ [ c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C(' in c] , : ] ]).T
self.E = encoding_df.apply(pd.to_numeric)
return ( self.E )
def threshold ( self, E , A ) :
if not 'pandas' in str(type(A)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
thresholds_df = pd .DataFrame ( np.dot( E,A.T ) ,
columns = A .index ,
index = E .index ) .apply ( lambda x:x/np.sum(E,1) )
return ( thresholds_df )
def multifactor_solution ( self , analyte_df=None , journal_df=None ,
formula=None , bLegacy = False ) :
A , J , f = analyte_df , journal_df , formula
if A is None :
A = self.A
if J is None :
J = self.J
if f is None :
f = self.f
encoding_df = self.create_encoding_data_frame ( journal_df = J , formula = f ).T
encoding_df.loc['NormFinder'] = np.array([1 for i in range(len(encoding_df.columns))])
self.E = encoding_df
solution_ = self.solve ( A.T, encoding_df.T )
self.tol_check ( solution_[1] )
beta_df = pd.DataFrame ( solution_[0] , index=A.index , columns=encoding_df.index )
self.B = beta_df
return ( encoding_df.T , beta_df )
def quantify_density_probability ( self , rpoints , cutoff = None ) :
#
# DETERMINE P VALUES
        loc_pdf = lambda X,mean,variance : [ 1./np.sqrt(2.*np.pi*variance)*np.exp(-(x-mean)**2/(2.*variance)) for x in X ]
from scipy.special import erf as erf_
loc_cdf = lambda X,mean,variance : [ 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
loc_Q = lambda X,mean,variance : [ 1. - 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
M_,Var_ = np.mean(rpoints),np.std(rpoints)**2
#
        # INSTEAD OF THE PROBABILITY DENSITY WE RETURN THE FRACTIONAL RANKS
# SINCE THIS ALLOWS US TO CALCULATE RANK STATISTICS FOR THE PROJECTION
corresponding_density = ( self.rankdata (rpoints,'average')-0.5 ) / len( set(rpoints) )
corresponding_pvalue = loc_Q ( rpoints,M_,Var_ )
return ( corresponding_pvalue , corresponding_density )
def predict ( self, X, name ) :
desc__=""" print ( self.predict(X.iloc[:,0],'male') ) """
if len( X ) == len(self.A.iloc[:,0].values) and name in set(self.B.columns) :
coefs = self.B.loc[:,name].values
return ( name , np.dot( coefs,X )>self.TOL )
else :
print ( "CANNOT PREDICT" )
def regression_assessment ( self ) :
desc_ = """
ALTERNATIVE NAIVE MODEL ASSESSMENT FOR A REGRESSION MODEL
!PRVT2D1701CM5487!
NO CV; NOT YET INFORMATIVE
"""
X = self.A
coefs = self.B.T
yp = np.dot( coefs,X )
y_ = self.E.values
#
# NORMFINDER IS INTERCEPT
mstat = dict()
#
n = len ( y_ ); p = len(coefs); q = len ( coefs.T )
if q>n :
print ( "OVER DETERMINED SYSTEM OF EQUATIONS " )
ym = np.mean( y_ , axis=0 )
#
# BZ FORMS
TSS = np.array([ np.sum(( y_ - ym ) ** 2, axis=1) ])[0]; dof_tss = np.abs(n-1) ; mstat['TSS'] = TSS
RSS = np.array([ np.sum(( y_ - yp ) ** 2, axis=1) ])[0]; dof_rss = np.abs(n-q) ; mstat['RSS'] = RSS
ESS = np.array([ np.sum(( yp - ym ) ** 2, axis=1) ])[0]; dof_ess = np.abs(p-1) ; mstat['ESS'] = ESS
mstat['dof_tss'] = dof_tss ; mstat['dof_rss'] = dof_rss ; mstat['dof_ess'] = dof_ess
#
TMS = TSS / dof_tss ; mstat['TMS'] = TMS
RMS = RSS / dof_rss ; mstat['RMS'] = RMS
EMS = ESS / dof_ess ; mstat['EMS'] = EMS
#
# F-TEST
        # F0 = EMS/RMS IS TESTED AGAINST AN F( dof_ess , dof_rss ) DISTRIBUTION
        dof_numerator = dof_ess
        dof_denominator = dof_rss
from scipy.stats import f
fdist = f( dof_numerator , dof_denominator )
f0 = EMS / RMS
#
#
mstat['dof_numerator'] = dof_numerator
mstat['dof_denominator'] = dof_denominator
mstat['p-value'] = 1 - fdist.cdf(f0)
mstat['f0'] = f0
mstat['yp'] = yp
        mstat['model'] = self.B # THE FITTED COEFFICIENT FRAME ( NO SEPARATE MODEL OBJECT HERE )
return ( mstat )
def multifactor_evaluation ( self, analyte_df=None , journal_df=None , formula=None ) :
#
if analyte_df is None :
analyte_df = self.A
if journal_df is None :
journal_df = self.J
if formula is None :
formula = self.f
encoding_df , beta_df = self.multifactor_solution ( analyte_df , journal_df , formula )
eval_df = beta_df.apply(lambda x:x**2)
all = [ beta_df ]
for c in eval_df.columns :
all.append ( pd.DataFrame ( self.quantify_density_probability ( eval_df.loc[:,c].values ),
index = [c+',p',c+',r'], columns=eval_df.index ).T)
res_df = pd.concat( all , axis=1 )
for c in res_df.columns :
if ',p' in c :
q = [ qv[0] for qv in self.qvalues(res_df.loc[:,c].values) ]
res_df.loc[:,c.split(',p')[0]+',q'] = q
self.R = res_df
return ( self.R )
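#
# ILLUSTRATIVE USAGE SKETCH ( NOT PART OF THE LIBRARY ; FRAME SHAPES AND THE
# 'Sick' LABEL AS IN THE interpret_problem SKETCH ABOVE ) :
#
# mfa = MultiFactorAnalysis ( analyte_df , journal_df , 'Expression ~ C(Status)' )
# res = mfa.R                                    # PER ANALYTE : COEFFICIENTS , ',p' , ',q' , ',r'
# mfa.predict ( analyte_df.iloc[:,0] , 'Sick' )  # CRUDE CLASS CALL FOR A SINGLE SAMPLE
#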
from scipy import stats
from statsmodels.stats.anova import anova_lm as anova
import statsmodels.api as sm
import patsy
def anova_test ( formula, group_expression_df, journal_df, test_type = 'random' ) :
type_d = { 'paired':1 , 'random':2 , 'fixed':1 }
formula = formula.replace(' ','')
tmp_df = pd.concat([ journal_df, group_expression_df ])
gname = tmp_df.index.tolist()[-1]
formula_l = formula.split('~')
rename = { gname:formula_l[0] }
tmp_df.rename( index=rename, inplace=True )
tdf = tmp_df.T.iloc[ :,[ col in formula for col in tmp_df.T.columns] ].apply( pd.to_numeric )
y, X = patsy.dmatrices( formula, tdf, return_type='dataframe')
model = sm.OLS(endog=y,exog=X).fit()
model .model.data.design_info = X.design_info
table = sm.stats.anova_lm(model,typ=type_d[test_type])
return table.iloc[ [(idx in formula) for idx in table.index],-1]
def glm_test ( formula , df , jdf , distribution='Gaussian' ) :
tmp_df = pd.concat([ jdf, df ])
family_description = """
Family(link, variance) # The parent class for one-parameter exponential families.
Binomial([link]) # Binomial exponential family distribution.
Gamma([link]) # Gamma exponential family distribution.
Gaussian([link]) # Gaussian exponential family distribution.
InverseGaussian([link]) # InverseGaussian exponential family.
NegativeBinomial([link, alpha]) # Negative Binomial exponential family.
Poisson([link]) # Poisson exponential family.
Tweedie([link, var_power, eql]) # Tweedie family.
"""
if distribution == 'Gaussian' :
family = sm.families.Gaussian()
if distribution == 'Binomial' :
family = sm.families.Binomial()
if distribution == 'Gamma' :
family = sm.families.Gamma()
if distribution == 'InverseGaussian' :
family = sm.families.InverseGaussian()
if distribution == 'NegativeBinomial' :
family = sm.families.NegativeBinomial()
if distribution == 'Poisson' :
family = sm.families.Poisson()
formula = formula.replace( ' ','' )
gname = tmp_df.index.tolist()[-1]
formula_l = formula.split('~')
rename = { gname:formula_l[0] }
tmp_df .rename( index=rename, inplace=True )
tdf = tmp_df.T.iloc[ :,[ col in formula for col in tmp_df.T.columns] ].apply( pd.to_numeric )
y , X = patsy.dmatrices( formula, tdf, return_type='dataframe')
distribution_model = sm.GLM( y, X, family=family )
glm_results = distribution_model.fit()
if False:
print('Parameters: ', glm_results.params )
print('T-values : ', glm_results.tvalues )
print('p-values : ', glm_results.pvalues )
table = glm_results.pvalues
return table.iloc[ [( idx.split('[')[0] in formula) for idx in table.index]]
def t_test ( df , endogen = 'expression' , group = 'disease' ,
pair_values = ('Sick','Healthy') , test_type = 'independent',
equal_var = False , alternative = 'greater' ) :
group1 = df.iloc[:,[n in pair_values[0] for n in df.loc[group,:].values] ].loc[endogen,:].astype(float)
group2 = df.iloc[:,[n in pair_values[1] for n in df.loc[group,:].values] ].loc[endogen,:].astype(float)
if test_type == 'independent' :
pv = ttest_ind ( group1, group2 , equal_var = equal_var )
if test_type == 'related' :
pv = ttest_rel ( group1, group2 )
try :
p_mannu = mannwhitneyu( group1, group2, alternative=alternative )[1]
except ValueError as err:
print(err.args)
p_mannu = 1.0
pvalue = pv[1] ; statistic=pv[0]
return ( pvalue , p_mannu, statistic )
def mycov( x , full_matrices=0 ):
x = x - x.mean( axis=0 )
U, s, V = np.linalg.svd( x , full_matrices = full_matrices )
C = np.dot(np.dot(V.T,np.diag(s**2)),V)
return C / (x.shape[0]-1)
from scipy.special import chdtrc as chi2_cdf
def p_value_merger ( pvalues_df , p_label=',p' , axis = 0 ) :
#
print( " REQUIRED READING: doi: 10.1093/bioinformatics/btw438" )
print( " ALSO MAKE SURE TO ADD THAT ARTICLE AS ADDITIONAL CITATION" )
print( " IF THIS METHOD IS EMPLOYED" )
print( " READ ABOVE ! " )
print( " YOU CALLED A FUNCTION FOR MERGING P-VALUES" )
print( " THIS METHOD IS NO LONGER SUPPORTED " )
print( " FOR IMPETUOUS VERSIONS > 0.84.0 " )
print( " THE METHOD merge_significance COULD BE EMPLOYED INSTEAD ")
print( " BUT IT IS NOT RECOMMENDED " )
print( " FATAL : WILL TERMINATE NOW " )
exit(1)
def parse_test ( statistical_formula, group_expression_df , journal_df , test_type = 'random' ) :
#
# THE FALLBACK IS A TYPE2 ANOVA
ident = False
if 'glm' in statistical_formula.lower() :
if not test_type in set(['Gaussian','Binomial','Gamma','InverseGaussian','NegativeBinomial','Poisson']):
test_type = 'Gaussian'
print('ONLY GAUSSIAN TESTS ARE SUPPORTED')
print('THIS TEST IS NO LONGER SUPPORTED')
result = glm_test( statistical_formula, group_expression_df , journal_df , distribution = test_type )
ident = True
if 'ttest' in statistical_formula.lower() :
ident = True ; result = None
#
# WE CONDUCT SEPARATE TESTS FOR ALL THE UNIQUE PAIR LABELS PRESENT
check = [ idx for idx in journal_df.index if idx in statistical_formula ]
df = pd.concat( [journal_df,group_expression_df],axis=0 ).T
for c in check :
if test_type in set([ 'related' , 'fixed' , 'paired' ]):
test_type = 'related'
else :
test_type = 'independent'
for pair in permuter( list(set(journal_df.loc[c].values)),2) :
result_ = t_test( df, endogen = df.columns.values[-1], group = c,
pair_values = pair, test_type = test_type, equal_var = False )
hdr = ' '.join( [c,' '.join([str(p) for p in pair])] )
tdf = pd.Series( result_, index = [ hdr, hdr+' mwu', hdr+' stat,s' ] )
if result is None :
result = tdf
else :
result = pd.concat([result,tdf])
result.name = 'PR>t'
if not ident :
result = anova_test( statistical_formula, group_expression_df , journal_df , test_type=test_type )
return ( result )
def prune_journal ( journal_df , remove_units_on = '_' ) :
journal_df = journal_df.loc[ [ 'label' in idx.lower() or '[' in idx for idx in journal_df.index.values] , : ].copy()
bSel = [ ('label' in idx.lower() ) for idx in journal_df.index.values]
bool_dict = { False:0 , True:1 , 'False':0 , 'True':1 }
str_journal = journal_df.iloc[ bSel ]
journal_df = journal_df.replace({'ND':np.nan})
nmr_journal = journal_df.iloc[ [ not b for b in bSel ] ].replace(bool_dict).apply( pd.to_numeric )
if not remove_units_on is None :
nmr_journal.index = [ idx.split(remove_units_on)[0] for idx in nmr_journal.index ]
journal_df = pd.concat( [nmr_journal,str_journal] )
return( journal_df )
def merge_significance ( significance_df , distance_type='euclidean' ) :
# TAKES P VALUES OR Q VALUES
# TRANSFORMS INTO A MERGED P OR Q VALUE VIA
# THE DISTANCE SCORE
# THE DATA DOMAIN SIGNIFICANCE IS ALONG COLUMNS AND
# GROUPS ALONG INDICES
# EX: pd.DataFrame( np.random.rand(20).reshape(5,4) , columns=['bio','cars','oil','money']).apply( lambda x: -1.*np.log10(x) ).T.apply( lambda x: np.sqrt(np.sum(x**2)) )
#
distance = lambda x : np.sqrt(np.sum(x**2))
if distance_type == 'euclidean' : # ESTIMATE
distance = lambda x : np.sqrt(np.sum(x**2))
if distance_type == 'extreme' : # ANTI-CONSERVATIVE ESTIMATE
distance = lambda x : np.max(x)
if distance_type == 'mean' : # MEAN ESTIMATE
distance = lambda x : np.mean(x)
get_pvalue = lambda x : 10**(-x)
return ( significance_df.apply( lambda x: -1.*np.log10(x) ).T.apply(distance).apply(get_pvalue) )
def group_significance( subset , all_analytes_df = None ,
tolerance = 0.05 , significance_name = 'pVal' ,
AllAnalytes = None , SigAnalytes = None,
alternative = 'two-sided' ) :
# FISHER ODDS RATIO CHECK
# CHECK FOR ALTERNATIVE :
# 'greater' ( ENRICHMENT IN GROUP )
    #   'two-sided' ( DIFFERENTIAL GROUP EXPRESSION )
# 'less' ( DEPLETION IN GROUP )
    if AllAnalytes is None :
        if not all_analytes_df is None :
            AllAnalytes = set( all_analytes_df.index.values )
    if SigAnalytes is None :
        if not all_analytes_df is None :
            SigAnalytes = set( all_analytes_df.iloc[ (all_analytes_df<tolerance).loc[:,significance_name].values , : ].index.values )
Analytes = set(subset.index.values)
notAnalytes = AllAnalytes - Analytes
notSigAnalytes = AllAnalytes - SigAnalytes
AB = len(Analytes&SigAnalytes) ; nAB = len(notAnalytes&SigAnalytes)
    AnB = len(Analytes&notSigAnalytes) ; nAnB = len(notAnalytes&notSigAnalytes)
oddsratio , pval = stats.fisher_exact([[AB, nAB], [AnB, nAnB]], alternative=alternative )
return ( pval , oddsratio )
def quantify_groups_by_analyte_pvalues( analyte_df, grouping_file, delimiter='\t',
tolerance = 0.05 , p_label = 'C(Status),p' ,
group_prefix = '' , alternative = 'two-sided' ) :
AllAnalytes = set( analyte_df.index.values ) ; nidx = len( AllAnalytes )
SigAnalytes = set( analyte_df.iloc[ (analyte_df.loc[:,p_label].values < tolerance), : ].index.values )
if len( AllAnalytes ) == len(SigAnalytes) :
print ( 'THIS STATISTICAL TEST WILL BE NONSENSE' )
eval_df = None
with open( grouping_file ) as input :
for line in input :
vline = line.replace('\n','').split(delimiter)
gid, gdesc, analytes_ = vline[0], vline[1], vline[2:]
try :
group = analyte_df.loc[[a for a in analytes_ if a in AllAnalytes] ].dropna( axis=0, how='any', thresh=analyte_df.shape[1]/2 ).drop_duplicates()
except KeyError as e :
continue
L_ = len( group ) ; str_analytes=','.join(group.index.values)
if L_ > 0 :
pv , odds = group_significance( group , AllAnalytes=AllAnalytes, SigAnalytes=SigAnalytes , alternative=alternative )
rdf = pd.DataFrame( [[pv]], columns = [ group_prefix + 'Fisher_' + p_label ] , index = [ gid ] )
rdf .columns = [ col+',p' if ',p' not in col else col for col in rdf.columns ]
rdf[ 'description' ] = gdesc+',' + str(L_) ; rdf['analytes'] = str_analytes
rdf[ group_prefix + 'NGroupAnalytes' ] = L_
rdf[ group_prefix + 'AllFracFilling' ] = L_ / float( len(analytes_) )
present_sig = set(group.index.values)&SigAnalytes
rdf[ group_prefix + 'SigFracGroupFill' ] = float ( len ( present_sig ) ) / float( len(analytes_) )
ndf = rdf
if eval_df is None :
eval_df = ndf
else :
eval_df = pd.concat( [ eval_df,ndf ] )
edf = eval_df.T
for col in eval_df.columns :
if ',p' in col :
q = [q_[0] for q_ in qvalues(eval_df.loc[:,col].values)]; l=col.split(',')[0]+',q'
edf.loc[l] = q
return ( edf.T )
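#
# THE GROUPING FILE IS PLAIN DELIMITED TEXT WITH ONE GROUP PER LINE :
#   <group id> <delimiter> <group description> <delimiter> <analyte 1> <delimiter> <analyte 2> ...
# ILLUSTRATIVE CALL ( COMMENTED OUT ; THE FILE NAME IS A PLACEHOLDER , res_df IS
# ASSUMED TO COME FROM multifactor_evaluation AND p_label MUST NAME ONE OF ITS ',p' COLUMNS ) :
#
# group_df = quantify_groups_by_analyte_pvalues ( res_df , 'groups.tsv' ,
#                                                 delimiter = '\t' , p_label = 'Sick,p' )
#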
class APCA ( object ) :
#
# THIS CLASS PERFORMS A SPARSE PCA IF REQUESTED
# IT THEN USES THE SPARSE SVD ALGORITHM FOUND IN SCIPY
# THE STANDARD IS TO USE THE NUMPY SVD
#
def __init__ ( self , X = None , k =-1 , fillna = None , transcending = True , not_sparse = True ) :
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds
self.svds_ , self.smatrix_ = svds , csc_matrix
self.components_ = None
self.F_ = None
self.U_ , self.S_, self.V_ = None,None,None
self.evr_ = None
self.var_ = None
self.fillna_ = fillna
self.X_ = self.interpret_input(X)
self.k_ = k
self.transcending_ = transcending
self.not_sparse = not_sparse
def interpret_input ( self,X ) :
if 'pandas' in str(type(X)) :
for idx in X.index :
X.loc[idx] = [ np.nan if 'str' in str(type(v)) else v for v in X.loc[idx].values ]
if 'float' in str(type(self.fillna_)) or 'int' in str(type(self.fillna_)) :
X = X.fillna(self.fillna_)
self.X_ = X.values
else :
self.X_ = X
return ( self.X_ )
def fit ( self , X=None ) :
self.fit_transform( X=X )
def fit_transform ( self , X=None ) :
if X is None:
X = self.X_
if not X is None :
X = self.interpret_input(X)
Xc = X - np.mean( X , 0 )
if self.k_<=0 :
k_ = np.min( np.shape(Xc) ) - 1
else:
k_ = self.k_
if self.not_sparse :
u, s, v = np.linalg.svd( Xc , full_matrices = False )
self.transcending_ = False
else :
u, s, v = self.svds_ ( self.smatrix_(Xc, dtype=float) , k=k_ )
if self.transcending_ :
u, s, v = self.transcending_order(u,s,v)
S = np.diag( s )
self.F_ = np.dot(u,S)
self.var_ = s ** 2 / Xc.shape[0]
        self.explained_variance_ratio_ = self.var_/self.var_.sum()
        self.evr_ = self.explained_variance_ratio_
self.U_ , self.S_ , self.V_ = u,s,v
self.components_ = self.V_
return ( self.F_ )
def transcending_order(self,u,s,v) :
return ( u[:,::-1],s[::-1],v[::-1,:] )
def apply_matrix( self , R ) :
self.U_ = np.dot( self.U_,R.T )
self.V_ = np.dot( self.V_.T,R.T ).T
self.F_ = np.dot( self.F_,R.T )
self.components_ = self.V_
return ( self.F_ )
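#
# ILLUSTRATIVE USAGE SKETCH ( COMMENTED OUT ; analyte_df AS IN THE EARLIER SKETCHES ) :
#
# apca = APCA ( X = analyte_df , k = -1 )
# F    = apca.fit_transform ( )       # ROW SCORES , ONE ROW PER ROW OF X
# V    = apca.components_             # COMPONENT LOADINGS
# evr  = apca.explained_variance_ratio_
#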
dimred = PCA()
def quantify_groups ( analyte_df , journal_df , formula , grouping_file , synonyms = None ,
delimiter = '\t' , test_type = 'random' ,
split_id = None , skip_line_char = '#'
) :
statistical_formula = formula
if not split_id is None :
nidx = [ idx.split(split_id)[-1].replace(' ','') for idx in analyte_df.index.values ]
analyte_df.index = nidx
sidx = set( analyte_df.index.values ) ; nidx=len(sidx)
eval_df = None
with open ( grouping_file ) as input:
for line in input:
if line[0] == skip_line_char :
continue
vline = line.replace('\n','').split(delimiter)
gid,gdesc,analytes_ = vline[0],vline[1],vline[2:]
if not synonyms is None :
[ analytes_.append(synonyms[a]) for a in analytes_ if a in synonyms ]
try :
group = analyte_df.loc[[a for a in analytes_ if a in sidx] ].dropna( axis=0, how='any', thresh=analyte_df.shape[1]/2 ).drop_duplicates()
except KeyError as e :
continue
L_ = len( group ) ; str_analytes=','.join(group.index.values)
if L_>0 :
dimred.fit(group.values)
group_expression_df = pd.DataFrame([dimred.components_[0]],columns=analyte_df.columns.values,index=[gid])
rdf = pd.DataFrame( parse_test( statistical_formula, group_expression_df , journal_df , test_type=test_type )).T
rdf .columns = [ col+',p' if (not ',s' in col) else col+',s' for col in rdf.columns ]
rdf['description'] = gdesc+','+str(L_)
rdf['analytes'] = str_analytes
rdf.index = [ gid ] ; ndf = pd.concat([rdf.T,group_expression_df.T]).T
if eval_df is None :
eval_df = ndf
else :
eval_df = pd.concat([eval_df,ndf])
edf = eval_df.T
for col in eval_df.columns :
if ',p' in col :
q = [q_[0] for q_ in qvalues(eval_df.loc[:,col].values)]; l=col.split(',')[0]+',q'
edf.loc[l] = q
return ( edf.T )
from scipy.stats import combine_pvalues
def quantify_by_dictionary ( analyte_df , journal_df , formula , split_id=None,
grouping_dictionary = dict() , synonyms = None ,
delimiter = ':' ,test_type = 'random', tolerance = 0.05,
supress_q = False , analyte_formula = None,
use_loc_pca=False , k=-1 ) :
if use_loc_pca :
dimred = APCA(X=analyte_df,k=k)
if not 'dict' in str(type(grouping_dictionary)) :
print ( 'INVALID GROUPING' )
return
statistical_formula = formula
if not split_id is None :
nidx = [ idx.split(split_id)[-1].replace(' ','') for idx in analyte_df.index.values ]
analyte_df.index = nidx
sidx = set( analyte_df.index.values ) ; nidx = len(sidx)
eval_df = None
if True :
for line in grouping_dictionary.items() :
gid,analytes_ = line[0],line[1:][0]
gdesc = line[0].split(delimiter)[0]
if not synonyms is None :
[ analytes_.append(synonyms[a]) for a in analytes_ if a in synonyms ]
try :
group = analyte_df.loc[[a for a in analytes_ if a in sidx] ].dropna( axis=0, how='any', thresh=analyte_df.shape[1]/2 ).drop_duplicates()
except KeyError as e :
continue
L_ = len( group ) ; str_analytes=','.join(group.index.values)
if L_>0 :
dimred .fit( group.values )
ddf = None
for ic in range(len( dimred.components_ )) :
                    group_expression_df = pd.DataFrame( [dimred.components_[ic]] , columns=analyte_df.columns.values , index=[gid] )
"""
NAD Lab Tools
This program was written for the NAD Lab at the University of Arizona by <NAME>.
It processes intracellular calcium concentration and pH measurements (from the InCytim2 software)
and filters the data for outliers and spikes.
The experiment consists of placing fluorescent-stained cells under a microscope and measuring either
calcium concentration or pH. Over a period of time, solutions are added to determine how these
quantities respond.
<NAME> 2019
"""
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilenames, askdirectory
# File names for analysis
names = []
# Output directory
output = ''
# Names of drugs added to solution
events = []
# Beginning of baseline measurement
itime = 60
# End of baseline measurement
ftime = 200
# Lower threshold to exclude cells
lbase = 50
# Upper threshold to exclude cells
ubase = 150
# Mode to analyze data - can be either 'Calcium' or 'pH'
measure = 'Calcium'
def process_data(df):
"""
Takes in a pandas dataframe and calculates the mean Calcium/pH as well as ratios
between different wavelength measurements. Then formats the data into a CSV file.
Lastly, uses the user-defined thresholds to exclude outlier cells.
Arguments:
df {pd.DataFrame} -- a dataframe to process
Returns:
tuple -- a tuple of dataframes containing the processed dataframes, their outliers, and
graph data
"""
global itime, ftime, lbase, ubase, measure
# Adjusts parameters based on Calcium/pH mode
meas = ''
length1 = ''
length2 = ''
meanname = ''
if measure == 'Calcium':
meas = 'Ca++'
length1 = '340'
length2 = '380'
meanname = 'Mean Calcium (nM)'
elif measure == 'pH':
meas = 'pH'
length1 = '488'
length2 = '460'
meanname = 'Mean pH'
# Reads pertinent data from dataframe
times = df.iloc[:, 0].to_frame(name='Time (s)').astype(float)
calcium = df.filter(like=meas, axis=1).astype(float)
conc_340 = df.filter(like=length1, axis=1).astype(float)
conc_380 = df.filter(like=length2, axis=1).astype(float)
ratio = pd.DataFrame()
# Calculates ratio of different wavelength measurements
for i, col in enumerate(conc_340.columns):
ratio[length1 + '/' + length2 + col[-4:]
] = conc_340.iloc[:, i] / conc_380.iloc[:, i]
# Calculates mean ratio and Calcium/pH
mean_ratio = ratio.mean(axis=1).to_frame(name='Mean Ratio')
mean_ca = calcium.mean(axis=1).to_frame(name=meanname)
# Empty dataframe to space columns in CSV file
empty = pd.DataFrame(columns=[''])
# Data for CSV
processed_data = pd.concat([times, empty, calcium, empty, conc_340,
empty, conc_380, empty, ratio, empty, mean_ca, mean_ratio], axis=1)
# Data for graph
graph_data = pd.concat([times, mean_ca], axis=1)
remove = []
# Get baseline times for cells
baselines = calcium[(times.iloc[:, 0].astype(float) >= itime) & (
times.iloc[:, 0].astype(float) <= ftime)]
# Exclude outliers
if len(baselines) != 0:
for i in range(len(baselines.iloc[0])):
baseline = baselines.iloc[:, i].mean()
if baseline <= lbase or baseline >= ubase:
remove.append(i)
else:
remove = range(len(calcium.iloc[0]))
# Compiles outlier data
calc_outliers = calcium.drop(calcium.columns[remove], axis=1)
conc_340_outliers = conc_340.drop(conc_340.columns[remove], axis=1)
conc_380_outliers = conc_380.drop(conc_380.columns[remove], axis=1)
ratio_outliers = pd.DataFrame()
# Outlier ratios
for i, col in enumerate(conc_340_outliers.columns):
ratio_outliers[length1 + '/' + length2 + col[-4:]] = conc_340_outliers.iloc[:,
i] / conc_380_outliers.iloc[:, i]
# Outlier means
mean_ratio_outliers = ratio_outliers.mean(
axis=1).to_frame(name='Mean Ratio')
mean_ca_outliers = calc_outliers.mean(axis=1).to_frame(name=meanname)
# Format CSV for outliers
processed_outliers = pd.concat([times, empty, calc_outliers, empty, conc_340_outliers, empty,
conc_380_outliers, empty, ratio_outliers, empty, mean_ca_outliers, mean_ratio_outliers], axis=1)
# Outlier graph
graph_outliers = pd.concat([times, mean_ca_outliers], axis=1)
return processed_data, processed_outliers, graph_data, graph_outliers
def save_figure(df, filename, output, events, eventtimes):
"""
Takes in a pandas dataframe and saves the graph of the data. Also
labels events on the graph the user defines.
Arguments:
df {pd.DataFrame} -- a dataframe to generate a graph from
filename {str} -- the name to save the graph as
output {str} -- a path to the desired output folder
events {list} -- a list of events to mark on the diagram
eventtimes {list} -- a list of the times when the events happened
"""
global measure
# Calcium/pH mode
meanname = ''
if measure == 'Calcium':
meanname = 'Mean Calcium (nM)'
elif measure == 'pH':
meanname = 'Mean pH'
# Formatting of the graph
df.plot(x='Time (s)', y=meanname, kind='line', legend=None)
ax = plt.gca()
plt.xlabel('Time (s)')
plt.ylabel(meanname)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
xdata = ax.lines[0].get_xdata()
ydata = ax.lines[0].get_ydata()
# Annotate the plot with events
for i, event in enumerate(events):
if i >= len(eventtimes):
break
idx = np.where(
(xdata >= (eventtimes[i] - 3)) & (xdata <= (eventtimes[i] + 3)))
plt.annotate(event, xy=(xdata[idx], ydata[idx]), xycoords='data', xytext=(0, 50), textcoords='offset points',
arrowprops=dict(arrowstyle='-|>', facecolor='black'), horizontalalignment='right', verticalalignment='top')
plt.savefig(os.path.join(output, filename + '.png'))
def spike_detection(df):
"""
Implements a simple "bandpass" filter to remove spikes from the data. Also uses
local median filtering to establish where spikes happen.
Arguments:
df {pd.DataFrame} -- a dataframe to despike
Returns:
pd.DataFrame -- the despiked dataframe
"""
global measure
# Calcium/pH mode
meanname = ''
if measure == 'Calcium':
meanname = 'Mean Calcium (nM)'
elif measure == 'pH':
meanname = 'Mean pH'
# Filter parameters
minimum = 15
maximum = 50
radius = 5
epsilon = 10
# Read means
means = np.array(df[meanname])
# Calculate differences in array, e.g. [1, 5, 3] would return [4, -2]
diffs = np.abs(np.diff(means))
# Apply "bandpass" filter and flatten array
remove = np.array(np.where((diffs > minimum) &
(diffs < maximum))).flatten()
# Function to return whether a number is within epsilon of a specified value
def within(e, a, r): return (a > e - r) & (a < e + r)
# Apply radius around point to remove from data as well as median filtering
remove = np.array([np.arange(i, i + radius) for i in remove if
any(within(np.median(means[:i]), means[i:i+radius], epsilon)) and
all(diffs[i-radius:i+radius] < maximum)]).flatten()
# Ensure indices are not out of bounds
remove = remove[(remove > 1) & (remove < len(means))]
# Remove duplicate indices
remove = np.unique(remove)
# Remove spikes and return new dataframe
return df.drop(remove)
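# Illustrative example (commented out; the trace below is invented and the global
# 'measure' is assumed to be left at its 'Calcium' default):
#
# demo = pd.DataFrame({'Time (s)': np.arange(10.0),
#                      'Mean Calcium (nM)': [100, 101, 99, 100, 140, 141, 100, 99, 100, 101]})
# despiked = spike_detection(demo)   # rows around a detected spike may be dropped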
def process_file(name, output, events):
"""
Reads a raw data file and puts the data into a pandas dataframe.
Then processes the data and saves the files.
Arguments:
name {str} -- a path to the data file
output {str} -- a path to the desired output folder
events {list} -- a list of events to mark on the diagram
"""
global measure
# Read data file (specific to InCytim2 software)
dummy = False
time = False
data = []
eventtimes = []
with open(name) as lines:
for line in lines:
if 'Event_Times' in line:
time = True
elif 'Horizontal' in line:
time = False
elif 'DATA_AFTER_THIS_LINE' in line:
dummy = True
elif time:
eventtimes.append(float(line))
elif dummy:
data.append(line.split())
# Sets top row of dataframe to be the header
filename = os.path.basename(name)[:-4]
df = pd.DataFrame(data)
header = df.iloc[0]
df = df[1:]
df.columns = header
# Processes data
processed_data, processed_outliers, graph_data, graph_outliers = process_data(
df)
# Spike detection
graph_outliers = spike_detection(graph_outliers)
# Saves CSV files
processed_data.to_csv(os.path.join(output, filename + '.csv'), index=False)
processed_outliers.to_csv(os.path.join(
output, filename + '_outliers.csv'), index=False)
# Saves graphs
save_figure(graph_data, filename, output, events, eventtimes)
save_figure(graph_outliers, filename + '_outliers',
output, events, eventtimes)
def generate_average(names, output):
"""
Reads previously processed CSV files and averages them together.
Also outputs a despiked version of the data.
Arguments:
names {list} -- a list of paths to the CSV files
output {str} -- a path to the desired output folder
"""
global measure
# Calcium/pH mode
meanname = ''
if measure == 'Calcium':
meanname = 'Mean Calcium (nM)'
elif measure == 'pH':
meanname = 'Mean pH'
# Drop empty columns and determine shortest experiment
    means = pd.DataFrame()
import pandas as pd
import tabula
import numpy as np
from CNAFenums import Approach, Landing, Role
from local_func import getFILES, INTERGER
import uuid, re, os, glob
import PyPDF2
from progress.bar import Bar
def msharp(log_file, aircraft_filter='All', nav = False):
#print(log_file)
msharp_data_raw = pd.read_excel(log_file, index_col=None)
Column_type = msharp_data_raw.iloc[4]
Landings_start = pd.Index(Column_type).get_loc('Landings')
App_start = pd.Index(Column_type).get_loc('App')
msharp_data_raw.iat[5,2] = "Date"
#Trim off header data
msharp_data = msharp_data_raw[7:msharp_data_raw.loc[msharp_data_raw.iloc[:,2] == 'Career Totals'].index.values[0]]
msharp_data.columns = msharp_data_raw.iloc[5].fillna('DROP')
msharp_data.loc[0,msharp_data.columns[Landings_start]] = "Landings"
msharp_data.loc[0,msharp_data.columns[App_start]] = "App"
msharp_data = msharp_data.sort_index()
msharp_data = msharp_data.drop('DROP', axis=1)
msharp_data.columns = msharp_data.columns.astype(str)
new_app = pd.Index(msharp_data.iloc[0]).get_loc('App')
new_ldg = pd.Index(msharp_data.iloc[0]).get_loc('Landings')
msharp_data = msharp_data.drop(0)
msharp_data = msharp_data.reset_index(drop=True)
###############################################
## Clean T&R ##
###############################################
TR = msharp_data.filter(regex=("T&R*"))
msharp_data = msharp_data.drop(columns=TR.columns)
TR_string = []
for index, row in TR.iterrows():
row = row.dropna()
row = row.astype(str)
TR_string.append(row.str.cat(sep=', '))
msharp_data['T&R']=TR_string
###############################################
## Clean APP ##
###############################################
app_raw = msharp_data.iloc[:,new_app:-2]
msharp_data = msharp_data.drop(columns=app_raw.columns)
app_raw.columns = [x[0] for x in app_raw.columns]
app_raw.columns = [Approach(x).name for x in app_raw.columns]
###############################################
## Clean LDG ##
###############################################
ldg_raw = msharp_data.iloc[:,new_ldg:-2]
msharp_data = msharp_data.drop(columns=ldg_raw.columns)
ldg_raw.columns = [x[0] for x in ldg_raw.columns]
ldg_raw.columns = [Landing(x).name for x in ldg_raw.columns]
msharp_data = pd.concat([msharp_data, app_raw, ldg_raw], axis=1)
###############################################
## Clean Date ##
###############################################
#msharp_data["Date"] = msharp_data["Date"].apply(lambda x: x.strftime('%d/%m/%Y'))
###############################################
## Add role ##
###############################################
msharp_data["Role"] = msharp_data.apply(lambda row: Role.ACFT_CMDR.name if row["ACMDR"] > 0.0 else (Role.COPILOT.name if row["TPT"] > 0.0 else Role.OTHER.name) , axis=1)
msharp_data = msharp_data.rename(columns={"TMS": "Model", "SIM": "SIT", "ACT": "AIT", "NAVFLIR":"Record", 6.0:""})
msharp_data["Record"] = msharp_data["Record"].apply(lambda x: str(uuid.uuid4())[:8] if | pd.isna(x) | pandas.isna |
import numpy as np
import pandas as pd
import argparse
def check_smiles_match(data,screen):
return (data['SMILES'].values==screen['SMILES'].values).all()
def apply_screen(data,col_name,selection_type,selection_thresh,keep):
data = data.sort_values(col_name,ascending=True)
if selection_type=='Fraction':
if keep=='High':
data = data[-int(len(data)*selection_thresh):]
elif keep=='Low':
data = data[0:-int(len(data)*selection_thresh)]
else:
print('WARNING: INVALID KEEP TYPE')
elif selection_type=='Cutoff':
if keep=='High':
data = data[data[col_name]>selection_thresh]
elif keep=='Low':
data = data[data[col_name]<selection_thresh]
else:
print('WARNING: INVALID KEEP TYPE')
else:
print('WARNING: INVALID SELECTION TYPE')
return data
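# A small usage sketch for apply_screen (hypothetical column name and values):
#   df = pd.DataFrame({'SMILES': ['C', 'CC', 'CCC', 'CCCC'],
#                      'score': [0.1, 0.4, 0.7, 0.9]})
#   apply_screen(df, 'score', 'Fraction', 0.5, 'High')  # keeps the top half by score
#   apply_screen(df, 'score', 'Cutoff', 0.5, 'Low')     # keeps rows with score < 0.5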
parser = argparse.ArgumentParser()
parser.add_argument('--molfile', type=str, required=True)
parser.add_argument('--outfile', type=str, required=True)
parser.add_argument('--screen_file1', type=str, default=None)
parser.add_argument('--selection_type1', type=str, default='Fraction') # Fraction or Cutoff Value
parser.add_argument('--selection_thresh1', type=float, default=0.5)
parser.add_argument('--keep1', type=str, default='High') # High or low
parser.add_argument('--screen_file2', type=str, default=None)
parser.add_argument('--selection_type2', type=str, default='Cutoff') # Fraction or Cutoff Value
parser.add_argument('--selection_thresh2', type=float, default=5.0)
parser.add_argument('--keep2', type=str, default='Low') # High or low
args = parser.parse_args()
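# Example invocation (script and file names are illustrative only):
#   python screen.py --molfile mols.csv --outfile screened.csv \
#       --screen_file1 scores1.csv --selection_type1 Fraction --selection_thresh1 0.5 --keep1 High \
#       --screen_file2 scores2.csv --selection_type2 Cutoff --selection_thresh2 5.0 --keep2 Low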
data = pd.read_csv(args.molfile)
data = data.drop_duplicates() # Remove duplicates
if args.screen_file1 is not None:
screen1 = pd.read_csv(args.screen_file1)
# Check if smiles match:
if not check_smiles_match(data,screen1):
print('WARNING: SMILES LISTS DO NOT MATCH')
# Add screen
col_name1 = pd.DataFrame(screen1.columns)[[not (x =='SMILES') for x in screen1.columns]].values[0][0]
data[col_name1]=screen1[col_name1]
if args.screen_file2 is not None:
screen2 = pd.read_csv(args.screen_file2)
# Check if smiles match:
    if not check_smiles_match(data,screen2):
print('WARNING: SMILES LISTS DO NOT MATCH')
# Add screen
col_name2 = | pd.DataFrame(screen2.columns) | pandas.DataFrame |
import folium
import time
import branca
from tqdm import tqdm
from datetime import datetime
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import codecs
from folium.features import DivIcon
from nts_data_collect import terminal_intakes
from bokeh.plotting import figure, output_file, save
def bokeh_plot(data):
'''
INPUTS:
    'data', numpy array: the first column holds each row's label and the first
    row is the x-axis (here time); every remaining row is a variable to be
    plotted against that x-axis,
    i.e. np.array([['Time',0,1,2,3...],['Gas Input',4,3,5,7...]...])
    DESCRIPTION:
    Given such a table, creates and saves one HTML file per variable,
    each containing a Bokeh plot of that variable's timeseries.
'''
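    # Minimal illustration of the expected layout (values are made up):
    #   data = np.array([['Time', t0, t1, t2],
    #                    ['Gas Input', 4.0, 3.5, 5.1]], dtype=object)
    #   bokeh_plot(data)  # -> writes 'Terminal Plots/Gas Input.html'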
# iterating over rows
for i in range(1,len(data[1:,0])+1):
# name of html file produced
output_file('Terminal Plots/'+str(data[i,0])+".html")
# creating base plot
p = figure(plot_width=400,plot_height=200,\
x_axis_type="datetime", title=data[i,0])
# axis labels
p.xaxis.axis_label = 'Time'
p.yaxis.axis_label = 'Instantaneous flow (mcm/day)'
# plotting line (time is always first row)
p.line(data[0,1:],data[i,1:])
# saving plot given prev specified output file name
save(p)
return
def createplots():
'''
Main creation of folium plot, takes locations, geoJSON files,
and terminal intake data and combines.
'''
# collection of initial data of gsa coming into UK terminals
terminal_data = terminal_intakes()
# conversion of string to official datetime format
for i in range(1,len(terminal_data[0,:])):
terminal_data[0,i] = datetime.strptime(terminal_data[0,i],'%Y-%m-%d %H:%M:%S')
# importing locations and parsing the file
locations = pd.read_excel(r'location.xlsx')
locations = locations.to_numpy()[:,1:]
titles = locations[0,:]
i = 0
    # the following section drops empty (NaN) title entries, which
    # can occur in some types of csv file
while True:
try:
            if titles[i] != titles[i]:  # NaN is the only value not equal to itself
titles = np.delete(titles,i)
i -= 1
i += 1
except:
break
location_names = locations[1:,0]
locations = locations[1:,:]
c = ['blue']
location_index = np.arange(0,len(locations[0,:]),3)
overall_location = np.array([[0,0]])
    # while loop to continually add new data to plots
while True:
# creation of base-map, tiles defines the style (see docs for more)
# coords specify centering on UK and appropriate zoom
m = folium.Map(location=[54.213730,-3.105027],zoom_start=6,tiles='cartodbpositron')
# creating html files (plots) of each terminals data
bokeh_plot(terminal_data)
# iterating over location_indexes (in this case only 1 aka terminals)
        # much of the following code is only relevant if different markers need to
        # represent different classes of location
# this can be added in the location.xlsx file as new columns
for j in location_index:
class_location = np.array([[-1,0]])
for i in range(len(locations[:,1+j])):
loc = locations[i,j+1:j+3]
if loc[0] != loc[0]:
break
class_location = np.append(class_location,[loc],axis=0)
class_location = class_location[1:,:]
for i in range(len(class_location)):
# reading html files created earlier
f = codecs.open('Terminal Plots/'+str(location_names[i])+'.html','r')
html = f.read()
# creating a 'frame' to place them in
iframe = branca.element.IFrame(html=html,width=420,height=230)
# defining the popup of a marker
popup = folium.Popup(iframe,max_width=5000)
# plotting each marker with appropriate popup containing
# relevant html plot
folium.Marker([class_location[i,0],class_location[i,1]],\
popup=popup,\
icon=folium.Icon(color=c[j],icon='graph_up')).add_to(m)
# adding toggle-able layers
folium.LayerControl().add_to(m)
# saving map
m.save(r'folium_map.html')
terminal_data_pd = | pd.DataFrame(terminal_data) | pandas.DataFrame |
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 0
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_10():
# 1 mover between firms 0 and 1, 1 between firms 1 and 2, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_11():
# 1 mover between firms 0 and 1 and 2 and 3, 1 between firms 1 and 2, and 1 stayer at firm 2.
# Check going to event study and back to long, for data where movers have extended periods where they stay at the same firm
worker_data = []
# Firm 0 -> 1 -> 2 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 3})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 4})
worker_data.append({'i': 0, 'j': 2, 'y': 0.75, 't': 5})
worker_data.append({'i': 0, 'j': 3, 'y': 1.5, 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df).clean_data().get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 0
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 0.5
assert stayers.iloc[0]['y2'] == 0.5
assert stayers.iloc[0]['t1'] == 4
assert stayers.iloc[0]['t2'] == 4
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 1.
assert stayers.iloc[1]['y2'] == 1.
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[2]['i'] == 2
assert stayers.iloc[2]['j1'] == 2
assert stayers.iloc[2]['j2'] == 2
assert stayers.iloc[2]['y1'] == 1.
assert stayers.iloc[2]['y2'] == 1.
assert stayers.iloc[2]['t1'] == 2
assert stayers.iloc[2]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2.
assert movers.iloc[0]['y2'] == 1.
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1.
assert movers.iloc[1]['y2'] == 0.5
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 3
assert movers.iloc[2]['i'] == 0
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 3
assert movers.iloc[2]['y1'] == 0.75
assert movers.iloc[2]['y2'] == 1.5
assert movers.iloc[2]['t1'] == 5
assert movers.iloc[2]['t2'] == 6
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j1'] == 1
assert movers.iloc[3]['j2'] == 2
assert movers.iloc[3]['y1'] == 1.
assert movers.iloc[3]['y2'] == 1.
assert movers.iloc[3]['t1'] == 1
assert movers.iloc[3]['t2'] == 2
bdf = bdf.get_long()
for row in range(len(bdf)):
df_row = df.iloc[row]
bdf_row = bdf.iloc[row]
for col in ['i', 'j', 'y', 't']:
assert df_row[col] == bdf_row[col]
def test_refactor_12():
# Check going to event study and back to long
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
assert len(bdf) == len(bdf.get_es().get_long())
def test_contiguous_fids_11():
# Check contiguous_ids() with firm ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 3, 'y': 1., 't': 2})
# Firm 3 -> 3
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_wids_12():
# Check contiguous_ids() with worker ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_cids_13():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 2})
# Firm 1 -> 2
# Cluster 2 -> 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 1})
# Firm 2 -> 2
# Cluster 1 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
def test_contiguous_cids_14():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 2 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Firm 1 -> 2
# Cluster 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Firm 2 -> 2
# Cluster 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es().original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['original_g1'] == 2
assert movers.iloc[0]['original_g2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['original_g1'] == 1
assert movers.iloc[1]['original_g2'] == 2
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['original_g1'] == 2
assert stayers.iloc[0]['original_g2'] == 2
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
def test_col_dict_15():
# Check that col_dict works properly.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)]).rename({'j': 'firm', 'i': 'worker'}, axis=1)
bdf = bpd.BipartiteLong(data=df, col_dict={'j': 'firm', 'i': 'worker'})
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_worker_year_unique_16_1():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum', and 'mean' options, where options should not have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 3
# Time 1 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_2():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum' and 'mean' options, where options should have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df.copy())
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_3():
# Workers with multiple jobs in the same year, keep the highest paying, with collapsed long format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. Using collapsed long data.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
# Firm 2 -> 1
# Time 1 -> 1
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLongCollapsed(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 1
assert stayers.iloc[0]['y'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t1'] == 1
assert movers.iloc[2]['t2'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t1'] == 2
assert movers.iloc[3]['t2'] == 2
def test_worker_year_unique_16_4():
# Workers with multiple jobs in the same year, keep the highest paying, with event study format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. NOTE: because of how data converts from event study to long (it only shifts period 2 (e.g. j2, y2) for the last row, as it assumes observations zigzag), it will only correct duplicates for period 1
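    # Rough illustration of the zigzag assumption described above (not verified against
    # the library internals): an event study worker with rows (j1=a, j2=b), (j1=b, j2=c)
    # converts to the long sequence a, b, c -- j1 of every row plus j2 of the final row --
    # so duplicate period-2 observations in non-final rows never reach the long frame.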
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j1': 0, 'j2': 1, 'y1': 2., 'y2': 1., 't1': 1, 't2': 2})
# Worker 1
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.5, 'y2': 1.5, 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.75, 'y2': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 2, 'j2': 1, 'y1': 1., 'y2': 2., 't1': 1, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 't1': 1, 't2': 1, 'y1': 1., 'y2': 1.})
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 'y1': 1., 'y2': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 1, 't2': 1})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 2, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteEventStudy(data=df.copy(), include_id_reference_dict=True)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how})).original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['original_i'] == 3
assert stayers.iloc[0]['j1'] == 1
assert stayers.iloc[0]['j2'] == 1
assert stayers.iloc[0]['y1'] == 1.5
assert stayers.iloc[0]['y2'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['original_i'] == 3
assert stayers.iloc[1]['j1'] == 1
assert stayers.iloc[1]['j2'] == 1
assert stayers.iloc[1]['y1'] == 1.5
assert stayers.iloc[1]['y2'] == 1.5
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers.iloc[0]['original_i'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['original_i'] == 1
assert movers.iloc[1]['i'] == 1
if how == 'max':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
elif how == 'sum':
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['y1'] == 1.25
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y2'] == 2.5
elif how == 'mean':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
assert movers.iloc[1]['t1'] == 1
assert movers.iloc[1]['t2'] == 2
def test_string_ids_17():
# String worker and firm ids.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_general_methods_18():
# Test some general methods, like n_workers/n_firms/n_clusters, included_cols(), drop(), and rename().
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Worker 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
assert bdf.n_workers() == 3
assert bdf.n_firms() == 3
assert bdf.n_clusters() == 2
correct_cols = True
all_cols = bdf._included_cols()
for col in ['i', 'j', 'y', 't', 'g']:
if col not in all_cols:
correct_cols = False
break
assert correct_cols
bdf.drop('g1', axis=1, inplace=True)
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.drop('g', axis=1, inplace=True)
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
bdf.rename({'i': 'w'})
assert 'i' in bdf.columns
bdf['g1'] = 1
bdf['g2'] = 1
bdf.col_dict['g1'] = 'g1'
bdf.col_dict['g2'] = 'g2'
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.rename({'g': 'r'})
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
def test_save_19():
# Make sure changing attributes in a saved version does not overwrite values in the original.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Long
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed long
bdf = bdf.gen_m(copy=False).get_long().get_collapsed_long()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
def test_id_reference_dict_20():
# String worker and firm ids, link with id_reference_dict.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
id_reference_dict = bdf.id_reference_dict
merge_df = bdf.merge(id_reference_dict['i'], how='left', left_on='i', right_on='adjusted_ids_1').rename({'original_ids': 'original_i'})
merge_df = merge_df.merge(id_reference_dict['j'], how='left', left_on='j', right_on='adjusted_ids_1').rename({'original_ids': 'original_j'})
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_22():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_23():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method where there are multiple steps of references.
worker_data = []
# Worker 'a'
# Firm a -> b -> c turns into 0 -> 1 -> 2 turns into 0 -> 1
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
worker_data.append({'i': 'a', 'j': 'c', 'y': 1.5, 't': 3})
# Worker 'b'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'd', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'c', 'y': 0.5, 't': 2})
# Worker 'd'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf[bdf['j'] > 0]
bdf = bdf.clean_data(bpd.clean_params({'connectedness': None}))
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'b'
assert movers.iloc[0]['y'] == 1
assert movers.iloc[0]['t'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'c'
assert movers.iloc[1]['y'] == 1.5
assert movers.iloc[1]['t'] == 3
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 0
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'd'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 0
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'd'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_fill_time_24_1():
    # Test .fill_periods() method for long format, with no data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df['m'] == 0]
movers = new_df[new_df['m'] == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
def test_fill_time_24_2():
    # Test .fill_periods() method for long format, with 1 row of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == 2
assert movers.iloc[4]['y'] == 1
def test_fill_time_24_3():
    # Test .fill_periods() method for long format, with 2 rows of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 4
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 4})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == - 1
assert np.isnan(movers.iloc[4]['y'])
assert np.isnan(movers.iloc[4]['m'])
assert movers.iloc[5]['i'] == 1
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
def test_uncollapse_25():
# Convert from collapsed long to long format.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLongCollapsed(data=df).uncollapse()
assert bdf.iloc[0]['i'] == 0
assert bdf.iloc[0]['j'] == 0
assert bdf.iloc[0]['y'] == 2
assert bdf.iloc[0]['t'] == 1
assert bdf.iloc[1]['i'] == 0
assert bdf.iloc[1]['j'] == 1
assert bdf.iloc[1]['y'] == 1
assert bdf.iloc[1]['t'] == 2
assert bdf.iloc[2]['i'] == 1
assert bdf.iloc[2]['j'] == 1
assert bdf.iloc[2]['y'] == 1
assert bdf.iloc[2]['t'] == 1
assert bdf.iloc[3]['i'] == 1
assert bdf.iloc[3]['j'] == 1
assert bdf.iloc[3]['y'] == 1
assert bdf.iloc[3]['t'] == 2
assert bdf.iloc[4]['i'] == 1
assert bdf.iloc[4]['j'] == 2
assert bdf.iloc[4]['y'] == 1
assert bdf.iloc[4]['t'] == 2
assert bdf.iloc[5]['i'] == 1
assert bdf.iloc[5]['j'] == 2
assert bdf.iloc[5]['y'] == 1.5
assert bdf.iloc[5]['t'] == 2
assert bdf.iloc[6]['i'] == 1
assert bdf.iloc[6]['j'] == 3
assert bdf.iloc[6]['y'] == 0.5
assert bdf.iloc[6]['t'] == 2
assert bdf.iloc[7]['i'] == 3
assert bdf.iloc[7]['j'] == 2
assert bdf.iloc[7]['y'] == 1
assert bdf.iloc[7]['t'] == 1
assert bdf.iloc[8]['i'] == 3
assert bdf.iloc[8]['j'] == 2
assert bdf.iloc[8]['y'] == 1
assert bdf.iloc[8]['t'] == 2
assert bdf.iloc[9]['i'] == 3
assert bdf.iloc[9]['j'] == 1
assert bdf.iloc[9]['y'] == 1.5
assert bdf.iloc[9]['t'] == 1
assert bdf.iloc[10]['i'] == 3
assert bdf.iloc[10]['j'] == 1
assert bdf.iloc[10]['y'] == 1.5
assert bdf.iloc[10]['t'] == 2
def test_keep_ids_26():
# Keep only given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_keep = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().keep_ids('j', ids_to_keep).get_long()
assert set(bdf_keep['j']) == set(ids_to_keep)
# Make sure long and es give same results
bdf_keep2 = bdf.keep_ids('j', ids_to_keep)
assert len(bdf_keep) == len(bdf_keep2)
def test_drop_ids_27():
# Drop given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_drop = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().drop_ids('j', ids_to_drop).get_long()
assert set(bdf_keep['j']) == set(all_fids).difference(set(ids_to_drop))
# Make sure long and es give same results
bdf_keep2 = bdf.drop_ids('j', ids_to_drop)
assert len(bdf_keep) == len(bdf_keep2)
def test_min_obs_firms_28_1():
# List only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_firms_28_2():
# List only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_frame_29_1():
# Keep only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
new_frame.reset_index(drop=True, inplace=True)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_obs_frame_29_2():
# Keep only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_workers_firms_30():
# List only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 40
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_workers[n_workers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_workers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_workers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_workers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_workers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_workers_frame_31():
# Keep only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = n_workers[n_workers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms).get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_workers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_workers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_workers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_workers_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
def test_min_moves_firms_32_1():
# List only firms that meet a minimum threshold of moves.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_firms_32_2():
# List only firms that meet a minimum threshold of moves.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_frame_33():
# Keep only firms that meet a minimum threshold of moves.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
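# (dropping firms can push other firms' move counts below the threshold, so prune to a fixed point)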
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
prev_frame.loc[prev_frame.loc[:, 'm'] == 2, 'm'] = 1
# Keep firms with sufficiently many moves
n_moves = prev_frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_moves_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_moves_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_moves_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_moves_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
def test_min_movers_firms_34():
# List only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Keep movers
frame = frame[frame['m'] > 0]
n_movers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_movers[n_movers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_movers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_movers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_movers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_movers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_movers_frame_35():
# Keep only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the new frame
frame = bdf.copy()
# Keep movers
frame_movers = frame[frame['m'] > 0]
n_movers = frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
# Keep movers
prev_frame_movers = prev_frame[prev_frame['m'] > 0]
n_movers = prev_frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_movers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_movers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_movers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_movers_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
###################################
##### Tests for BipartiteLong #####
###################################
def test_long_get_es_extended_1():
# Test get_es_extended() by making sure it is generating the event study correctly for periods_pre=2 and periods_post=1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3})
worker_data.append({'i': 1, 'j': 5, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 3, 'j': 3, 'y': 1.5, 't': 3})
# Worker 4
worker_data.append({'i': 4, 'j': 0, 'y': 1., 't': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
df['g'] = df['j'] # Fill in g column as j
bdf = bpd.BipartiteLong(df)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=2, periods_post=1, include=['j', 'y'], transition_col='g')
assert es_extended.iloc[0]['i'] == 0
assert es_extended.iloc[0]['j_l2'] == 1
assert es_extended.iloc[0]['j_l1'] == 1
assert es_extended.iloc[0]['j_f1'] == 0
assert es_extended.iloc[0]['y_l2'] == 1
assert es_extended.iloc[0]['y_l1'] == 1
assert es_extended.iloc[0]['y_f1'] == 1
assert es_extended.iloc[0]['t'] == 4
assert es_extended.iloc[1]['i'] == 2
assert es_extended.iloc[1]['j_l2'] == 2
assert es_extended.iloc[1]['j_l1'] == 2
assert es_extended.iloc[1]['j_f1'] == 3
assert es_extended.iloc[1]['y_l2'] == 1
assert es_extended.iloc[1]['y_l1'] == 1
assert es_extended.iloc[1]['y_f1'] == 1.5
assert es_extended.iloc[1]['t'] == 3
def test_long_get_es_extended_2():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, include=['j', 'y'])
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
def test_long_get_es_extended_3_1():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_pre works
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=2, periods_post=3, stable_pre='j', include=['j', 'y'])
assert np.sum(es_extended['j_l2'] != es_extended['j_l1']) == 0
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
def test_long_get_es_extended_3_2():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_post works
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, stable_post='j', include=['j', 'y'])
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
assert np.sum(es_extended['j_f1'] != es_extended['j_f2']) == 0
def test_long_get_es_extended_3_3():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_post and stable_pre work together
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, stable_pre='j', stable_post='j', include=['j', 'y'])
assert len(es_extended) > 0 # Make sure something is left
assert np.sum(es_extended['j_l3'] != es_extended['j_l2']) == 0
assert np.sum(es_extended['j_l2'] != es_extended['j_l1']) == 0
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
assert np.sum(es_extended['j_f1'] != es_extended['j_f2']) == 0
# Only uncomment for manual testing - this produces a graph which pauses the testing
# def test_long_plot_es_extended_4():
# # Test plot_es_extended() by making sure it doesn't crash
# sim_data = bpd.SimBipartite().sim_network()
# bdf = bpd.BipartiteLong(sim_data).clean_data().cluster(grouping=bpd.grouping.kmeans(n_clusters=2))
# bdf.plot_es_extended()
# assert True # Just making sure it doesn't crash
############################################
##### Tests for BipartiteLongCollapsed #####
############################################
def test_long_collapsed_1():
# Test constructor for BipartiteLongCollapsed.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
df = pd.DataFrame(bdf.get_collapsed_long()).rename({'y': 'y'}, axis=1)
bdf = bpd.BipartiteLongCollapsed(df, col_dict={'y': 'y'})
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
#########################################
##### Tests for BipartiteEventStudy #####
#########################################
def test_event_study_1():
# Test constructor for BipartiteEventStudy.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
df = pd.DataFrame(bdf.get_es()).rename({'t1': 't'}, axis=1)
bdf = bpd.BipartiteEventStudy(df, col_dict={'t1': 't'})
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 2
assert stayers.iloc[1]['y2'] == 2
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['t1'] == 1
assert movers.iloc[1]['t2'] == 2
def test_get_cs_2():
# Test get_cs() for BipartiteEventStudy.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
df = pd.DataFrame(bdf.get_es()).rename({'t1': 't'}, axis=1)
bdf = bpd.BipartiteEventStudy(df, col_dict={'t1': 't'})
bdf = bdf.clean_data()
bdf = bdf.get_cs()
stayers = bdf[bdf['m'] == 0]
movers1 = bdf[(bdf['m'] > 0) & (bdf['cs'] == 1)]
movers0 = bdf[(bdf['m'] > 0) & (bdf['cs'] == 0)]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 2
assert stayers.iloc[1]['y2'] == 2
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers1.iloc[0]['i'] == 0
assert movers1.iloc[0]['j1'] == 0
assert movers1.iloc[0]['j2'] == 1
assert movers1.iloc[0]['y1'] == 2
assert movers1.iloc[0]['y2'] == 1
assert movers1.iloc[0]['t1'] == 1
assert movers1.iloc[0]['t2'] == 2
assert movers1.iloc[1]['i'] == 1
assert movers1.iloc[1]['j1'] == 1
assert movers1.iloc[1]['j2'] == 2
assert movers1.iloc[1]['y1'] == 1
assert movers1.iloc[1]['y2'] == 1
assert movers1.iloc[1]['t1'] == 1
assert movers1.iloc[1]['t2'] == 2
assert movers0.iloc[0]['i'] == 0
assert movers0.iloc[0]['j1'] == 1
assert movers0.iloc[0]['j2'] == 0
assert movers0.iloc[0]['y1'] == 1
assert movers0.iloc[0]['y2'] == 2
assert movers0.iloc[0]['t1'] == 2
assert movers0.iloc[0]['t2'] == 1
assert movers0.iloc[1]['i'] == 1
assert movers0.iloc[1]['j1'] == 2
assert movers0.iloc[1]['j2'] == 1
assert movers0.iloc[1]['y1'] == 1
assert movers0.iloc[1]['y2'] == 1
assert movers0.iloc[1]['t1'] == 2
assert movers0.iloc[1]['t2'] == 1
##################################################
##### Tests for BipartiteEventStudyCollapsed #####
##################################################
def test_event_study_collapsed_1():
# Test constructor for BipartiteEventStudyCollapsed.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
df = pd.DataFrame(bdf.get_es()).rename({'y1': 'comp1'}, axis=1)
bdf = bpd.BipartiteEventStudyCollapsed(df, col_dict={'y1': 'comp1'})
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['t11'] == 1
assert stayers.iloc[0]['t12'] == 2
assert stayers.iloc[0]['t21'] == 1
assert stayers.iloc[0]['t22'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t11'] == 1
assert movers.iloc[0]['t12'] == 1
assert movers.iloc[0]['t21'] == 2
assert movers.iloc[0]['t22'] == 2
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1.5
assert movers.iloc[1]['t11'] == 1
assert movers.iloc[1]['t12'] == 1
assert movers.iloc[1]['t21'] == 2
assert movers.iloc[1]['t22'] == 3
def test_get_cs_collapsed_2():
# Test get_cs() for BipartiteEventStudyCollapsed.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
df = pd.DataFrame(bdf.get_es()).rename({'y1': 'comp1'}, axis=1)
bdf = bpd.BipartiteEventStudyCollapsed(df, col_dict={'y1': 'comp1'})
bdf = bdf.clean_data()
bdf = bdf.get_cs()
stayers = bdf[bdf['m'] == 0]
movers1 = bdf[(bdf['m'] > 0) & (bdf['cs'] == 1)]
movers0 = bdf[(bdf['m'] > 0) & (bdf['cs'] == 0)]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['t11'] == 1
assert stayers.iloc[0]['t12'] == 2
assert stayers.iloc[0]['t21'] == 1
assert stayers.iloc[0]['t22'] == 2
assert movers1.iloc[0]['i'] == 0
assert movers1.iloc[0]['j1'] == 0
assert movers1.iloc[0]['j2'] == 1
assert movers1.iloc[0]['y1'] == 2
assert movers1.iloc[0]['y2'] == 1
assert movers1.iloc[0]['t11'] == 1
assert movers1.iloc[0]['t12'] == 1
assert movers1.iloc[0]['t21'] == 2
assert movers1.iloc[0]['t22'] == 2
assert movers1.iloc[1]['i'] == 1
assert movers1.iloc[1]['j1'] == 1
assert movers1.iloc[1]['j2'] == 2
assert movers1.iloc[1]['y1'] == 1
assert movers1.iloc[1]['y2'] == 1.5
assert movers1.iloc[1]['t11'] == 1
assert movers1.iloc[1]['t12'] == 1
assert movers1.iloc[1]['t21'] == 2
assert movers1.iloc[1]['t22'] == 3
assert movers0.iloc[0]['i'] == 0
assert movers0.iloc[0]['j1'] == 1
assert movers0.iloc[0]['j2'] == 0
assert movers0.iloc[0]['y1'] == 1
assert movers0.iloc[0]['y2'] == 2
assert movers0.iloc[0]['t11'] == 2
assert movers0.iloc[0]['t12'] == 2
assert movers0.iloc[0]['t21'] == 1
assert movers0.iloc[0]['t22'] == 1
assert movers0.iloc[1]['i'] == 1
assert movers0.iloc[1]['j1'] == 2
assert movers0.iloc[1]['j2'] == 1
assert movers0.iloc[1]['y1'] == 1.5
assert movers0.iloc[1]['y2'] == 1
assert movers0.iloc[1]['t11'] == 2
assert movers0.iloc[1]['t12'] == 3
assert movers0.iloc[1]['t21'] == 1
assert movers0.iloc[1]['t22'] == 1
#####################################
##### Tests for BipartitePandas #####
#####################################
def test_reformatting_1():
# Convert from long --> event study --> long --> collapsed long --> collapsed event study --> collapsed long to ensure conversion maintains data properly.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3, 'g': 1})
worker_data.append({'i': 1, 'j': 5, 'y': 1., 't': 3, 'g': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2, 'g': 1})
# Worker 4
worker_data.append({'i': 4, 'j': 0, 'y': 1., 't': 1, 'g': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_es()
bdf = bdf.clean_data()
bdf = bdf.get_long()
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.clean_data()
bdf = bdf.get_es()
bdf = bdf.clean_data()
bdf = bdf.get_long()
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 2
assert stayers.iloc[0]['g'] == 0
assert stayers.iloc[1]['i'] == 3
assert stayers.iloc[1]['j'] == 0
assert stayers.iloc[1]['y'] == 1
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[1]['g'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 1
assert movers.iloc[0]['g'] == 0
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 2
assert movers.iloc[1]['g'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t1'] == 1
assert movers.iloc[2]['t2'] == 1
assert movers.iloc[2]['g'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1.5
assert movers.iloc[3]['t1'] == 2
assert movers.iloc[3]['t2'] == 3
assert movers.iloc[3]['g'] == 0
###################################
##### Tests for Connectedness #####
###################################
def test_connectedness_1():
# Test connected and leave-one-firm-out for collapsed long format.
# There are 2 biconnected sets that are connected by 1 mover, so the largest connected set is all the observations, while the largest biconnected component is the larger of the 2 biconnected sets.
worker_data = []
# Group 1 is firms 0 to 5
# Worker 0
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 1, 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1.5, 't': 2})
# Worker 1
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 2, 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1, 't': 2})
# Worker 2
# Firm 2 -> 3
worker_data.append({'i': 2, 'j': 2, 'y': 3, 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 2.5, 't': 2})
# Worker 3
# Firm 3 -> 4
worker_data.append({'i': 3, 'j': 3, 'y': 1, 't': 1})
worker_data.append({'i': 3, 'j': 4, 'y': 1, 't': 2})
# Worker 4
# Firm 4 -> 5
worker_data.append({'i': 4, 'j': 4, 'y': 1, 't': 1})
worker_data.append({'i': 4, 'j': 5, 'y': 1.3, 't': 2})
# Worker 5
# Firm 5 -> 0
worker_data.append({'i': 5, 'j': 5, 'y': 1.1, 't': 1})
worker_data.append({'i': 5, 'j': 0, 'y': 1, 't': 2})
# Group 2 is firms 6 to 9
# Group 2 is linked to Group 1 through firms 3 and 6
# Worker 6
# Firm 3 -> 6
worker_data.append({'i': 6, 'j': 3, 'y': 2, 't': 1})
worker_data.append({'i': 6, 'j': 6, 'y': 1, 't': 2})
# Worker 7
# Firm 6 -> 7
worker_data.append({'i': 7, 'j': 6, 'y': 1, 't': 1})
worker_data.append({'i': 7, 'j': 7, 'y': 2, 't': 2})
# Worker 8
# Firm 7 -> 8
worker_data.append({'i': 8, 'j': 7, 'y': 1.5, 't': 1})
worker_data.append({'i': 8, 'j': 8, 'y': 1.2, 't': 2})
# Worker 9
# Firm 8 -> 9
worker_data.append({'i': 9, 'j': 8, 'y': 1.6, 't': 1})
worker_data.append({'i': 9, 'j': 9, 'y': 2, 't': 2})
# Worker 10
# Firm 9 -> 6
worker_data.append({'i': 10, 'j': 9, 'y': 1.8, 't': 1})
worker_data.append({'i': 10, 'j': 6, 'y': 1.4, 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Connected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'connected'}))
assert bdf.n_firms() == 10
# Biconnected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'leave_one_firm_out'}))
assert bdf.n_firms() == 6
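# Illustrative helper, not part of the original test suite: build the firm-to-firm mover
# graph from a worker_data list like the one above so the connectedness claims can be
# inspected with networkx (assumed to be installed). This simple graph view matches the
# 'connected' criterion here, and for this example its biconnected components also match
# 'leave_one_firm_out'; as the comment on test_connectedness_3 explains, the two notions
# can differ in general, so treat this only as a sketch.
def _firm_mover_graph(worker_data):
    import networkx as nx  # assumption: networkx is available
    G = nx.Graph()
    spells_by_worker = {}
    for row in worker_data:
        spells_by_worker.setdefault(row['i'], []).append((row['t'], row['j']))
    for spells in spells_by_worker.values():
        spells.sort()
        G.add_nodes_from(j for _, j in spells)
        for (_, j1), (_, j2) in zip(spells, spells[1:]):
            if j1 != j2:
                G.add_edge(j1, j2)
    return G
# Example (with the worker_data list built inside test_connectedness_1):
#   import networkx as nx
#   G = _firm_mover_graph(worker_data)
#   max(len(c) for c in nx.connected_components(G))    # 10 firms in the largest connected set
#   max(len(c) for c in nx.biconnected_components(G))  # 6 firms in the largest biconnected set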
def test_connectedness_2():
# Test connected and leave-one-firm-out for collapsed long format. Now, add two firms connected to the largest biconnected set that are linked only by 1 mover.
# There are 3 biconnected sets that are connected by 1 mover, so the largest connected set is all the observations, while the largest biconnected component is the larger of the 3 biconnected sets.
worker_data = []
# Group 1 is firms 0 to 5
# Worker 0
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 1, 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1.5, 't': 2})
# Worker 1
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 2, 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1, 't': 2})
# Worker 2
# Firm 2 -> 3
worker_data.append({'i': 2, 'j': 2, 'y': 3, 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 2.5, 't': 2})
# Worker 3
# Firm 3 -> 4
worker_data.append({'i': 3, 'j': 3, 'y': 1, 't': 1})
worker_data.append({'i': 3, 'j': 4, 'y': 1, 't': 2})
# Worker 4
# Firm 4 -> 5
worker_data.append({'i': 4, 'j': 4, 'y': 1, 't': 1})
worker_data.append({'i': 4, 'j': 5, 'y': 1.3, 't': 2})
# Worker 5
# Firm 5 -> 0
worker_data.append({'i': 5, 'j': 5, 'y': 1.1, 't': 1})
worker_data.append({'i': 5, 'j': 0, 'y': 1, 't': 2})
# Group 2 is firms 6 to 9
# Group 2 is linked to Group 1 through firms 3 and 6
# Worker 6
# Firm 3 -> 6
worker_data.append({'i': 6, 'j': 3, 'y': 2, 't': 1})
worker_data.append({'i': 6, 'j': 6, 'y': 1, 't': 2})
# Worker 7
# Firm 6 -> 7
worker_data.append({'i': 7, 'j': 6, 'y': 1, 't': 1})
worker_data.append({'i': 7, 'j': 7, 'y': 2, 't': 2})
# Worker 8
# Firm 7 -> 8
worker_data.append({'i': 8, 'j': 7, 'y': 1.5, 't': 1})
worker_data.append({'i': 8, 'j': 8, 'y': 1.2, 't': 2})
# Worker 9
# Firm 8 -> 9
worker_data.append({'i': 9, 'j': 8, 'y': 1.6, 't': 1})
worker_data.append({'i': 9, 'j': 9, 'y': 2, 't': 2})
# Worker 10
# Firm 9 -> 6
worker_data.append({'i': 10, 'j': 9, 'y': 1.8, 't': 1})
worker_data.append({'i': 10, 'j': 6, 'y': 1.4, 't': 2})
# Group 3 is firms 10 to 11
# Worker 11
# Firm 10 -> 4
worker_data.append({'i': 11, 'j': 10, 'y': 1.3, 't': 1})
worker_data.append({'i': 11, 'j': 4, 'y': 1.2, 't': 2})
worker_data.append({'i': 11, 'j': 11, 'y': 1, 't': 3})
worker_data.append({'i': 11, 'j': 10, 'y': 1.1, 't': 4})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Connected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'connected'}))
assert bdf.n_firms() == 12
# Biconnected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'leave_one_firm_out'}))
assert bdf.n_firms() == 6
def test_connectedness_3():
# Test connected and leave-one-firm-out for collapsed long format.
# There are 2 biconnected sets that are connected by 1 mover, so the largest connected set is all the observations. However, unlike test 1, the largest biconnected component is also all the observations. This is because individual 2 goes from firm 2 to 3 to 6 to 7. It seems that removing firm 3 would disconnect the 2 groups, but the shifts used to compute the biconnected components correct for this.
worker_data = []
# Group 1 is firms 0 to 5
# Worker 0
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 1, 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1.5, 't': 2})
# Worker 1
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 2, 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1, 't': 2})
# Worker 2
# Firm 2 -> 3 -> 6 -> 7
worker_data.append({'i': 2, 'j': 2, 'y': 3, 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 2.5, 't': 2})
worker_data.append({'i': 2, 'j': 6, 'y': 1, 't': 3})
worker_data.append({'i': 2, 'j': 7, 'y': 2.5, 't': 4})
# Worker 3
# Firm 3 -> 4
worker_data.append({'i': 3, 'j': 3, 'y': 1, 't': 1})
worker_data.append({'i': 3, 'j': 4, 'y': 1, 't': 2})
# Worker 4
# Firm 4 -> 5
worker_data.append({'i': 4, 'j': 4, 'y': 1, 't': 1})
worker_data.append({'i': 4, 'j': 5, 'y': 1.3, 't': 2})
# Worker 5
# Firm 5 -> 0
worker_data.append({'i': 5, 'j': 5, 'y': 1.1, 't': 1})
worker_data.append({'i': 5, 'j': 0, 'y': 1, 't': 2})
# Group 2 is firms 6 to 9
# Group 2 is linked to Group 1 through firms 2, 3, 6, and 7
# Worker 6
# Firm 6 -> 7
worker_data.append({'i': 6, 'j': 6, 'y': 1, 't': 1})
worker_data.append({'i': 6, 'j': 7, 'y': 2, 't': 2})
# Worker 7
# Firm 7 -> 8
worker_data.append({'i': 7, 'j': 7, 'y': 1.5, 't': 1})
worker_data.append({'i': 7, 'j': 8, 'y': 1.2, 't': 2})
# Worker 8
# Firm 8 -> 9
worker_data.append({'i': 8, 'j': 8, 'y': 1.6, 't': 1})
worker_data.append({'i': 8, 'j': 9, 'y': 2, 't': 2})
# Worker 9
# Firm 9 -> 6
worker_data.append({'i': 9, 'j': 9, 'y': 1.8, 't': 1})
worker_data.append({'i': 9, 'j': 6, 'y': 1.4, 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Connected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'connected'}))
assert bdf.n_firms() == 10
# Biconnected
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long().clean_data(bpd.clean_params({'connectedness': 'leave_one_firm_out'}))
assert bdf.n_firms() == 10
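# Sketch of the leave-one-firm-out idea described in the comment above (an illustration,
# not bipartitepandas' actual implementation): when firm f is dropped, each worker's
# remaining spells are re-linked in time order, so worker 2's 2 -> 3 -> 6 -> 7 path still
# ties the two cycles together via 2 -> 6 once firm 3 is removed. Checking every firm this
# way is consistent with the assertion that all 10 firms survive. networkx is assumed.
def _connected_after_dropping(worker_data, f):
    import networkx as nx  # assumption: networkx is available
    G = nx.Graph()
    spells_by_worker = {}
    for row in worker_data:
        if row['j'] != f:
            spells_by_worker.setdefault(row['i'], []).append((row['t'], row['j']))
    for spells in spells_by_worker.values():
        spells.sort()
        G.add_nodes_from(j for _, j in spells)
        for (_, j1), (_, j2) in zip(spells, spells[1:]):
            if j1 != j2:
                G.add_edge(j1, j2)
    return len(G) == 0 or nx.is_connected(G)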
def test_connectedness_4():
# Test connected and leave-one-firm-out for collapsed long format.
# There are 2 biconnected sets that are connected by 1 mover, so the largest connected set is all the observations. Unlike test 3, the largest biconnected component is group 1. This is because individual 2 goes from firm 2 to 3 to 6. We also now have individual 4 going from 4 to 5 to 6. However, removing firm 6 disconnects the 2 groups. Additionally, these new linkages to firm 6 mean firm 6 is a member of both groups.
worker_data = []
# Group 1 is firms 0 to 5
# Worker 0
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 1, 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1.5, 't': 2})
# Worker 1
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 2, 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1, 't': 2})
# Worker 2
# Firm 2 -> 3 -> 6
worker_data.append({'i': 2, 'j': 2, 'y': 3, 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 2.5, 't': 2})
worker_data.append({'i': 2, 'j': 6, 'y': 1, 't': 3})
# Worker 3
# Firm 3 -> 4
worker_data.append({'i': 3, 'j': 3, 'y': 1, 't': 1})
worker_data.append({'i': 3, 'j': 4, 'y': 1, 't': 2})
# Worker 4
# Firm 4 -> 5 -> 6
worker_data.append({'i': 4, 'j': 4, 'y': 1, 't': 1})
worker_data.append({'i': 4, 'j': 5, 'y': 1.3, 't': 2})
worker_data.append({'i': 4, 'j': 6, 'y': 1.15, 't': 3})
# Worker 5
# Firm 5 -> 0
worker_data.append({'i': 5, 'j': 5, 'y': 1.1, 't': 1})
worker_data.append({'i': 5, 'j': 0, 'y': 1, 't': 2})
# Group 2 is firms 6 to 9
# Group 2 is linked to Group 1 through firms 2, 3, and 6
# Worker 6
# Firm 6 -> 7
worker_data.append({'i': 6, 'j': 6, 'y': 1, 't': 1})
worker_data.append({'i': 6, 'j': 7, 'y': 2, 't': 2})
# Worker 7
# Firm 7 -> 8
worker_data.append({'i': 7, 'j': 7, 'y': 1.5, 't': 1})
worker_data.append({'i': 7, 'j': 8, 'y': 1.2, 't': 2})
# Worker 8
# Firm 8 -> 9
worker_data.append({'i': 8, 'j': 8, 'y': 1.6, 't': 1})
worker_data.append({'i': 8, 'j': 9, 'y': 2, 't': 2})
# Worker 9
# Firm 9 -> 6
worker_data.append({'i': 9, 'j': 9, 'y': 1.8, 't': 1})
worker_data.append({'i': 9, 'j': 6, 'y': 1.4, 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
__author__ = "<NAME>"
__copyright__ = "xuanchen yao"
__license__ = "mit"
import mysql.connector
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
def train(i):
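# Pulls the full 'weather' table from the (hard-coded) RDS MySQL instance into a pandas
# DataFrame for model training; the credentials above are placeholders.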
try:
rds_host='mybike.c0jxuz6r8olg.us-west-2.rds.amazonaws.com'
name='hibike'
pwd='<PASSWORD>'
db_name='bike'
port=3306
conn = mysql.connector.connect(host=rds_host,user=name, password=pwd, database=db_name)
df = pd.read_sql("select * from weather", con=conn)
import random
import time
import numpy as np
import pandas as pd
from pyziabm.runner2017mpi_r4 import Runner
def participation_to_list(h5in, outlist):
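# Reads the 'trades' table from the HDF5 store, attributes each trade to the resting
# trader, and appends that run's market-maker (and penny-jumper 'j0', if present)
# participation shares to outlist. `j` is assumed to be the Monte Carlo run index
# defined at module scope by the calling loop.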
trade_df = pd.read_hdf(h5in, 'trades')
trade_df = trade_df.assign(trader_id = trade_df.resting_order_id.str.split('_').str[0])
lt_df = pd.DataFrame(trade_df.groupby(['trader_id']).quantity.count())
lt_df.rename(columns={'quantity': 'trade'}, inplace=True)
if 'p999999' in lt_df.index:
lt_df.drop('p999999', inplace=True)
ltsum_df = pd.DataFrame(trade_df.groupby(['trader_id']).quantity.sum())
ltsum_df.rename(columns={'quantity': 'trade_vol'}, inplace=True)
ltsum_df = ltsum_df.assign(Participation = 100*ltsum_df.trade_vol/ltsum_df.trade_vol.sum())
providers = ltsum_df.index.unique()
market_makers = [x for x in providers if x.startswith('m')]
market_makers.append('j0')
ltsum_df = ltsum_df.reindex(market_makers)  # .ix is removed in modern pandas; reindex keeps NaN rows for absent ids
part_dict = {'MCRun': j, 'MM_Participation': ltsum_df.loc['m0', 'Participation']}
if 'j0' in providers:
part_dict.update({'PJ_Participation': ltsum_df.loc['j0', 'Participation']})
outlist.append(part_dict)
def position_to_list(h5in, outlist):
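# Reads the market-maker position table ('mmp') and records each market maker's minimum
# and maximum inventory position for this run. `j` is again assumed to be the
# module-level Monte Carlo run index.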
mmcf_df = pd.read_hdf(h5in, 'mmp')
market_makers = mmcf_df.mmid.unique()
for mm in market_makers:
pos_dict = {}
pos_dict['MCRun'] = j
pos_dict['MarketMaker'] = mm
pos_dict['Min'] = mmcf_df[mmcf_df.mmid == mm].position.min()
pos_dict['Max'] = mmcf_df[mmcf_df.mmid == mm].position.max()
outlist.append(pos_dict)
def profit_to_list(h5in, outlist):
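# Splits trades into buy and sell legs per trader, accumulates signed cash flows and
# cumulative volumes, then matches buy and sell legs on equal cumulative volume to line
# up round-trip cash flows.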
trade_df = pd.read_hdf(h5in, 'trades')
trade_df = trade_df.assign(trader_id = trade_df.resting_order_id.str.split('_').str[0])
buy_trades = trade_df[trade_df.side=='buy']
buy_trades = buy_trades.assign(BuyCashFlow = buy_trades.price*buy_trades.quantity)
buy_trades = buy_trades.assign(BuyVol = buy_trades.groupby('trader_id').quantity.cumsum(),
CumulBuyCF = buy_trades.groupby('trader_id').BuyCashFlow.cumsum()
)
buy_trades.rename(columns={'timestamp': 'buytimestamp'}, inplace=True)
sell_trades = trade_df[trade_df.side=='sell']
sell_trades = sell_trades.assign(SellCashFlow = -sell_trades.price*sell_trades.quantity)
sell_trades = sell_trades.assign(SellVol = sell_trades.groupby('trader_id').quantity.cumsum(),
CumulSellCF = sell_trades.groupby('trader_id').SellCashFlow.cumsum()
)
sell_trades.rename(columns={'timestamp': 'selltimestamp'}, inplace=True)
buy_trades = buy_trades[['trader_id', 'BuyVol', 'CumulBuyCF', 'buytimestamp']]
sell_trades = sell_trades[['trader_id', 'SellVol', 'CumulSellCF', 'selltimestamp']]
cash_flow = pd.merge(buy_trades, sell_trades, left_on=['trader_id', 'BuyVol'], right_on=['trader_id', 'SellVol'])
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = date_range('20130101', periods=3, name='bar')  # api: pandas.date_range
import numpy as np
import pandas as pd
from scipy import sparse
def genpoisson_spiketrain(rate, dt, duration):
offset = duration
events = np.cumsum(np.random.exponential(scale = 1 / rate, size = int(duration*rate + offset)))
return np.round(events[np.logical_and(0 < events, events < duration)], -int(np.log10(dt)))
def genpoisson_spiketrains(nneurons, rate, dt, duration):
    spike_timings = np.array([], dtype=float)
    spike_neurons = np.array([], dtype=int)
for n in range(nneurons):
spike_train = genpoisson_spiketrain(rate, dt, duration)
spike_timings = np.r_[spike_timings, spike_train]
        spike_neurons = np.r_[spike_neurons, n * np.ones_like(spike_train, dtype=int)]
return pd.DataFrame({
"neuronid": spike_neurons,
"spiketime": spike_timings
})
def gen_sequence(nneurons = 10, seqlen = 0.1, dt = 0.001):
return np.round(np.linspace(dt, seqlen-dt, nneurons), int(-np.log10(dt)))
def gen_sequences(neurons = np.arange(10), nsequences = 10, start = 0, end = 60, seqlen = 0.1, dt = 0.001, shrink = 1):
    spike_timings = np.array([], dtype=float)
    spike_neurons = np.array([], dtype=int)
nneurons = len(neurons)
sequence_onsets = np.random.choice(
np.arange(start, end - seqlen, seqlen),
nsequences,
replace = False
)
for onset in sequence_onsets:
spike_timings = np.r_[spike_timings, onset + gen_sequence(nneurons, seqlen / shrink, dt)]
spike_neurons = np.r_[spike_neurons, neurons]
return pd.DataFrame({
"neuronid": spike_neurons,
"spiketime": spike_timings
})
def gen_sequences_with_replay(shrinkages = [2], neurons = np.arange(10), nsequences = 10, duration = 60, seqlen = 0.1, dt = 0.001):
duration_per_type = duration / (len(shrinkages) + 1)
sequences = gen_sequences(neurons,
nsequences,
0,
duration_per_type,
seqlen,
dt)
for idx, shrinkage in enumerate(shrinkages):
replay = gen_sequences(neurons,
nsequences,
duration_per_type * (idx + 1),
duration_per_type * (idx + 2),
seqlen,
dt,
abs(shrinkage))
if shrinkage < 0: # reverse replay
replay = pd.DataFrame({
"neuronid": replay.neuronid,
"spiketime": np.copy(replay.spiketime[::-1])
})
sequences = pd.concat([sequences, replay])
return sequences
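# Note (added for clarity): gen_sequences_with_replay() splits the recording into
# len(shrinkages) + 1 equal windows. The first window holds the template sequences;
# each later window holds the same sequences time-compressed by abs(shrinkage)
# (seqlen / shrink inside gen_sequence). A negative shrinkage additionally reverses
# the spike-time column relative to the neuron order, approximating reverse replay.
# Example call (illustrative values): gen_sequences_with_replay(shrinkages=[2, -2])
# yields template, 2x-compressed forward replay, and 2x-compressed reverse replay thirds.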
def df2binarray_csc(df, duration_ms = 61, binwidth = 1):
neuronids = df.neuronid
spikes_ms = df.spiketime * 1000
nneurons = int(neuronids.max()+1)
nrow = nneurons
ncol = int(duration_ms) // binwidth + 1000
binarray_lil = sparse.lil_matrix((nrow, ncol))
for neuronid in range(nneurons):
spike_train_of_a_neuron = spikes_ms[neuronids == neuronid]
bins = np.arange(0, ncol * binwidth, binwidth)
digitized_spike_train_of_a_neuron = np.digitize(spike_train_of_a_neuron, bins) - 1
binned_spike_train_of_a_neuron = np.bincount(digitized_spike_train_of_a_neuron)
binarray_lil[neuronid, digitized_spike_train_of_a_neuron] = binned_spike_train_of_a_neuron[digitized_spike_train_of_a_neuron]
return binarray_lil.tocsc()
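# --- Illustrative usage sketch (added; not part of the original module) ---
# The helpers above generate Poisson background spikes plus embedded sequences and
# bin them into a scipy CSC matrix (rows = neurons, columns = time bins). The
# parameter values below are arbitrary examples.
def _demo_binning():
    demo_df = genpoisson_spiketrains(nneurons=5, rate=10, dt=0.001, duration=2.0)
    binned = df2binarray_csc(demo_df, duration_ms=2000, binwidth=1)
    return binned.shape  # typically (5, 3000): 5 neurons, 2000 ms of 1 ms bins plus padding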
def gendata():
dt = 0.001
# nsequences = 10
# seqlen = 0.3
nsequences = 10
seqlen = 0.1
shrinkages = [1, 1]
nneurons = 10
duration = nsequences * seqlen * (len(shrinkages) + 1) + 0.2
nseqkinds = 3
df = pd.DataFrame()
for idx in range(nseqkinds):
df_seq = gen_sequences_with_replay(
shrinkages = shrinkages,
neurons = np.arange(nneurons*(idx), nneurons*(idx+1)),
nsequences = nsequences,
duration = duration,
seqlen = seqlen,
dt = dt)
df_seq = pd.DataFrame({
"neuronid": df_seq.neuronid,
"spiketime": np.copy(df_seq.spiketime + duration * idx + idx)
})
        df = pd.concat([df, df_seq])  # api: pandas.concat
from __future__ import print_function, division
import os
import glob
import re
import copy
import warnings
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
import pyemu
from ..pyemu_warnings import PyemuWarning
from pyemu.pst.pst_controldata import ControlData, SvdData, RegData
from pyemu.pst import pst_utils
from pyemu.plot import plot_utils
# from pyemu.utils.os_utils import run
class Pst(object):
"""All things PEST(++) control file
Args:
filename (`str`): the name of the control file
load (`bool`, optional): flag to load the control file. Default is True
resfile (`str`, optional): corresponding residual file. If `None`, a residual file
with the control file base name is sought. Default is `None`
Note:
This class is the primary mechanism for dealing with PEST control files. Support is provided
for constructing new control files as well as manipulating existing control files.
Example::
pst = pyemu.Pst("my.pst")
pst.control_data.noptmax = -1
pst.write("my_new.pst")
"""
def __init__(self, filename, load=True, resfile=None):
self.parameter_data = None
"""pandas.DataFrame: '* parameter data' information. Columns are
standard PEST variable names
Example::
pst.parameter_data.loc[:,"partrans"] = "log"
pst.parameter_data.loc[:,"parubnd"] = 10.0
"""
self.observation_data = None
"""pandas.DataFrame: '* observation data' information. Columns are standard PEST
variable names
Example::
pst.observation_data.loc[:,"weight"] = 1.0
pst.observation_data.loc[:,"obgnme"] = "obs_group"
"""
self.prior_information = None
"""pandas.DataFrame: '* prior information' data. Columns are standard PEST
variable names"""
self.model_input_data = pst_utils.pst_config["null_model_io"]
self.model_output_data = pst_utils.pst_config["null_model_io"]
self.filename = filename
self.resfile = resfile
self.__res = None
self.__pi_count = 0
self.with_comments = False
self.comments = {}
self.other_sections = {}
self.new_filename = None
for key, value in pst_utils.pst_config.items():
self.__setattr__(key, copy.copy(value))
# self.tied = None
self.control_data = ControlData()
"""pyemu.pst.pst_controldata.ControlData: '* control data' information.
Access with standard PEST variable names
Example::
pst.control_data.noptmax = 2
pst.control_data.pestmode = "estimation"
"""
self.svd_data = SvdData()
"""pyemu.pst.pst_controldata.SvdData: '* singular value decomposition' section information.
Access with standard PEST variable names
Example::
pst.svd_data.maxsing = 100
"""
self.reg_data = RegData()
"""pyemu.pst.pst_controldata.RegData: '* regularization' section information.
Access with standard PEST variable names.
Example::
pst.reg_data.phimlim = 1.00 #yeah right!
"""
if load:
if not os.path.exists(filename):
raise Exception("pst file not found:{0}".format(filename))
self.load(filename)
def __setattr__(self, key, value):
if key == "model_command":
if isinstance(value, str):
value = [value]
super(Pst, self).__setattr__(key, value)
@classmethod
def from_par_obs_names(cls, par_names=["par1"], obs_names=["obs1"]):
"""construct a shell `Pst` instance from parameter and observation names
Args:
par_names ([`str`]): list of parameter names. Default is [`par1`]
obs_names ([`str`]): list of observation names. Default is [`obs1`]
Note:
While this method works, it does not make template or instruction files.
Users are encouraged to use `Pst.from_io_files()` for more usefulness
Example::
par_names = ["par1","par2"]
obs_names = ["obs1","obs2"]
pst = pyemu.Pst.from_par_obs_names(par_names,obs_names)
"""
return pst_utils.generic_pst(par_names=par_names, obs_names=obs_names)
@property
def phi(self):
"""get the weighted total objective function.
Returns:
            `float`: sum of squared, weighted residuals
Note:
Requires `Pst.res` (the residuals file) to be available
"""
psum = 0.0
for _, contrib in self.phi_components.items():
psum += contrib
return psum
@property
def phi_components(self):
"""get the individual components of the total objective function
Returns:
`dict`: dictionary of observation group, contribution to total phi
Note:
Requires `Pst.res` (the residuals file) to be available
"""
# calculate phi components for each obs group
components = {}
ogroups = self.observation_data.groupby("obgnme").groups
rgroups = self.res.groupby("group").groups
self.res.index = self.res.name
for og, onames in ogroups.items():
# assert og in rgroups.keys(),"Pst.phi_componentw obs group " +\
# "not found: " + str(og)
# og_res_df = self.res.ix[rgroups[og]]
og_res_df = self.res.loc[onames, :].dropna(axis=1)
# og_res_df.index = og_res_df.name
og_df = self.observation_data.loc[ogroups[og], :]
og_df.index = og_df.obsnme
# og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0], (
" Pst.phi_components error: group residual dataframe row length"
+ "doesn't match observation data group dataframe row length"
+ str(og_df.shape)
+ " vs. "
+ str(og_res_df.shape)
+ ","
+ og
)
if "modelled" not in og_res_df.columns:
print(og_res_df)
m = self.res.loc[onames, "modelled"]
print(m.loc[m.isna()])
raise Exception("'modelled' not in res df columns for group " + og)
# components[og] = np.sum((og_res_df["residual"] *
# og_df["weight"]) ** 2)
components[og] = np.sum(
(
(og_df.loc[:, "obsval"] - og_res_df.loc[og_df.obsnme, "modelled"])
* og_df.loc[:, "weight"]
)
** 2
)
if (
not self.control_data.pestmode.startswith("reg")
and self.prior_information.shape[0] > 0
):
ogroups = self.prior_information.groupby("obgnme").groups
for og in ogroups.keys():
if og not in rgroups.keys():
raise Exception(
"Pst.adjust_weights_res() obs group " + "not found: " + str(og)
)
og_res_df = self.res.loc[rgroups[og], :]
og_res_df.index = og_res_df.name
og_df = self.prior_information.loc[ogroups[og], :]
og_df.index = og_df.pilbl
og_res_df = og_res_df.loc[og_df.index, :]
if og_df.shape[0] != og_res_df.shape[0]:
raise Exception(
" Pst.phi_components error: group residual dataframe row length"
+ "doesn't match observation data group dataframe row length"
+ str(og_df.shape)
+ " vs. "
+ str(og_res_df.shape)
)
components[og] = np.sum((og_res_df["residual"] * og_df["weight"]) ** 2)
return components
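    # Worked example (added; hypothetical numbers): for a group with
    # obsval = [1.0, 2.0], modelled = [1.5, 1.0] and weight = [2.0, 1.0],
    # the contribution computed above is
    #   sum(((obsval - modelled) * weight) ** 2) = (-0.5 * 2.0)**2 + (1.0 * 1.0)**2 = 2.0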
@property
def phi_components_normalized(self):
"""get the individual components of the total objective function
normalized to the total PHI being 1.0
Returns:
`dict`: dictionary of observation group,
normalized contribution to total phi
Note:
Requires `Pst.res` (the residuals file) to be available
"""
# use a dictionary comprehension to go through and normalize each component of phi to the total
phi = self.phi
comps = self.phi_components
norm = {i: c / phi for i, c in comps.items()}
print(phi, comps, norm)
return norm
def set_res(self, res):
"""reset the private `Pst.res` attribute.
Args:
res : (`pandas.DataFrame` or `str`): something to use as Pst.res attribute.
If `res` is `str`, a dataframe is read from file `res`
"""
if isinstance(res, str):
res = pst_utils.read_resfile(res)
self.__res = res
@property
def res(self):
"""get the residuals dataframe attribute
Returns:
`pandas.DataFrame`: a dataframe containing the
residuals information.
Note:
if the Pst.__res attribute has not been loaded,
this call loads the res dataframe from a file
"""
if self.__res is not None:
return self.__res
else:
if self.resfile is not None:
if not os.path.exists(self.resfile):
raise Exception(
"Pst.res: self.resfile " + str(self.resfile) + " does not exist"
)
else:
self.resfile = self.filename.replace(".pst", ".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res", ".rei")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".rei", ".base.rei")
if not os.path.exists(self.resfile):
if self.new_filename is not None:
self.resfile = self.new_filename.replace(".pst", ".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res", ".rei")
if not os.path.exists(self.resfile):
raise Exception(
"Pst.res: "
+ "could not residual file case.res"
+ " or case.rei"
+ " or case.base.rei"
+ " or case.obs.csv"
)
res = pst_utils.read_resfile(self.resfile)
missing_bool = self.observation_data.obsnme.apply(
lambda x: x not in res.name
)
missing = self.observation_data.obsnme[missing_bool]
if missing.shape[0] > 0:
raise Exception(
"Pst.res: the following observations "
+ "were not found in "
+ "{0}:{1}".format(self.resfile, ",".join(missing))
)
self.__res = res
return self.__res
@property
def nprior(self):
"""number of prior information equations
Returns:
`int`: the number of prior info equations
"""
self.control_data.nprior = self.prior_information.shape[0]
return self.control_data.nprior
@property
def nnz_obs(self):
"""get the number of non-zero weighted observations
Returns:
            `int`: the number of non-zero weighted observations
"""
nnz = 0
for w in self.observation_data.weight:
if w > 0.0:
nnz += 1
return nnz
@property
def nobs(self):
"""get the number of observations
Returns:
`int`: the number of observations
"""
self.control_data.nobs = self.observation_data.shape[0]
return self.control_data.nobs
@property
def npar_adj(self):
"""get the number of adjustable parameters (not fixed or tied)
Returns:
`int`: the number of adjustable parameters
"""
        n_adj = 0
        for t in self.parameter_data.partrans:
            if t not in ["fixed", "tied"]:
                n_adj += 1
        return n_adj
@property
def npar(self):
"""get number of parameters
Returns:
`int`: the number of parameters
"""
self.control_data.npar = self.parameter_data.shape[0]
return self.control_data.npar
@property
def forecast_names(self):
"""get the forecast names from the pestpp options (if any).
Returns None if no forecasts are named
Returns:
[`str`]: a list of forecast names.
"""
if "forecasts" in self.pestpp_options.keys():
if isinstance(self.pestpp_options["forecasts"], str):
return self.pestpp_options["forecasts"].lower().split(",")
else:
return [f.lower() for f in self.pestpp_options["forecasts"]]
elif "predictions" in self.pestpp_options.keys():
if isinstance(self.pestpp_options["predictions"], str):
return self.pestpp_options["predictions"].lower().split(",")
else:
return [f.lower() for f in self.pestpp_options["predictions"]]
else:
return None
@property
def obs_groups(self):
"""get the observation groups
Returns:
[`str`]: a list of unique observation groups
"""
og = self.observation_data.obgnme.unique().tolist()
return og
@property
def nnz_obs_groups(self):
"""get the observation groups that contain at least one non-zero weighted
observation
Returns:
[`str`]: a list of observation groups that contain at
least one non-zero weighted observation
"""
obs = self.observation_data
og = obs.loc[obs.weight > 0.0, "obgnme"].unique().tolist()
return og
@property
def adj_par_groups(self):
"""get the parameter groups with atleast one adjustable parameter
Returns:
[`str`]: a list of parameter groups with
at least one adjustable parameter
"""
par = self.parameter_data
tf = set(["tied", "fixed"])
adj_pargp = par.loc[par.partrans.apply(lambda x: x not in tf), "pargp"].unique()
return adj_pargp.tolist()
@property
def par_groups(self):
"""get the parameter groups
Returns:
[`str`]: a list of parameter groups
"""
return self.parameter_data.pargp.unique().tolist()
@property
def prior_groups(self):
"""get the prior info groups
Returns:
[`str`]: a list of prior information groups
"""
og = self.prior_information.obgnme.unique().tolist()
return og
@property
def prior_names(self):
"""get the prior information names
Returns:
[`str`]: a list of prior information names
"""
return self.prior_information.pilbl.tolist()
@property
def par_names(self):
"""get the parameter names
Returns:
[`str`]: a list of parameter names
"""
return self.parameter_data.parnme.tolist()
@property
def adj_par_names(self):
"""get the adjustable (not fixed or tied) parameter names
Returns:
[`str`]: list of adjustable (not fixed or tied)
parameter names
"""
par = self.parameter_data
tf = set(["tied", "fixed"])
adj_names = par.loc[par.partrans.apply(lambda x: x not in tf), "parnme"]
return adj_names.tolist()
@property
def obs_names(self):
"""get the observation names
Returns:
[`str`]: a list of observation names
"""
return self.observation_data.obsnme.tolist()
@property
def nnz_obs_names(self):
"""get the non-zero weight observation names
Returns:
[`str`]: a list of non-zero weighted observation names
"""
obs = self.observation_data
nz_names = obs.loc[obs.weight > 0.0, "obsnme"]
return nz_names.tolist()
@property
def zero_weight_obs_names(self):
"""get the zero-weighted observation names
Returns:
[`str`]: a list of zero-weighted observation names
"""
obs = self.observation_data
return obs.loc[obs.weight == 0.0, "obsnme"].tolist()
@property
def estimation(self):
"""check if the control_data.pestmode is set to estimation
Returns:
`bool`: True if `control_data.pestmode` is estmation, False otherwise
"""
return self.control_data.pestmode == "estimation"
@property
def tied(self):
"""list of tied parameter names
Returns:
`pandas.DataFrame`: a dataframe of tied parameter information.
Columns of `tied` are `parnme` and `partied`. Returns `None` if
no tied parameters are found.
"""
par = self.parameter_data
tied_pars = par.loc[par.partrans == "tied", "parnme"]
if tied_pars.shape[0] == 0:
return None
if "partied" not in par.columns:
par.loc[:, "partied"] = np.NaN
tied = par.loc[tied_pars, ["parnme", "partied"]]
return tied
@property
def template_files(self):
"""list of template file names
Returns:
`[str]`: a list of template file names, extracted from
`Pst.model_input_data.pest_file`. Returns `None` if this
attribute is `None`
"""
if (
self.model_input_data is not None
and "pest_file" in self.model_input_data.columns
):
return self.model_input_data.pest_file.to_list()
else:
return None
@property
def input_files(self):
"""list of model input file names
Returns:
`[str]`: a list of model input file names, extracted from
`Pst.model_input_data.model_file`. Returns `None` if this
attribute is `None`
"""
if (
self.model_input_data is not None
and "model_file" in self.model_input_data.columns
):
return self.model_input_data.model_file.to_list()
else:
return None
@property
def instruction_files(self):
"""list of instruction file names
Returns:
`[str]`: a list of instruction file names, extracted from
`Pst.model_output_data.pest_file`. Returns `None` if this
attribute is `None`
"""
if (
self.model_output_data is not None
and "pest_file" in self.model_output_data.columns
):
return self.model_output_data.pest_file.to_list()
else:
return None
@property
def output_files(self):
"""list of model output file names
Returns:
`[str]`: a list of model output file names, extracted from
`Pst.model_output_data.model_file`. Returns `None` if this
attribute is `None`
"""
if (
self.model_output_data is not None
and "model_file" in self.model_output_data.columns
):
return self.model_output_data.model_file.to_list()
else:
return None
@staticmethod
def _read_df(f, nrows, names, converters, defaults=None):
"""a private method to read part of an open file into a pandas.DataFrame.
Args:
f (`file`): open file handle
nrows (`int`): number of rows to read
names ([`str`]): names to set the columns of the dataframe with
converters (`dict`): dictionary of lambda functions to convert strings
to numerical format
defaults (`dict`): dictionary of default values to assign columns.
Default is None
Returns:
`pandas.DataFrame`: dataframe of control file section info
"""
seek_point = f.tell()
line = f.readline()
raw = line.strip().split()
if raw[0].lower() == "external":
filename = raw[1]
if not os.path.exists(filename):
raise Exception(
"Pst._read_df() error: external file '{0}' not found".format(
filename
)
)
df = pd.read_csv(filename, index_col=False, comment="#")
df.columns = df.columns.str.lower()
for name in names:
if name not in df.columns:
raise Exception(
"Pst._read_df() error: name"
+ "'{0}' not in external file '{1}' columns".format(
name, filename
)
)
if name in converters:
df.loc[:, name] = df.loc[:, name].apply(converters[name])
if defaults is not None:
for name in names:
df.loc[:, name] = df.loc[:, name].fillna(defaults[name])
else:
if nrows is None:
raise Exception(
"Pst._read_df() error: non-external sections require nrows"
)
f.seek(seek_point)
df = pd.read_csv(
f,
header=None,
names=names,
nrows=nrows,
delim_whitespace=True,
converters=converters,
index_col=False,
comment="#",
)
# in case there was some extra junk at the end of the lines
if df.shape[1] > len(names):
                df = df.iloc[:, :len(names)]
df.columns = names
if defaults is not None:
for name in names:
df.loc[:, name] = df.loc[:, name].fillna(defaults[name])
elif np.any(pd.isnull(df).values.flatten()):
raise Exception("NANs found")
f.seek(seek_point)
extras = []
for _ in range(nrows):
line = f.readline()
                extra = np.nan
if "#" in line:
raw = line.strip().split("#")
extra = " # ".join(raw[1:])
extras.append(extra)
df.loc[:, "extra"] = extras
return df
def _read_line_comments(self, f, forgive):
comments = []
while True:
org_line = f.readline()
line = org_line.lower().strip()
self.lcount += 1
if org_line == "":
if forgive:
return None, comments
else:
raise Exception("unexpected EOF")
if line.startswith("++") and line.split("++")[1].strip()[0] != "#":
self._parse_pestpp_line(line)
elif "++" in line:
comments.append(line.strip())
elif line.startswith("#"):
comments.append(line.strip())
else:
break
return org_line.strip(), comments
def _read_section_comments(self, f, forgive):
lines = []
section_comments = []
while True:
line, comments = self._read_line_comments(f, forgive)
section_comments.extend(comments)
if line is None or line.startswith("*"):
break
if len(line.strip()) == 0:
continue
lines.append(line)
return line, lines, section_comments
@staticmethod
def _parse_external_line(line, pst_path="."):
raw = line.strip().split()
existing_path, filename = Pst._parse_path_agnostic(raw[0])
if pst_path is not None:
if pst_path != ".":
filename = os.path.join(pst_path, filename)
else:
filename = os.path.join(existing_path, filename)
raw = line.lower().strip().split()
options = {}
if len(raw) > 1:
if len(raw) % 2 == 0:
s = "wrong number of entries on 'external' line:'{0}\n".format(line)
s += "Should include 'filename', then pairs of key-value options"
raise Exception(s)
options = {k.lower(): v.lower() for k, v in zip(raw[1:-1], raw[2:])}
return filename, options
@staticmethod
def _parse_path_agnostic(filename):
filename = filename.replace("\\", os.sep).replace("/", os.sep)
return os.path.split(filename)
@staticmethod
def _cast_df_from_lines(
section, lines, fieldnames, converters, defaults, alias_map={}, pst_path="."
):
# raw = lines[0].strip().split()
# if raw[0].lower() == "external":
if section.lower().strip().split()[-1] == "external":
dfs = []
for line in lines:
filename, options = Pst._parse_external_line(line, pst_path)
if not os.path.exists(filename):
raise Exception(
"Pst._cast_df_from_lines() error: external file '{0}' not found".format(
filename
)
)
sep = options.get("sep", ",")
missing_vals = options.get("missing_values", None)
if sep.lower() == "w":
df = pd.read_csv(
filename, delim_whitespace=True, na_values=missing_vals
)
else:
                    df = pd.read_csv(filename, sep=sep, na_values=missing_vals)  # api: pandas.read_csv
"""
Created on May 21, 2020
@author: <NAME>
start server with the following command:
bokeh serve --show OS_Report
view at: http://localhost:5006/OS_Report
"""
import os, sys
import pandas as pd
import numpy as np
import logging
from bokeh.io import curdoc
from bokeh.models import TextInput, Button, TextAreaInput, Select
from bokeh.models.widgets.markups import Div, PreText
from bokeh.layouts import layout, column, row
from bokeh.models.widgets import Panel, Tabs
import datetime
import socket
from bokeh.layouts import column, layout, row, gridplot
from bokeh.models.widgets import Panel
from bokeh.models import CustomJS, ColumnDataSource, Select, Slider, CheckboxGroup, RadioButtonGroup
from bokeh.models import ColumnDataSource, DataTable, DateFormatter, StringFormatter, BooleanFormatter, TableColumn
from bokeh.models import CheckboxButtonGroup
import gspread
from gspread_dataframe import get_as_dataframe
from oauth2client.service_account import ServiceAccountCredentials
import smtplib, ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
os.environ['OPSTOOL_DIR'] = '/Users/pfagrelius/Research/DESI/Operations/desilo/ops_tool'
class OpsTool(object):
def __init__(self):
self.test = True
self.get_all_emails = False #Change this if you want the code to print out all email addresses.
self.semester = '2022A' #None means all combined. Options are 2021B, 2022A
logging.basicConfig(filename=os.path.join(os.environ['OPSTOOL_DIR'], 'auto_ops_tool.log'),
level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
self.logger = logging.getLogger(__name__)
self.url = "https://docs.google.com/spreadsheets/d/1nzShIvgfqqeibVcGpHzm7dSpjJ78vDtWz-3N0wV9hu0/edit#gid=0"
self.feedback_url = "https://docs.google.com/spreadsheets/d/1rivcM5d5U2_WcVTfNcLFQRkZSE8I55VuEdS3ui2e_VU/edit?resourcekey#gid=1162490958"
self.preops_url = 'https://docs.google.com/spreadsheets/d/1HkoRySeJmrU_K39L_jsFLLhXl2mCbzG9OPgacRRN1xU/edit?resourcekey#gid=1462663923'
self.credentials = "./credentials.json"
self.creds = ServiceAccountCredentials.from_json_keyfile_name(self.credentials)
self.client = gspread.authorize(self.creds)
#self.sheet = self.client.open_by_url(self.url).sheet1
#self.df = get_as_dataframe(self.sheet, header=0)
#self.df = self.df[['Date', 'Comment', 'LO', 'SO_1', 'SO_2', 'OA', 'EM']]
self.feedback_sheet = self.client.open_by_url(self.feedback_url).sheet1
self.feedback_df = get_as_dataframe(self.feedback_sheet, header=0)
self.preops_sheet = self.client.open_by_url(self.preops_url).sheet1
self.preops_df = get_as_dataframe(self.preops_sheet, header=0)
for col in ['Timestamp','Your Name','Start date of your shift']:
self.preops_df[col] = self.preops_df[col].astype(str)
self.title = Div(text='Observing Operations Dashboard', css_classes=['h1-title-style'])
if self.semester == None:
pass
else:
try:
self.df = pd.read_csv(os.path.join(os.environ['OPSTOOL_DIR'], 'obs_schedule_{}.csv'.format(self.semester)))
except Exception as e:
print(e)
self.df['Date'] = pd.to_datetime(self.df['Date'], format='%m/%d/%y')
self.user_info = pd.read_csv(os.path.join(os.environ['OPSTOOL_DIR'], 'user_info.csv'))
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
self.today_df = self.df[self.df.Date == self.today]
self.per_shift_filen = os.path.join(os.environ['OPSTOOL_DIR'], 'per_shift.csv')
self.per_shift_df = pd.read_csv(self.per_shift_filen)
self.per_observer_filen = os.path.join(os.environ['OPSTOOL_DIR'], 'per_observer.csv')
self.per_observer_df = pd.read_csv(self.per_observer_filen)
for col in self.per_observer_df.columns:
self.per_observer_df[col] = self.per_observer_df[col].astype(str)
hostname = socket.gethostname()
if 'desi' in hostname:
self.location = 'kpno'
else:
self.location = 'home'
all_names = []
for name in np.unique(self.df.SO_1):
all_names.append(name.strip())
for name in np.unique(self.df.SO_2):
all_names.append(name.strip())
all_names = np.unique(all_names)
self.all_names = all_names
email_list = []
        print("These Names Don't have Emails:")
for name in all_names:
emails = self.user_info[self.user_info['name'] == name]['email']
try:
email = emails.values[0]
email_list.append(email)
except:
print(name)
if self.get_all_emails:
print(email_list)
def gave_feedback(self, shift_df):
"""Expect columns to be Observer, Shift Type, Start, End
"""
returns = []
for i, row in shift_df.iterrows():
obs = row['Observer']
these_rows = self.feedback_df[self.feedback_df['Observer Name'] == obs]
try:
last_row = these_rows.iloc[[-1]]
if row['Start'] == last_row['Observing Start'].values[0]:
returns.append(last_row['Timestamp'].values[0])
else:
returns.append('{}'.format(last_row['Timestamp'].values[0]))
except:
returns.append('None')
return returns
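    # Example (added; illustrative values) of the shift_df expected by
    # gave_feedback() and filled_preops_form():
    #     Observer        Shift Type   Start         End
    #     "A. Observer"   "SO_1"       "2022-03-01"  "2022-03-05"
    # 'Start' is matched against 'Observing Start' in the feedback sheet and
    # 'Start date of your shift' in the pre-ops sheet.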
def filled_preops_form(self, shift_df):
"""Expect columns to be Observer, Shift Type, Start, End
"""
returns = []
for i, row in shift_df.iterrows():
obs = row['Observer']
these_rows = self.preops_df[self.preops_df['Your Name'] == obs.strip()]
try:
last_row = these_rows.iloc[[-1]]
if row['Start'] == last_row['Start date of your shift'].values[0]:
returns.append(last_row['Timestamp'].values[0])
else:
returns.append('{}'.format(last_row['Timestamp'].values[0]))
except:
returns.append('None')
return returns
def get_email(self, name):
try:
email = self.user_info[self.user_info['name'] == str(name).strip()]['email'].values[0]
except:
email = None
return email
def new_day(self):
self.today = self.enter_date.value
self.today_df = self.df[self.df.Date == self.today]
self.today_source.data = self.today_df
self.daily_report()
def sched_load(self):
sheet = self.client.open_by_url(self.url).sheet1
df = get_as_dataframe(sheet, usecols = [0,1,2,3,4,5,6,7], header = 5)
self.df = df.dropna(thresh=1)
        self.df.Date = pd.to_datetime(self.df.Date, format='%b. %d, %Y')  # api: pandas.to_datetime
from contextlib import nullcontext
import copy
import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
from pandas.core.dtypes.common import is_float
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"arr, idx",
[
([1, 2, 3, 4], [0, 2, 1, 3]),
([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
(
[1, np.nan, 3, np.nan],
MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
),
],
)
def test_equals(arr, idx):
s1 = Series(arr, index=idx)
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 9
assert not s1.equals(s2)
@pytest.mark.parametrize(
"val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
)
def test_equals_list_array(val):
# GH20676 Verify equals operator for list of Numpy arrays
arr = np.array([1, 2])
s1 = Series([arr, arr])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = val
cm = (
tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
if isinstance(val, str)
else nullcontext()
)
with cm:
assert not s1.equals(s2)
def test_equals_false_negative():
# GH8437 Verify false negative behavior of equals function for dtype object
arr = [False, np.nan]
s1 = Series(arr)
s2 = s1.copy()
s3 = Series(index=range(2), dtype=object)
s4 = s3.copy()
s5 = s3.copy()
s6 = s3.copy()
s3[:-1] = s4[:-1] = s5[0] = s6[0] = False
assert s1.equals(s1)
assert s1.equals(s2)
assert s1.equals(s3)
assert s1.equals(s4)
assert s1.equals(s5)
assert s5.equals(s6)
def test_equals_matching_nas():
# matching but not identical NAs
left = Series([np.datetime64("NaT")], dtype=object)
right = Series([np.datetime64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.timedelta64("NaT")], dtype=object)
right = Series([np.timedelta64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.float64("NaN")], dtype=object)
right = Series([np.float64("NaN")], dtype=object)
assert left.equals(right)
assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
assert left.array.equals(right.array)
def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2):
# GH#39650
left = nulls_fixture
right = nulls_fixture2
if hasattr(right, "copy"):
right = right.copy()
else:
right = copy.copy(right)
ser = Series([left], dtype=object)
ser2 = Series([right], dtype=object)
if is_matching_na(left, right):
assert ser.equals(ser2)
    elif (left is None and is_float(right)) or (right is None and is_float(left)):  # api: pandas.core.dtypes.common.is_float
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
"""*(TO BE DEFUNCT)* The core that accepts a vehicle-model and wltc-classes, runs the simulation and updates the model with results.
.. Attention:: The documentation of this core module has several issues and needs work.
Notation
--------
* ALL_CAPITAL variables denote *vectors* over the velocity-profile (the cycle),
* ALL_CAPITAL starting with underscore (`_`) denote *matrices* (gears x time).
For instance, GEARS is like that::
[0, 0, 1, 1, 1, 2, 2, ... 1, 0, 0]
<---- cycle time-steps ---->
and _GEARS is like that::
t:||: 0 1 2 3
---+-------------
g1:|[[ 1, 1, 1, 1, ... 1, 1
g2:| 2, 2, 2, 2, ... 2, 2
g3:| 3, 3, 3, 3, ... 3, 3
g4:| 4, 4, 4, 4, ... 4, 4 ]]
Major vectors & matrices
------------------------
V: floats (#cycle_steps)
The wltp-class velocity profile.
_GEARS: integers (#gears X #cycle_steps)
One row for each gear (starting with 1 to #gears).
_N_GEARS: floats (#gears X #cycle_steps)
    One row per gear with the Engine-revolutions required to follow the V-profile (unfeasible revs included),
    produced by multiplying ``V * gear-ratios``.
_GEARS_YES: boolean (#gears X #cycle_steps)
One row per gear having ``True`` wherever gear is possible for each step.
.. Seealso:: :mod:`~.datamodel` for in/out schemas
"""
import logging
import re
import sys
from typing import Union
import numpy as np
import pandas as pd
from . import (
io as wio,
cycles,
invariants,
vehicle,
engine,
vmax,
downscale,
datamodel,
cycler,
)
from .invariants import v_decimals, vround
log = logging.getLogger(__name__)
def _shapes(*arrays):
import operator
op_shape = operator.attrgetter("shape")
return list(map(op_shape, arrays))
def _dtypes(*arrays):
import operator
op_shape = operator.attrgetter("dtype")
return list(map(op_shape, arrays))
class Experiment(object):
"""Runs the vehicle and cycle data describing a WLTC experiment.
See :mod:`wltp.experiment` for documentation.
"""
def __init__(self, mdl, skip_model_validation=False, validate_wltc_data=False):
"""
:param mdl:
trees (formed by dicts & lists) holding the experiment data.
:param skip_model_validation:
when true, does not validate the model.
"""
self._set_model(
mdl,
skip_validation=skip_model_validation,
validate_wltc_data=validate_wltc_data,
)
self.wltc = self._model["wltc_data"]
def run(self):
"""Invokes the main-calculations and extracts/update Model values!
@see: Annex 2, p 70
"""
m = wio.pstep_factory.get()
c = wio.pstep_factory.get().cycle
w = wio.pstep_factory.get().wot
mdl = self._model
## Prepare results
#
cycle = mdl.get(m.cycle)
if cycle is None:
            cycle = pd.DataFrame()  # api: pandas.DataFrame