"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 21:28:00 2020
@author: ike
"""
import sys
import numpy as np
import pandas as pd
import os.path as op
from time import time as tt
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from ..main import tqdm
from ..utils.visualization import wait
from ..utils.pathutils import makeParent
class Trainer(object):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def __init__(self, model, dataset, weight=True):
weights = (dataset.weights if weight else ((dataset.weights * 0) + 1))
weights = torch.tensor(weights).double().to(self.device)
self.model = model.double().to(self.device)
self.dataset = dataset
self.optim = torch.optim.Adam(params=self.model.parameters(), lr=0.001)
self.decay = torch.optim.lr_scheduler.ExponentialLR(
optimizer=self.optim, gamma=0.9)
self.criterion = nn.CrossEntropyLoss(weight=weights)
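# Note: passing per-class weights to CrossEntropyLoss is a common way to counter
# class imbalance; with weight=False the weights above collapse to all ones
# via (dataset.weights * 0) + 1, i.e. an unweighted loss.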
self.columns = ["Mode", "Epoch", "Time", "loss", "Total"] + [
"Class {} Accuracy".format(x) for x in range(self.dataset.Cout)]
self.dfs = pd.DataFrame(columns=self.columns)
import numpy as np
import pandas as pd
from scipy import interp
from scipy.stats import norm
from statsmodels.tsa.stattools import adfuller
def getWeights(d, size):
'''
Returns a list of coefficients to fractionally differentiate a time series.
@param d A non-negative real that represents the degree of the differentiation.
@param size The number of coefficients to return.
@return A list with the fractionally differentiated coefficients.
'''
w = [1.]
for k in range(1, size):
w_ = -w[-1]/k*(d-k+1)
w.append(w_)
w = np.array(w[::-1]).reshape(-1, 1)
return w
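# Worked example (illustrative): for d = 0.5 and size = 4 the recursion
# w_k = -w_{k-1} * (d - k + 1) / k yields 1, -0.5, -0.125, -0.0625, so
# getWeights(0.5, 4) returns the reversed column vector
# array([[-0.0625], [-0.125], [-0.5], [1.]]).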
def getWeights_FFD(d, thres):
'''
Returns a list of coefficients to fractionally differentiate a time series.
@param d A non-negative real that represents the degree of the differentiation.
@param thres The minimum absolute value of a coefficient; once a coefficient falls below it, no more are added to the list.
@return A list with the fractionally differentiated coefficients.
'''
w = [1.]
k = 1
while True:
w_ = -w[-1]/k*(d-k+1)
if abs(w_) < thres:
break
w.append(w_)
k += 1
return np.array(w[::-1]).reshape(-1, 1)
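# Worked example (illustrative): getWeights_FFD(0.5, 0.1) stops as soon as a
# coefficient drops below 0.1 in absolute value, returning
# array([[-0.125], [-0.5], [1.]]).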
def fracDiff(series, d, thres=0.01):
'''
Applies fractional differentiation to a time series. Uses a threshold to determine
how many initial values to skip based on cumulative weight loss.
@param series A time series to apply the fractionally differentiation.
@param d A non-negative real that represents the degree of the differentiation.
@param thres A weight-loss threshold that determines how many initial samples are skipped. When it is 1, nothing is skipped.
@return A DataFrame whose values are fractionally differentiated.
'''
# 1) Compute weights for the longest series
w = getWeights(d, series.shape[0])
# 2) Determine initial calcs to be skipped based on weight-loss threshold
w_ = np.cumsum(abs(w))
w_ /= w_[-1]
skip = w_[w_ > thres].shape[0]
# 3) Apply weights to values
df = {}
for name in series.columns:
seriesF = series[[name]].fillna(method='ffill').dropna()
df_ = pd.Series()
#!/usr/bin/env python3
# nxG2clusters.py assembly_graph_with_scaffolds.gfa outdir
import sys
import os
import subprocess
import time
import argparse
import networkx as nx
import pandas as pd
from sklearn.preprocessing import StandardScaler
from persona.persona import CreatePersonaGraph
from persona.directed_persona import CreateDirectedPersonaGraph
from persona.persona import PersonaOverlappingClustering
from persona.flags import _CLUSTERING_FN
from persona.splitter import do_embedding
from gfa_parser import gfa_to_G
import graphs
import visualising_embedding
def parse_args():
parser = argparse.ArgumentParser(description='Clustering on graphs',
usage='{} --gfa assembly_graph_with_scaffolds.gfa '
'--friendships_reads reads_alignment.tsv '
'-k 49 --outdir clustering_out'.format(sys.argv[0]))
parser.add_argument('--clustering', '-c', dest='c_name', default='best_partition',
required=False, type=str,
help='Choose the algorithm for local and global clustering',
choices=['label_prop', 'modularity', 'connected_components',
'weakly_connected_components', 'best_partition'])
parser.add_argument('--weight', '-w', dest='w_name', default='reads_and_db',
required=False, type=str,
help='Choose the weight for clustering',
choices=['cov_diff', 'reads_and_db', 'geometric_mean', 'harmonic_mean'])
parser.add_argument('--gfa', '-g', required=True, help='Assembly graph')
parser.add_argument('--grp', required=True, help='Graph in readable grseq format (produce it with show_saves.py); helps preserve conjugate names.')
parser.add_argument('--friendships_reads', dest='long_reads_readable_mpr', required=False,
help='Long reads aligned to assembly graph '
'(or any other confirmation of belonging to one transcript) [tsv]')
parser.add_argument('--friendships_db', dest='db_readable_mpr', required=False,
help='Reference transcripts aligned to assembly graph '
'(or any other confirmation of belonging to one transcript) [tsv]')
parser.add_argument('-k', type=int, required=True,
help='k-mer value used in assembly graph construction')
parser.add_argument('--outdir', '-o', required=True)
parser.add_argument('--filter', default=None, type=float,
help='Filter this percent of edges based on their weights')
args = parser.parse_args()
return args
def remove_regular_model(in_path, out_path):
fout = open(out_path, 'w')
with open(in_path, 'r') as fin:
for line in fin:
node = line.split()[0]
if '+_' in node or '-_' in node:
fout.write(line)
fout.close()
return out_path
def get_tst_G(G):
# path1 930004-,278546-,36185+,278990+,283130+,352975-,37703+
# path2 930004-,239212-,36185+,365256-,283130+,352975-,37703+
nodes_tst = ['36185+', '37703+', '239212-', '278546-', '278990+',
'283130+', '352975-', '365256-', '930004-', '2326645-']
G_tst = G.subgraph(nodes_tst).copy()
return G_tst
def get_total_emb(p_emb_tsv, features_tsv, persona_to_node_tsv):
# concatenate structural features (persona graph embedding)
# and node features (len, cov, A, C, G, T)
p_emb = pd.read_csv(p_emb_tsv, sep=' ', header=None, index_col=0)
features_df = pd.read_csv(features_tsv, sep=' ',
header=None, index_col=0, skiprows=1,
names=range(p_emb.shape[1], p_emb.shape[1] + 7))
# It will be helpful to convert each feature into z-scores
# (number of standard deviations from the mean) for comparability
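# i.e. z = (x - mean) / std per column, which is what StandardScaler computes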
scaled_features = StandardScaler().fit_transform(features_df.values)
scaled_features_df = pd.DataFrame(scaled_features, index=features_df.index, columns=features_df.columns)
import pylinkedcmd
from sqlalchemy import create_engine
import pandas as pd
from joblib import Parallel, delayed
import tqdm
import os
cmd_wd = pylinkedcmd.pylinkedcmd.Wikidata()
pg_user = os.environ["PG_USER"]
pg_pass = os.environ["PG_PASS"]
pg_host = os.environ["PG_HOST"]
pg_port = os.environ["PG_PORT"]
pg_db = os.environ["PG_DB"]
pg_engine = create_engine(f'postgresql://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_db}')
wikidata_ids = pd.read_sql_query(
'SELECT identifier_wikidata FROM sb_usgs_staff WHERE identifier_wikidata IS NOT NULL',
pg_engine
).identifier_wikidata.to_list()
wikidata_entities = list()
wikidata_claims = list()
def accumulator_wikidata(qid):
entity_data, entity_claims = cmd_wd.entity_data(qid)
if entity_data is not None:
wikidata_entities.append(entity_data)
if entity_claims is not None:
wikidata_claims.extend(entity_claims)
Parallel(n_jobs=50, prefer="threads")(
delayed(accumulator_wikidata)
(
url
) for url in tqdm.tqdm(wikidata_ids)
)
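# Note: prefer="threads" is a reasonable choice here assuming entity_data() is
# mostly network-bound; using threads (rather than processes) also means the
# shared wikidata_entities / wikidata_claims lists are actually populated in
# this process, and list append/extend is safe under CPython's GIL.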
with pg_engine.connect() as con:
con.execute("DELETE FROM wikidata_entities")
con.execute("DELETE FROM wikidata_claims")
con.execute("DELETE FROM wikidata_properties")
# con.execute("DELETE FROM wikidata_claims_entities")
con.close()
pd.DataFrame(wikidata_entities).to_sql(
"wikidata_entities",
pg_engine,
index=False,
if_exists="append",
chunksize=1000
)
pd.DataFrame(wikidata_claims)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file performs the renaming of all statistical variables present in the Data
Commons Knowledge Graph. Human-intelligible StatVar names are useful for end
users as they may be pulled from both the Python API and the Google Sheets API by
name.
1) Base Schema: The basic schema for any human readable statistical variable is
mprop_popType_v1_v2_v3... For example, Count_Person_BornInStateOfResidence
2) Optional inclusion of StatType: statType is included when the StatType is not
measuredValue or Unknown. For example, instead of Age_Person, we output
MedianAge_Person
3) Certain data sets are blacklisted: for example, all bio data sets and a few
miscellaneous ones are excluded. This blacklist was created by tjann.
4) Dependent variables are removed. Dependent variables are constraints that are
inherently included, but not really necessary. For example, a person earning
an income of 10k to 15k USD may only be measured by the US Census if they are
older than 15 and have an income. In that case,
"Count_Person_Years15Onwards_IncomeOfUSDollar10000To14999_WithIncome" becomes
"Count_Person_IncomeOfUSDollar10000To14999" after accounting for the
unnecessary variables. These dependent variables are defined in the textproto
stat vars config file.
5) Boolean constraints are replaced by their populations: for example, p1 =
isInternetUser and v1=True/False becomes v1=isInternetUser/notInternetUser.
6) Measurement properties are stripped from constraints: for example,
p1 = employment and v1 = USC_Unemployed becomes v1=Unemployed.
7) NAICS Industry codes are replaced by industry names: we have a combination of
NAICS specific and overview codes. In both cases, we replace the industry
code (e.g. NAICS/23) with the industry name. An example statistical variable is
WagesAnnual_Establishment_NAICSConstruction.
8) Cause of death properties are renamed: e.g., p1 = causeOfDeath and
v1="ICD10/E00-E89" becomes v1="EndocrineNutritionalMetabolicDiseases". These
names are generated directly from the ICD10 names stored in BigQuery.
Exceptionally long or confusing names were manually renamed.
9) DEA drug names are renamed: e.g., p1="drugPrescribed" and v1="drug/dea/9250"
become v1="Methadone". These are manually renamed. Some drug names are
intentionally left as their codes. For example, dea/7444 corresponds to
"4-Hydroxy-3-methoxy-methamphetamine", which does not have a common name.
Both the codes and drug names will be valid constraints.
10) Certain variables have text prepended or appended to their constraints to
improve readability: for example p1 = childSchoolEnrollment and
v1=EnrolledInPublicSchool is changed to v1="ChildEnrolledInPublicSchool".
These mappings are applied to ~15 variables.
11) Miscellaneous changes: a) MeasuredProp InsuredUnemploymentRate changed to
Rate_InsuredUnemployment to match the existing formula.
"""
from absl import app
from google.protobuf import text_format
from google.cloud import bigquery
from google.colab import auth
import re
import os
import pandas as pd
import numpy as np
import stat_var_renaming_constants as svrc
import stat_var_renaming_functions as svrf
# Constants
# Max total number of constraints of a variable to include (Dependent
# variables excluded).
_MAX_CONSTRAINTS = 3
_MAX_CONSTRAINTS_WITH_DPV = 6
# If true, no new statistical variables will be introduced.
_ONLY_REGENERATE_EXISTING = False
def authenticate_bq_client():
""" Authenticates and returns a BigQuery client connection. By default this
code assumes it will be run in Google Colab which handles BigQuery
authentication. To run this code elsewhere this method needs to be updated
to properly authenticate a BigQuery client.
Returns:
An authenticated SQL client with a function called query that given a SQL
query returns a response object that can be converted into a dataframe.
"""
# Users should update the authentication method if not using Google CoLab.
auth.authenticate_user()
# Create and return client.
project_id = "google.com:datcom-store-dev"
return bigquery.Client(project=project_id)
def download_stat_vars(client):
""" Queries unique list of statistical variables from BigQuery.
Creates a join across statistical populations and observations to generate
distinct list of statistical variables. Certain datasets like bio are
excluded. The original dpvs are preserved in new columns.
Args:
client: An authenticate BigQuery SQL client.
Returns:
stat_vars: Pandas dataframe containing unique information for all
potential stat vars in the database.
Raises:
Query failure: If improper authentication is given.
"""
# Dynamically create query for constraints in SQL query.
constraint_string = ""
pop_string = ""
for num in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1):
constraint_string += f"SP.v{num} as v{num},\n"
pop_string += f"SP.p{num} as p{num},\n"
# Dynamically create a list of blacklisted provenances, as a string.
blacklist = [
'"%s"' % prov_id
for prov_id in frozenset().union(*[svrc._MISC_DATASETS,
svrc._BIO_DATASETS])
]
blacklist_str = ', '.join(blacklist) if blacklist else '""'
# Input information into SQL template and perform the query.
query_for_all_stat_vars = (svrc.QUERY_FOR_ALL_STAT_VARS.replace(
"{CONSTRAINTS}",
constraint_string).replace("{POPULATIONS}", pop_string).replace(
"{comma_sep_prov_blacklist}",
blacklist_str).replace("{MAX_CONTRAINTS}", str(_MAX_CONSTRAINTS)))
stat_vars = client.query(query_for_all_stat_vars).to_dataframe()
# Make a pristine copy of constraint names for output MCF.
for c in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1):
stat_vars[f"orig_p{c}"] = stat_vars[f"p{c}"]
stat_vars[f"orig_v{c}"] = stat_vars[f"v{c}"]
stat_vars["orig_populationType"] = stat_vars['populationType']
return stat_vars
### Variable renaming scripts
def addPropertyRemapping(remapper, prop, function):
""" Helper function to add new remapping function to a certain property.
Args:
remapper: Dictionary with mapping from properties to renaming functions.
prop: Property to perform the remapping on.
function: Renaming function that takes three arguments
(prop, constraint, popType) and returns the new name for the constraint.
"""
if prop not in remapper:
remapper[prop] = []
remapper[prop].append(function)
def remap_constraint_from_prop(row, prop_remap):
""" Helper which applies property remappings to all constraints in a dataset.
Args:
row: Pandas row to apply function to.
prop_remap: Dictionary of renaming functions for each property.
"""
for constraint in range(1, min(_MAX_CONSTRAINTS_WITH_DPV, 1 + row['numConstraints'])):
prop = row[f"p{constraint}"]
if prop in prop_remap:
# May need to apply multiple functions for a single property.
remapper = prop_remap[prop]
for function in remapper:
row[f"v{constraint}"] = function(prop, row[f"v{constraint}"],
row['populationType'])
return row
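# Illustrative sketch (hypothetical property name and renaming rule): remappings
# are registered per property and then applied row-wise to the stat_vars
# dataframe, e.g.
#   prop_remap = {}
#   addPropertyRemapping(prop_remap, "gender",
#                        lambda prop, constraint, pop_type: constraint.title())
#   stat_vars = stat_vars.apply(remap_constraint_from_prop, axis=1,
#                               prop_remap=prop_remap)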
def generate_dependent_constraint_list():
""" Generates a list of dependent variables.
Using an OS system call, a protobuf definition is compiled. A definition
file is then read in and used to generate a pandas dataframe of dependent
variable definitions.
Returns:
obs_spec_list: Observation spec list for statistical variables in
protobuf object format.
"""
# Generate population observation spec. Creates a new python file.
os.system("protoc -I=. --python_out=. pop_obs_spec_common.proto")
# Load newly created protobuf class definition.
import pop_obs_spec_common_pb2
obs_spec_list = pop_obs_spec_common_pb2.PopObsSpecList()
# Load in PV list from spec proto. Note that covid cases was temporarily
# added as a DPV for display, but shouldn't truly be one.
with open("pop_obs_spec_nocovid.textproto") as f:
counts = f.read()
text_format.Parse(counts, obs_spec_list)
# Create a dataframe that matches the greater stat_vars from DB for merging.
dpvs = pd.DataFrame()
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
"""Crime prediction
The data is from: https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2.
"""
import os
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from scipy.stats import poisson
import strat_models
from utils import latexify
# Download data and preprocess
if not os.path.exists('data/crimes.fea'):
raw_df = pd.read_csv(
'data/crimes.csv', low_memory=False, parse_dates=["Date"])
raw_df.to_feather('data/crimes.fea')
raw_df = pd.read_feather('data/crimes.fea')
"""
This tests whether the Study object was created correctly.
No computation or visualization tests yet.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six import iteritems
from collections import Iterable
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
import pytest
import semantic_version
##############################################################################
# FIXTURES
@pytest.fixture(params=['expression', 'splicing'])
def data_type(request):
"""data_type fixture"""
return request.param
@pytest.fixture(params=[None, 'subset1'],
ids=['color_samples_by_none', 'color_samples_by_subset1'])
def color_samples_by(request, metadata_phenotype_col):
"""color_samples_by fixture"""
if request.param == 'phenotype':
return metadata_phenotype_col
else:
return request.param
class TestStudy(object):
# @pytest.fixture
# def n_groups(self):
# return 3
##########################################################################
@pytest.fixture
def study(self,
metadata_data, metadata_kws,
mapping_stats_data, mapping_stats_kws,
expression_data, expression_kws,
splicing_data, splicing_kws,
gene_ontology_data):
"""study fixture"""
from flotilla import Study
kwargs = {}
metadata = metadata_data.copy()
splicing = splicing_data.copy()
expression = expression_data.copy()
mapping_stats = mapping_stats_data.copy()
gene_ontology = gene_ontology_data.copy()
kw_pairs = (('metadata', metadata_kws),
('mapping_stats', mapping_stats_kws),
('expression', expression_kws),
('splicing', splicing_kws))
for data_type, kws in kw_pairs:
for kw_name, kw_value in iteritems(kws):
kwargs['{}_{}'.format(data_type, kw_name)] = kw_value
return Study(metadata,
mapping_stats_data=mapping_stats,
expression_data=expression,
splicing_data=splicing,
gene_ontology_data=gene_ontology,
**kwargs)
def test_init(self, metadata_data):
from flotilla import Study
metadata = metadata_data.copy()
study = Study(metadata)
metadata['outlier'] = False
true_default_sample_subsets = list(sorted(list(set(
study.metadata.sample_subsets.keys()).difference(
set(study.default_sample_subset)))))
true_default_sample_subsets.insert(0, study.default_sample_subset)
pdt.assert_frame_equal(study.metadata.data, metadata)
pdt.assert_equal(study.version, '0.1.0')
pdt.assert_equal(study.pooled, None)
pdt.assert_equal(study.technical_outliers, None)
pdt.assert_equal(study.phenotype_col, study.metadata.phenotype_col)
pdt.assert_equal(study.phenotype_order, study.metadata.phenotype_order)
pdt.assert_equal(study.phenotype_to_color,
study.metadata.phenotype_to_color)
pdt.assert_equal(study.phenotype_to_marker,
study.metadata.phenotype_to_marker)
pdt.assert_series_equal(study.sample_id_to_phenotype,
study.metadata.sample_id_to_phenotype)
pdt.assert_series_equal(study.sample_id_to_color,
study.metadata.sample_id_to_color)
pdt.assert_numpy_array_equal(study.phenotype_transitions,
study.metadata.phenotype_transitions)
pdt.assert_numpy_array_equal(study.phenotype_color_ordered,
study.metadata.phenotype_color_order)
pdt.assert_equal(study.default_sample_subset, 'all_samples')
pdt.assert_equal(study.default_feature_subset, 'variant')
pdt.assert_numpy_array_equal(study.default_sample_subsets,
true_default_sample_subsets)
pdt.assert_dict_equal(study.default_feature_subsets, {})
#########################################################################
@pytest.mark.xfail
def test_setattr(self, metadata_data):
# warnings.simplefilter("error")
from flotilla import Study
study = Study(metadata_data.copy())
study.pooled = 'asdf'
# warnings.simplefilter('default')
#########################################################################
def test_init_metdadata_kws(self, metadata_data, metadata_kws):
# Also need to check for when these are NAs
from flotilla import Study
kws = dict(('metadata_'+k, v) for k, v in metadata_kws.items())
study = Study(metadata_data, **kws)
pdt.assert_frame_equal(study.metadata.data,
metadata_data)
pdt.assert_equal(study.version, '0.1.0')
npt.assert_equal(study.pooled, None)
# npt.assert_equal(study.outliers, None)
def test_init_pooled(self, metadata_data,
metadata_kws,
pooled):
from flotilla import Study
metadata = metadata_data.copy()
kws = dict(('metadata_'+k, v) for k, v in metadata_kws.items())
metadata['pooled'] = metadata.index.isin(pooled)
study = Study(metadata, **kws)
npt.assert_array_equal(sorted(study.pooled), sorted(pooled))
def test_init_bad_pooled(self, metadata_data, metadata_kws, pooled):
from flotilla import Study
metadata = metadata_data.copy()
kws = dict(('metadata_' + k, v) for k, v in metadata_kws.items())
metadata['pooled_asdf'] = metadata.index.isin(pooled)
study = Study(metadata, **kws)
true_pooled = None
if study.metadata.pooled_col is not None:
if study.metadata.pooled_col in study.metadata.data:
try:
true_pooled = study.metadata.data.index[
study.metadata.data[
study.metadata.pooled_col].astype(bool)]
except KeyError:
true_pooled = None
npt.assert_equal(study.pooled, true_pooled)
def test_init_outlier(self, metadata_data, metadata_kws, outliers):
from flotilla import Study
metadata = metadata_data.copy()
kws = dict(('metadata_' + k, v) for k, v in metadata_kws.items())
metadata['outlier'] = metadata.index.isin(outliers)
study = Study(metadata, **kws)
npt.assert_array_equal(study.metadata.data, metadata)
def test_init_technical_outlier(self, metadata_data, metadata_kws,
technical_outliers, mapping_stats_data,
mapping_stats_kws):
from flotilla import Study
metadata = metadata_data.copy()
kw_pairs = (('metadata', metadata_kws),
('mapping_stats', mapping_stats_kws))
kwargs = {}
for name, kws in kw_pairs:
for k, v in kws.items():
kwargs['{}_{}'.format(name, k)] = v
study = Study(metadata, mapping_stats_data=mapping_stats_data,
**kwargs)
pdt.assert_numpy_array_equal(sorted(study.technical_outliers),
sorted(technical_outliers))
def test_init_expression(self, metadata_data, metadata_kws,
expression_data, expression_kws):
from flotilla import Study
metadata = metadata_data.copy()
expression = expression_data.copy()
kw_pairs = (('metadata', metadata_kws),
('expression', expression_kws))
kwargs = {}
for name, kws in kw_pairs:
for k, v in kws.items():
kwargs['{}_{}'.format(name, k)] = v
study = Study(metadata, expression_data=expression,
**kwargs)
pdt.assert_numpy_array_equal(study.expression.data_original,
expression_data)
def test_init_splicing(self, metadata_data, metadata_kws,
splicing_data, splicing_kws):
from flotilla import Study
metadata = metadata_data.copy()
splicing = splicing_data.copy()
kw_pairs = (('metadata', metadata_kws),
('splicing', splicing_kws))
kwargs = {}
for name, kws in kw_pairs:
for k, v in kws.items():
kwargs['{}_{}'.format(name, k)] = v
study = Study(metadata, splicing_data=splicing,
**kwargs)
pdt.assert_numpy_array_equal(study.splicing.data_original,
splicing_data)
def test_feature_subset_to_feature_ids(self, study, data_type,
feature_subset):
test_feature_subset = study.feature_subset_to_feature_ids(
data_type, feature_subset)
if 'expression'.startswith(data_type):
true_feature_subset = \
study.expression.feature_subset_to_feature_ids(feature_subset,
rename=False)
elif 'splicing'.startswith(data_type):
true_feature_subset = \
study.splicing.feature_subset_to_feature_ids(feature_subset,
rename=False)
pdt.assert_numpy_array_equal(test_feature_subset, true_feature_subset)
def test_sample_subset_to_sample_ids(self, study, sample_subset):
test_sample_subset = study.sample_subset_to_sample_ids(sample_subset)
try:
true_sample_subset = study.metadata.sample_subsets[sample_subset]
except (KeyError, TypeError):
try:
ind = study.metadata.sample_id_to_phenotype == sample_subset
if ind.sum() > 0:
true_sample_subset = \
study.metadata.sample_id_to_phenotype.index[ind]
else:
if sample_subset is None or 'all_samples'.startswith(
sample_subset):
sample_ind = np.ones(study.metadata.data.shape[0],
dtype=bool)
elif sample_subset.startswith("~"):
sample_ind = ~pd.Series(
study.metadata.data[sample_subset.lstrip("~")],
dtype='bool')
else:
sample_ind = pd.Series(
study.metadata.data[sample_subset], dtype='bool')
true_sample_subset = study.metadata.data.index[sample_ind]
except (AttributeError, ValueError):
true_sample_subset = sample_subset
pdt.assert_numpy_array_equal(true_sample_subset, test_sample_subset)
##########################################################################
@pytest.fixture(params=[True, False])
def multiple_genes_per_event(self, request):
"""multiple_genes_per_event fixture"""
return request.param
def test_tidy_splicing_with_expression(self, study, monkeypatch,
multiple_genes_per_event):
if multiple_genes_per_event:
df = study.splicing.feature_data.copy()
events = df.index[:5]
column = study.splicing.feature_expression_id_col
# fixed for unicode issue
# when multiple_genes_per_event == True,
# was getting this kind of value in gene_name column:
# "b'gene_1',b'gene_2'"
# df.ix[events, column] = '{},{}'.format(
# *study.expression.data.columns[:2])
df.ix[events, column] = u','.join(
study.expression.data.columns[:2])
monkeypatch.setattr(study.splicing, 'feature_data', df)
test = study.tidy_splicing_with_expression
splicing_common_id = study.splicing.feature_data[
study.splicing.feature_expression_id_col]
# Tidify splicing
splicing = study.splicing.data
splicing_index_name = study._maybe_get_axis_name(splicing, axis=0)
splicing_columns_name = study._maybe_get_axis_name(splicing, axis=1)
splicing_tidy = pd.melt(splicing.reset_index(),
id_vars=splicing_index_name,
value_name='psi',
var_name=splicing_columns_name)
s = splicing_common_id.dropna()
event_name_to_ensembl_ids = list(
itertools.chain(*[zip([k] * len(v.split(u',')), v.split(u','))
for k, v in iteritems(s)])
)
index, data = zip(*event_name_to_ensembl_ids)
event_name_to_ensembl_ids = pd.Series(data, index=index,
name=study._common_id)
rename_columns = {}
if splicing_index_name == 'index':
rename_columns[splicing_index_name] = study._sample_id
if splicing_columns_name == 'columns':
rename_columns[splicing_columns_name] = study._event_name
splicing_columns_name = study._event_name
splicing_tidy = splicing_tidy.rename(columns=rename_columns)
splicing_tidy = splicing_tidy.set_index(splicing_columns_name)
splicing_tidy = splicing_tidy.ix[event_name_to_ensembl_ids.index]
splicing_tidy = splicing_tidy.join(event_name_to_ensembl_ids)
splicing_tidy = splicing_tidy.dropna().reset_index()
splicing_tidy = splicing_tidy.rename(
columns={'index': study._event_name})
# Tidify expression
expression = study.expression.data_original
expression_index_name = study._maybe_get_axis_name(expression, axis=0)
expression_tidy = pd.melt(expression.reset_index(),
id_vars=expression_index_name,
value_name='expression',
var_name=study._common_id)
# This will only do anything if there is a column named "index" so
# no need to check anything
expression_tidy = expression_tidy.rename(
columns={'index': study._sample_id})
expression_tidy = expression_tidy.dropna()
true = splicing_tidy.merge(
expression_tidy, left_on=[study._sample_id, study._common_id],
right_on=[study._sample_id, study._common_id])
pdt.assert_frame_equal(test, true)
assert 'event_name' in test
assert 'event_name' in true
assert 'common_id' in true
assert 'common_id' in test
def test_filter_splicing_on_expression(self, study):
expression_thresh = 5
sample_subset = None
test_filtered_splicing = study.filter_splicing_on_expression(
expression_thresh)
columns = study._maybe_get_axis_name(study.splicing.data, axis=1,
alt_name=study._event_name)
index = study._maybe_get_axis_name(study.splicing.data, axis=0,
alt_name=study._sample_id)
sample_ids = study.sample_subset_to_sample_ids(sample_subset)
splicing_with_expression = \
study.tidy_splicing_with_expression.ix[
study.tidy_splicing_with_expression.sample_id.isin(
sample_ids)]
ind = splicing_with_expression.expression >= expression_thresh
splicing_high_expression = splicing_with_expression.ix[ind]
splicing_high_expression = \
splicing_high_expression.reset_index().dropna()
if isinstance(columns, list) or isinstance(index, list):
true_filtered_splicing = splicing_high_expression.pivot_table(
columns=columns, index=index, values='psi')
else:
true_filtered_splicing = splicing_high_expression.pivot(
columns=columns, index=index, values='psi')
pdt.assert_frame_equal(true_filtered_splicing, test_filtered_splicing)
def test_plot_gene(self, study):
feature_id = study.expression.data.columns[0]
study.plot_gene(feature_id)
fig = plt.gcf()
test_figsize = fig.get_size_inches()
feature_ids = [feature_id]
groupby = study.sample_id_to_phenotype
grouped = groupby.groupby(groupby)
single_violin_width = 0.5
ax_width = max(4, single_violin_width*grouped.size().shape[0])
nrows = len(feature_ids)
ncols = 1
true_figsize = ax_width * ncols, 4 * nrows
npt.assert_array_equal(true_figsize, test_figsize)
def test_plot_event(self, study):
feature_id = study.splicing.data.columns[0]
col_wrap = 4
study.plot_event(feature_id, col_wrap=col_wrap)
fig = plt.gcf()
test_figsize = fig.get_size_inches()
feature_ids = [feature_id]
groupby = study.sample_id_to_phenotype
grouped = groupby.groupby(groupby)
single_violin_width = 0.5
ax_width = max(4, single_violin_width*grouped.size().shape[0])
nrows = 1
ncols = 1
while nrows * ncols < len(feature_ids):
if ncols > col_wrap:
nrows += 1
else:
ncols += 1
true_figsize = ax_width * ncols, 4 * nrows
npt.assert_array_equal(true_figsize, test_figsize)
def test_plot_event_multiple_events_per_id(self, study):
grouped = study.splicing.feature_data.groupby(
study.splicing.feature_rename_col)
ids_with_multiple_genes = grouped.filter(lambda x: len(x) > 1)
feature_id = ids_with_multiple_genes[
study.splicing.feature_rename_col].values[0]
col_wrap = 4
study.plot_event(feature_id, col_wrap=col_wrap)
fig = plt.gcf()
test_figsize = fig.get_size_inches()
feature_ids = study.splicing.maybe_renamed_to_feature_id(feature_id)
groupby = study.sample_id_to_phenotype
grouped = groupby.groupby(groupby)
single_violin_width = 0.5
ax_width = max(4, single_violin_width*grouped.size().shape[0])
nrows = 1
ncols = 1
while nrows * ncols < len(feature_ids):
if ncols > col_wrap:
nrows += 1
else:
ncols += 1
true_figsize = ax_width * ncols, 4 * nrows
npt.assert_array_equal(true_figsize, test_figsize)
##########################################################################
@pytest.fixture(params=[True, False])
def plot_violins(self, request):
"""plot_violins fixture"""
return request.param
def test_plot_pca(self, study, data_type, plot_violins):
study.plot_pca(feature_subset='all', data_type=data_type,
plot_violins=plot_violins)
plt.close('all')
def test_plot_clustermap(self, study, data_type):
study.plot_clustermap(feature_subset='all', data_type=data_type)
plt.close('all')
def test_plot_correlations(self, study, featurewise, data_type):
study.plot_correlations(feature_subset='all', featurewise=featurewise,
data_type=data_type)
plt.close('all')
def test_plot_lavalamps(self, study):
study.plot_lavalamps()
plt.close('all')
def test_plot_two_samples(self, study, data_type):
sample1 = study.expression.data.index[0]
sample2 = study.expression.data.index[-1]
study.plot_two_samples(sample1, sample2, data_type=data_type)
def test_plot_two_features(self, study, data_type):
if data_type == 'expression':
feature1 = study.expression.data.columns[0]
feature2 = study.expression.data.columns[-1]
elif data_type == 'splicing':
feature1 = study.splicing.data.columns[0]
feature2 = study.splicing.data.columns[-1]
study.plot_two_features(feature1, feature2, data_type=data_type)
##########################################################################
@pytest.fixture(params=[None, 'gene'])
def gene_of_interest(self, request, genes):
"""gene_of_interest feature"""
if request is not None:
return genes[0]
else:
return request.param
@staticmethod
def get_data_eval_command(data_type, attribute):
if 'feature' in data_type:
# Feature data doesn't have "data_original", only "data"
if attribute == 'data_original':
attribute = 'data'
command = 'study.{}.feature_{}'.format(
data_type.split('_feature')[0], attribute)
else:
command = 'study.{}.{}'.format(data_type, attribute)
print("command :", command)
return command
def test_save(self, study, tmpdir):
from flotilla.datapackage import name_to_resource
study_name = 'test_save'
study.supplemental.expression_corr = study.expression.data.corr()
###########################################
study.save(study_name, flotilla_dir=tmpdir)
###########################################
assert len(tmpdir.listdir()) == 1
save_dir = tmpdir.listdir()[0]
with open('{}/datapackage.json'.format(save_dir)) as f:
test_datapackage = json.load(f)
assert study_name == save_dir.purebasename
# resource_keys_to_ignore = ('compression', 'format', 'path', 'url')
keys_from_study = {'splicing': [],
'expression': ['thresh',
'log_base',
'plus_one'],
'metadata': ['phenotype_order',
'phenotype_to_color',
'phenotype_col',
'phenotype_to_marker',
'pooled_col',
'minimum_samples'],
'mapping_stats': ['number_mapped_col',
'min_reads'],
'expression_feature': ['rename_col',
'ignore_subset_cols'],
'splicing_feature': ['rename_col',
'ignore_subset_cols',
'expression_id_col'],
'gene_ontology': []}
resource_names = keys_from_study.keys()
# Add auto-generated attributes into the true datapackage
for name, keys in iteritems(keys_from_study):
resource = name_to_resource(test_datapackage, name)
for key in keys:
command = self.get_data_eval_command(name, key)
test_value = resource[key]
true_value = eval(command)
if isinstance(test_value, dict):
#############################################
pdt.assert_dict_equal(test_value, true_value)
elif isinstance(test_value, Iterable):
####################################################
pdt.assert_numpy_array_equal(test_value, true_value)
for name in resource_names:
resource = name_to_resource(test_datapackage, name)
# TODO compression
# path = '{}.csv.gz'.format(name)
path = '{}.csv'.format(name)
###############################
assert resource['path'] == path
test_df = pd.read_csv(
'{}/{}/{}'.format(tmpdir, study_name, path), index_col=0
# TODO compressiom
# , compression='gzip'
)
command = self.get_data_eval_command(name, 'data_original')
true_df = eval(command)
pdt.assert_frame_equal(test_df, true_df)
#!/usr/bin/env python3
#
# author: <NAME> <<EMAIL>>
# latest version: https://github.com/swo/dbotu3
from __future__ import print_function
import argparse, sys
import pandas as pd, numpy as np
import Levenshtein
from Bio import SeqIO
import scipy.stats
class OTU:
'''
Object for keeping track of an OTU's distribution and computing genetic distances
'''
def __init__(self, name, sequence, counts):
'''
name: str
OTU ID
sequence: str
OTU's nucleotide sequence
counts: numpy.Array
length of this array should be the number of samples
'''
# make this assertion so that lists of counts don't get concatenated
self.name = name
self.sequence = sequence
self.counts = np.array(counts)
self.abundance = sum(self.counts)
def __eq__(self, other):
return self.name == other.name and self.sequence == other.sequence and all(self.counts == other.counts)
def __repr__(self):
return "OTU(name={}, sequence={}, counts={})".format(repr(self.name), repr(self.sequence), repr(self.counts))
def absorb(self, other):
'''
Add another OTU's counts to this one
other: OTU
returns: nothing
'''
self.counts += other.counts
self.abundance += other.abundance
def distance_to(self, other):
'''
Length-adjusted Levenshtein "distance" to other OTU
other: OTU
distance to this OTU
returns: float
'''
return Levenshtein.distance(self.sequence, other.sequence) / (0.5 * (len(self.sequence) + len(other.sequence)))
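# Example (illustrative): for sequences "ACGT" and "ACGA" the Levenshtein
# distance is 1 and both lengths are 4, so distance_to returns 1 / 4 = 0.25.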
@staticmethod
def _D_helper(x):
'''A helper function for the _D method'''
x = np.array(x)
x = x[x > 0]
return np.sum(x * np.log(x)) - (np.sum(x) * np.log(np.sum(x)))
@classmethod
def _D(cls, x, y):
'''
Statistic for the likelihood ratio test. See docs for mathematical derivation.
'''
x = np.array(x)
y = np.array(y)
return -2.0 * (cls._D_helper(x + y) - cls._D_helper(x) - cls._D_helper(y))
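# In other words, for count vectors x and y the statistic is
# D = -2 * (f(x + y) - f(x) - f(y)), where f(v) = sum_i v_i*ln(v_i) - (sum_i v_i)*ln(sum_i v_i)
# over the positive entries of v; _distribution_test_pval below compares D
# against a chi-squared distribution with len(x) - 1 degrees of freedom.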
@classmethod
def _distribution_test_pval(cls, x, y):
'''
P-value from the likelihood ratio test comparing the distribution of the abundances
of two taxa (x and y). See docs for explanation of the test.
'''
assert len(x) == len(y)
df = len(x) - 1
return scipy.stats.chi2.sf(cls._D(x, y), df=df)
def distribution_pval(self, other):
'''
P-value from the likelihood ratio test comparing the distribution of the abundances
of two OTU objects. See docs for explanation of the test.
other: OTU
returns: float
'''
return self._distribution_test_pval(self.counts, other.counts)
class DBCaller:
'''
Object for processing the sequence table and distance matrix into an OTU table.
'''
def __init__(self, seq_table, records, max_dist, min_fold, threshold_pval, log=None):
'''
seq_table: pandas.DataFrame
Samples on the columns; sequences on the rows
records: index of Bio.Seq
Indexed, unaligned input sequences. This could come from BioPython's
SeqIO.to_dict or SeqIO.index.
max_dist: float
genetic distance cutoff above which a sequence will not be merged into an OTU
min_fold: float
Multiply the sequence's abundance by this fold to get the minimum abundance
of an OTU for merging
threshold_pval: float
P-value below which a sequence will not be merged into an OTU
log: filehandle
Log file reporting the abundance, genetic, and distribution checks.
'''
self.seq_table = seq_table
self.records = records
self.max_dist = max_dist
self.min_fold = min_fold
self.threshold_pval = threshold_pval
self.log = log
# get a list of the names of the sequences in order of their (decreasing) abundance
self.seq_abunds = self.seq_table.sum(axis=1).sort_values(ascending=False)
# check that all sequence IDs in the table are in the fasta
missing_ids = [seq_id for seq_id in self.seq_abunds.index if seq_id not in self.records]
if len(missing_ids) > 0:
raise RuntimeError("{} sequence IDs found in the sequence table but not in the fasta: {}".format(len(missing_ids), missing_ids))
# initialize OTU information
self.membership = {}
self.otus = []
def ga_matches(self, candidate):
'''
OTUs that meet the genetic and abundance criteria
candidate: OTU
sequence to evaluate
'''
# find abundance matches
min_abundance = self.min_fold * candidate.abundance
abundance_matches = [otu for otu in self.otus if otu.abundance > min_abundance]
if self.log is not None:
print(candidate.name, 'abundance_check', *[otu.name for otu in abundance_matches], sep='\t', file=self.log)
if len(abundance_matches) == 0:
return []
else:
# find genetic matches (in order of decreasing genetic distance)
matches_distances = [(otu.distance_to(candidate), otu) for otu in abundance_matches]
matches_distances.sort(key=lambda x: (x[0], -x[1].abundance, x[1].name))
matches = [otu for dist, otu in matches_distances if dist < self.max_dist]
if self.log is not None:
print(candidate.name, 'genetic_check', *[otu.name for otu in matches], sep='\t', file=self.log)
return matches
def _process_record(self, record_id):
'''
Process the next sequence: run the genetic, abundance, and distribution checks, either
merging the sequence into an existing OTU or creating a new OTU.
'''
assert record_id in self.seq_table.index
record = self.records[record_id]
candidate = OTU(record.id, str(record.seq), self.seq_table.loc[record.id])
if self.log is not None:
print('seq', candidate.name, sep='\t', file=self.log)
merged = False
for otu in self.ga_matches(candidate):
test_pval = candidate.distribution_pval(otu)
if self.log is not None:
print(candidate.name, 'distribution_check', otu.name, test_pval, sep='\t', file=self.log)
if test_pval > self.threshold_pval:
otu.absorb(candidate)
self.membership[otu.name].append(candidate.name)
merged = True
break
if not merged:
# form own otu
self.otus.append(candidate)
self.membership[candidate.name] = [candidate.name]
def generate_otu_table(self):
'''
Process all the input sequences to make an OTU table.
returns: pandas.DataFrame
OTU table (which can also be found at instance.otu_table)
'''
for record_id in self.seq_abunds.index:
self._process_record(record_id)
self.otus.sort(key=lambda otu: otu.abundance, reverse=True)
self.otu_table = pd.DataFrame([otu.counts for otu in self.otus], index=[otu.name for otu in self.otus])
self.otu_table.columns = self.seq_table.columns
return self.otu_table
def write_otu_table(self, output):
'''
Write the QIIME-style OTU table to a file.
output: filehandle
'''
self.otu_table.to_csv(output, sep='\t')
def write_membership(self, output):
'''
Write the QIIME-style OTU mapping information to a file.
output: filehandle
'''
for otu in self.otus:
print(otu.name, *self.membership[otu.name], sep='\t', file=output)
def read_sequence_table(fn):
'''
Read in a table of sequences. Expect a header and the sequence IDs in the
first column. Samples are on the columns.
fn: filename (or handle)
returns: pandas.DataFrame
'''
df = pd.read_table(fn, dtype={0: str}, header=0)
# module model
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
from sklearn import metrics, ensemble, model_selection
from sklearn.preprocessing import MinMaxScaler
from math import sqrt
import numpy as np
import datetime
from dateutil import relativedelta
import os
import io
import json
import base64
from xgboost import XGBRegressor
import tensorflow as tf
from tensorflow import keras
from statsmodels.tsa.ar_model import AutoReg
np.random.seed(42)
tf.random.set_seed(42)
def buildProphet(train_data_path, test_data_path):
print("\nBuilding Prophet model ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
mR = Prophet(daily_seasonality=False)
mR.fit(dd)
futureR=mR.make_future_dataframe(periods=365*5)
forecastR=mR.predict(futureR)
rmse = -1.0
if len(test_data_path) > 0:
dft = pd.read_csv(test_data_path)
dft['TIMESTAMP'] = dft['TIMESTAMP'].astype('datetime64')
dft.set_index('TIMESTAMP',inplace=True)
dft_start_datetime = min(dft.index)
dft_end_datetime = max(dft.index)
actual_mean = dft['RENEWABLES_PCT'].resample('24H').mean()
predicted_mean = forecastR.loc[(forecastR['ds'] >= dft_start_datetime) & (forecastR['ds'] <= dft_end_datetime)]
predicted_mean.set_index('ds', inplace=True)
actual_mean = actual_mean[min(predicted_mean.index):]
mse = metrics.mean_squared_error(actual_mean, predicted_mean.yhat)
rmse = sqrt(mse)
print(str.format("Prophet RMSE: {:.2f}", rmse))
return rmse
def predictProphet(data_path,periods):
print("\nTraining prophet model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
y = df['RENEWABLES_PCT']
daily = y.resample('24H').mean()
dd = pd.DataFrame(daily)
dd.reset_index(inplace=True)
dd.columns = ['ds','y']
m = Prophet(daily_seasonality=False)
m.fit(dd)
future=m.make_future_dataframe(periods=periods)
print(str.format("\nPredicting with prophet model for {0} days ({1} years) ...",periods, int(periods/365)))
plt.subplot(1,1,1)
forecast=m.predict(future)
fig = m.plot(forecast,ylabel='Renewable Power Production %', xlabel='Date')
plt.suptitle('\nCA Predicted Renewable Power Production %')
#plt.title('\nCA Predicted Renewable Power Production %')
axes = plt.gca()
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
fig.savefig(wd + '/prediction-prophet.png')
forecast.rename(columns={'ds':'TIMESTAMP'}, inplace=True)
forecast.set_index('TIMESTAMP',inplace=True)
prediction = pd.DataFrame({'RENEWABLES_PCT_MEAN':forecast['yhat'].resample('1Y').mean(),'RENEWABLES_PCT_LOWER':forecast['yhat_lower'].resample('1Y').mean(),'RENEWABLES_PCT_UPPER':forecast['yhat_upper'].resample('1Y').mean()})
return prediction
def rmse_calc(actual,predict):
predict = np.array(predict)
actual = np.array(actual)
distance = predict - actual
square_distance = distance ** 2
mean_square_distance = square_distance.mean()
score = np.sqrt(mean_square_distance)
return score
def transformDataset(df):
# Add pct from one and two days ago, plus the day-over-day difference for yesterday and yesterday-1
df['YESTERDAY'] = df['RENEWABLES_PCT'].shift()
df['YESTERDAY_DIFF'] = df['YESTERDAY'].diff()
df['YESTERDAY-1']=df['YESTERDAY'].shift()
df['YESTERDAY-1_DIFF'] = df['YESTERDAY-1'].diff()
df=df.dropna()
x_train=pd.DataFrame({'YESTERDAY':df['YESTERDAY'],'YESTERDAY_DIFF':df['YESTERDAY_DIFF'],'YESTERDAY-1':df['YESTERDAY-1'],'YESTERDAY-1_DIFF':df['YESTERDAY-1_DIFF']})
y_train = df['RENEWABLES_PCT']
return x_train,y_train
def buildRandomForestRegression(train_data_path,test_data_path):
print("\nBuilding Random Forest Regression Model ...")
print("Preparing training dataset ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
df = df.resample('1M').mean()
x_train, y_train = transformDataset(df)
print("Preparing testing dataset ...")
dt = pd.read_csv(test_data_path)
dt['TIMESTAMP'] = dt['TIMESTAMP'].astype('datetime64')
dt.set_index('TIMESTAMP',inplace=True)
x_test, y_test = transformDataset(dt)
print("Searching for best regressor ...")
model = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
y_true = y_test.values
print("Predicting with best regressor ...")
y_pred = best_model.predict(x_test)
mse = metrics.mean_squared_error(y_true, y_pred)
rmse = sqrt(mse)
print(str.format("Random Forest Regression RMSE: {:.2f}", rmse))
return rmse
def predictRandomForestRegression(data_path,periods):
print("\nTraining Random Forest Regression model with full dataset ...")
df = pd.read_csv(data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
dfmean = df.resample('1M').mean()
dfmin = df.resample('1M').min()
dfmax = df.resample('1M').max()
x_train,y_train = transformDataset(dfmean)
xmin_train, ymin_train = transformDataset(dfmin)
xmax_train, ymax_train = transformDataset(dfmax)
model = ensemble.RandomForestRegressor()
model_min = ensemble.RandomForestRegressor()
model_max = ensemble.RandomForestRegressor()
param_search = {
'n_estimators': [100],
'max_features': ['auto'],
'max_depth': [10]
}
tscv = model_selection.TimeSeriesSplit(n_splits=2)
rmse_score = metrics.make_scorer(rmse_calc, greater_is_better = False)
gsearch = model_selection.GridSearchCV(estimator=model, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_min = model_selection.GridSearchCV(estimator=model_min, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch_max = model_selection.GridSearchCV(estimator=model_max, cv=tscv, param_grid=param_search, scoring=rmse_score)
gsearch.fit(x_train, y_train)
gsearch_min.fit(xmin_train, ymin_train)
gsearch_max.fit(xmax_train, ymax_train)
best_score = gsearch.best_score_
best_model = gsearch.best_estimator_
best_model_min = gsearch_min.best_estimator_
best_model_max = gsearch_max.best_estimator_
print("\nPredicting with Random Forest regressor ...")
prediction = pd.DataFrame(columns=['TIMESTAMP','RENEWABLES_PCT'])
l = len(x_train)
x_pred = x_train.iloc[[l-1]]
y_pred = best_model.predict(x_pred)
xmin_pred = xmin_train.iloc[[l-1]]
ymin_pred = best_model_min.predict(xmin_pred)
xmax_pred = xmax_train.iloc[[l-1]]
ymax_pred = best_model_max.predict(xmax_pred)
prediction = prediction.append({'TIMESTAMP':x_pred.index[0],'RENEWABLES_PCT_MEAN':y_pred[0],'RENEWABLES_PCT_LOWER':ymin_pred[0],'RENEWABLES_PCT_UPPER':ymax_pred[0]}, ignore_index=True)
for i in range(1,periods):
ti = prediction.iloc[i-1]['TIMESTAMP'] + pd.offsets.DateOffset(months=1)
xi_pred = pd.DataFrame({'YESTERDAY':y_pred,'YESTERDAY_DIFF':y_pred-x_pred['YESTERDAY'],'YESTERDAY-1':x_pred['YESTERDAY'],'YESTERDAY-1_DIFF':x_pred['YESTERDAY_DIFF']})
yi_pred = best_model.predict(xi_pred)
xmini_pred = pd.DataFrame({'YESTERDAY':ymin_pred,'YESTERDAY_DIFF':ymin_pred-xmin_pred['YESTERDAY'],'YESTERDAY-1':xmin_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmin_pred['YESTERDAY_DIFF']})
ymini_pred = best_model.predict(xmini_pred)
xmaxi_pred = pd.DataFrame({'YESTERDAY':ymax_pred,'YESTERDAY_DIFF':ymax_pred-xmax_pred['YESTERDAY'],'YESTERDAY-1':xmax_pred['YESTERDAY'],'YESTERDAY-1_DIFF':xmax_pred['YESTERDAY_DIFF']})
ymaxi_pred = best_model.predict(xmaxi_pred)
prediction = prediction.append({'TIMESTAMP':ti,'RENEWABLES_PCT_MEAN':yi_pred[0],'RENEWABLES_PCT_LOWER':ymini_pred[0],'RENEWABLES_PCT_UPPER':ymaxi_pred[0]}, ignore_index=True)
x_pred = xi_pred
y_pred = yi_pred
xmin_pred = xmini_pred
ymin_pred = ymini_pred
xmax_pred = xmaxi_pred
ymax_pred = ymaxi_pred
prediction.set_index('TIMESTAMP',inplace=True)
prediction = prediction.resample('1Y').mean()
p = prediction.plot()
p.set_title('CA Predicted Renewables % by Random Forest Regression')
p.set_ylabel('Renewables %')
wd = os.path.dirname(data_path) + '/../images'
os.makedirs(wd, exist_ok=True)
plt.savefig(wd + '/prediction-randomforest.png')
return prediction
# transform a time series dataset into a supervised learning dataset
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# put it all together
agg = pd.concat(cols, axis=1)
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg.values
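# Quick illustrative sketch (made-up numbers) of what the transform produces:
# with n_in=2 and n_out=1 every surviving row is [value(t-2), value(t-1), value(t)],
# i.e. two lag features followed by the value to predict.
def _example_series_to_supervised():
    demo = pd.DataFrame({'RENEWABLES_PCT': [10.0, 12.0, 11.0, 13.0, 14.0]})
    supervised = series_to_supervised(demo, n_in=2, n_out=1)
    features, target = supervised[:, :-1], supervised[:, -1]
    return features, target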
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# split test row into input and output columns
testX, testy = test[i, :-1], test[i, -1]
# fit model on history and make a prediction
yhat = xgboost_forecast(history, testX)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# summarize progress
print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
# estimate prediction error
    error = metrics.mean_absolute_error(test[:, -1], predictions)
    return error, test[:, -1], predictions
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test, :], data[-n_test:, :]
# fit an xgboost model and make a one step prediction
def xgboost_forecast(train, testX):
# transform list into array
train = np.asarray(train)
# split into input and output columns
trainX, trainy = train[:, :-1], train[:, -1]
# fit model
model = XGBRegressor(objective='reg:squarederror', n_estimators=1000)
model.fit(trainX, trainy)
# make a one-step prediction
yhat = model.predict(np.asarray([testX]))
return yhat[0]
# walk-forward validation for univariate data (redefinition: this MSE-based variant overrides the MAE version above and is the one used below)
def walk_forward_validation(data, n_test):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# split test row into input and output columns
testX, testy = test[i, :-1], test[i, -1]
# fit model on history and make a prediction
yhat = xgboost_forecast(history, testX)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# summarize progress
print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
# estimate prediction error
error = metrics.mean_squared_error(test[:, -1], predictions)
return error, test[:, -1], predictions
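# Illustrative sketch of the walk-forward loop on a tiny synthetic series
# (made-up data; the real pipeline feeds it the resampled RENEWABLES_PCT frame below).
def _example_walk_forward_validation():
    demo = pd.DataFrame({'RENEWABLES_PCT': np.linspace(20.0, 40.0, 12)})
    data = series_to_supervised(demo, n_in=2, n_out=1)
    # hold out the last 3 rows and forecast them one step at a time
    error, y_true, y_hat = walk_forward_validation(data, n_test=3)
    return error, y_true, y_hat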
def buildXGBoostRegression(train_data_path,test_data_path):
print("\nBuilding XGBoost Regression model ...")
df = pd.read_csv(train_data_path)
dt = pd.read_csv(test_data_path)
df = df.append(dt)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
dfmean = df.resample('1Y').mean()
dmean = series_to_supervised(dfmean[['RENEWABLES_PCT']], n_in=2, n_out=1, dropnan=True)
# transform list into array
mse, y, yhat = walk_forward_validation(dmean, 8)
rmse = sqrt(mse)
print(str.format("XGBoostRegression RMSE: {:.2f}", rmse))
return rmse
def buildLSTM(train_data_path,test_data_path):
print("\nBuilding LSTM Model ...")
time_steps = 3
print("Preparing training dataset ...")
df = pd.read_csv(train_data_path)
df['TIMESTAMP'] = df['TIMESTAMP'].astype('datetime64')
df.set_index('TIMESTAMP',inplace=True)
df = df[['RENEWABLES_PCT']]
df = df.resample('1M').mean()
    scaler = MinMaxScaler()
    scaling_model = scaler.fit(df)
    df = scaling_model.transform(df)
daily_train = | pd.DataFrame(df, columns=['RENEWABLES_PCT']) | pandas.DataFrame |
#!/usr/bin/env python3
# combineNunlComments.py: combine comments of nu.nl stored in csv files
# usage: combineNunlComments.py file1 file2 [file3 ...]
# note: news article comments volume can both grow and shrink over time:
# this comments extracts all the comments on one article from
#       this script extracts all the comments on one article from
# 20200915 erikt(at)xs4all.nl
import csv
import pandas as pd
import sys
ID,NAME,DATE,TEXT,PARENT = "id name date text parent".split()
def readFiles(inFileNameList):
ids = {}
commentsByKey = {}
for inFileName in inFileNameList:
try:
df = pd.read_csv(inFileName)
for i in range(0,len(df)):
thisId,name,date,text,parent = df.iloc[i]
key = " ".join([name,date,text])
if not key in commentsByKey:
commentsByKey[key] = {ID:thisId,NAME:name,DATE:date,TEXT:text,PARENT:parent}
else:
if thisId in ids and ids[thisId] != commentsByKey[key][ID]:
print(f"cannot happen! ids {thisId}",file=sys.stderr)
if parent in ids and ids[parent] != commentsByKey[key][PARENT]:
print(f"cannot happen! ids {parent}",file=sys.stderr)
ids[thisId] = commentsByKey[key][ID]
ids[parent] = commentsByKey[key][PARENT]
except:
print(f"read error for file {inFileName} (empty file?)",file=sys.stderr)
return(commentsByKey,ids)
def updateCommentIds(commentsByKey,ids):
commentsById = {}
for key in commentsByKey:
if commentsByKey[key][ID] in ids:
commentsByKey[key][ID] = ids[commentsByKey[key][ID]]
if commentsByKey[key][PARENT] in ids:
commentsByKey[key][PARENT] = ids[commentsByKey[key][PARENT]]
commentsById[commentsByKey[key][ID]] = commentsByKey[key]
return(commentsById)
def showComments(commentsById):
if len(commentsById) > 0:
| pd.DataFrame(commentsById) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 09:27:27 2021
@author: <NAME>
"""
import numpy as np
import pandas as pd
from warnings import simplefilter
from collections import Counter
from aif360.datasets import BinaryLabelDataset
from aif360.datasets import AdultDataset, GermanDataset, CompasDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.metrics import ClassificationMetric
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions\
import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas
from kmodes.kprototypes import *
from kmodes.kmodes import *
from kmodes.util.dissim import matching_dissim, ng_dissim, euclidean_dissim
from copy import deepcopy
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
import matplotlib.pyplot as plt
#Function to call the desired dataset with chosen features
'''dataset_used = "adult", "german", "compas"
protected_attribute_used = 1,2 (check the aif360 docs for the correct sens. attr.)
preprocessed_dataset = True, original_dataset = False'''
def aif_data(dataset_used, preprocessed):
if dataset_used == "adult":
if preprocessed == True:
dataset_orig = load_preproc_data_adult()
privileged_groups = [{'sex': 1,'race': 1}]
unprivileged_groups = [{'sex': 0,'race': 0}]
else:
dataset_orig = AdultDataset()
privileged_groups = [{'sex': 1,'race': 1}]
unprivileged_groups = [{'sex': 0,'race': 0}]
elif dataset_used == "german":
if preprocessed == True:
dataset_orig = load_preproc_data_german()
privileged_groups = [{'sex': 1,'age': 1}]
unprivileged_groups = [{'sex': 0,'age': 0}]
for i in range(1000):
if (dataset_orig.labels[i] == 2.0):
dataset_orig.labels[i] = 0
else:
dataset_orig.labels[i] = 1
dataset_orig.favorable_label = 1
dataset_orig.unfavorable_label = 0
else:
dataset_orig = GermanDataset()
privileged_groups = [{'sex': 1,'age': 1}]
unprivileged_groups = [{'sex': 0,'age': 0}]
for i in range(1000):
if (dataset_orig.labels[i] == 2.0):
dataset_orig.labels[i] = 0
else:
dataset_orig.labels[i] = 1
dataset_orig.favorable_label = 1
dataset_orig.unfavorable_label = 0
elif dataset_used == "compas":
if preprocessed == True:
dataset_orig = load_preproc_data_compas()
privileged_groups = [{'sex': 1,'race': 1}]
unprivileged_groups = [{'sex': 0,'race': 0}]
else:
dataset_orig = CompasDataset()
privileged_groups = [{'sex': 1,'race': 1}]
unprivileged_groups = [{'sex': 0,'race': 0}]
return dataset_orig, privileged_groups, unprivileged_groups
'----------------------------------------------------------------------------'
#Function to create sublabel for the datasets
def sublabel(dataset_df, sens_attr, decision_label):
sub_labels = pd.Series(dtype='object')
#sensitive attribute(s) must be given as a list
if len(sens_attr) == 2:
for i in range(len(dataset_df)):
if ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[sens_attr[1]].iloc[i]==0) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([0], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[sens_attr[1]].iloc[i]==0) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append(pd.Series([1], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[sens_attr[1]].iloc[i]==0) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([2], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[sens_attr[1]].iloc[i]==0) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append(pd.Series([3], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[sens_attr[1]].iloc[i]==1) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([4], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[sens_attr[1]].iloc[i]==1) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append(pd.Series([5], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[sens_attr[1]].iloc[i]==1) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([6], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[sens_attr[1]].iloc[i]==1) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append(pd.Series([7], index=[i]))
#if there is a single binary attribute
elif len(sens_attr) == 1:
for i in range(len(dataset_df)):
if ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([0], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 0) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append(pd.Series([1], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[decision_label].iloc[i]==0)):
sub_labels = sub_labels.append(pd.Series([2], index=[i]))
elif ((dataset_df[sens_attr[0]].iloc[i] == 1) & (dataset_df[decision_label].iloc[i]==1)):
sub_labels = sub_labels.append( | pd.Series([3], index=[i]) | pandas.Series |
#!/usr/bin/env python3
import sys
from rdkit import Chem
from rdkit.Chem.Descriptors import MolWt, MolLogP, NumHDonors, NumHAcceptors, TPSA
from rdkit.Chem.rdMolDescriptors import CalcNumRotatableBonds
import multiprocessing as mp
from multiprocessing import Pool
import time
import pandas as pd
import os
import json
from docopt import docopt
import pkg_resources
cmd_str = """Usage:
rd_filters filter --in INPUT_FILE --prefix PREFIX [--rules RULES_FILE_NAME] [--alerts ALERT_FILE_NAME][--np NUM_CORES]
rd_filters template --out TEMPLATE_FILE [--rules RULES_FILE_NAME]
Options:
--in INPUT_FILE input file name
--prefix PREFIX prefix for output file names
--rules RULES_FILE_NAME name of the rules JSON file
--alerts ALERTS_FILE_NAME name of the structural alerts file
--np NUM_CORES the number of cpu cores to use (default is all)
--out TEMPLATE_FILE parameter template file name
"""
def read_rules(rules_file_name):
"""
Read rules from a JSON file
:param rules_file_name: JSON file name
:return: dictionary corresponding to the contents of the JSON file
"""
with open(rules_file_name) as json_file:
try:
rules_dict = json.load(json_file)
return rules_dict
except json.JSONDecodeError:
print(f"Error parsing JSON file {rules_file_name}")
sys.exit(1)
def write_rules(rule_dict, file_name):
"""
Write configuration to a JSON file
:param rule_dict: dictionary with rules
:param file_name: JSON file name
:return: None
"""
ofs = open(file_name, "w")
ofs.write(json.dumps(rule_dict, indent=4, sort_keys=True))
print(f"Wrote rules to {file_name}")
ofs.close()
def default_rule_template(alert_list, file_name):
"""
Build a default rules template
:param alert_list: list of alert set names
:param file_name: output file name
:return: None
"""
default_rule_dict = {
"MW": [0, 500],
"LogP": [-5, 5],
"HBD": [0, 5],
"HBA": [0, 10],
"TPSA": [0, 200],
"Rot": [0, 10]
}
for rule_name in alert_list:
if rule_name == "Inpharmatica":
default_rule_dict["Rule_" + rule_name] = True
else:
default_rule_dict["Rule_" + rule_name] = False
write_rules(default_rule_dict, file_name)
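# Example of the emitted template (illustrative; the alert-set names depend on the alerts file):
# {
#     "MW": [0, 500], "LogP": [-5, 5], "HBD": [0, 5], "HBA": [0, 10],
#     "TPSA": [0, 200], "Rot": [0, 10],
#     "Rule_Inpharmatica": true, "Rule_<other_set>": false
# }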
def get_config_file(file_name, environment_variable):
"""
Read a configuration file, first look for the file, if you can't find
it there, look in the directory pointed to by environment_variable
:param file_name: the configuration file
:param environment_variable: the environment variable
:return: the file name or file_path if it exists otherwise exit
"""
if os.path.exists(file_name):
return file_name
else:
config_dir = os.environ.get(environment_variable)
if config_dir:
config_file_path = os.path.join(os.path.sep, config_dir, file_name)
if os.path.exists(config_file_path):
return config_file_path
error_list = [f"Could not file {file_name}"]
if config_dir:
err_str = f"Could not find {config_file_path} based on the {environment_variable}" + \
"environment variable"
error_list.append(err_str)
error_list.append(f"Please check {file_name} exists")
error_list.append(f"Or in the directory pointed to by the {environment_variable} environment variable")
print("\n".join(error_list))
sys.exit(1)
class RDFilters:
def __init__(self, rules_file_name):
good_name = get_config_file(rules_file_name, "FILTER_RULES_DIR")
self.rule_df = | pd.read_csv(good_name) | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
symbol = 'AMD'
market = 'SPY'
num_of_years = 1
start = dt.date.today() - dt.timedelta(days=365*num_of_years)
end = dt.date.today()
dataset = yf.download(symbol,start,end)
benchmark = yf.download(market,start,end)
dataset['Returns'] = dataset['Adj Close'].pct_change().dropna()
PP = pd.Series((dataset['High'] + dataset['Low'] + dataset['Close']) / 3)
R1 = pd.Series(2 * PP - dataset['Low'])
S1 = pd.Series(2 * PP - dataset['High'])
R2 = pd.Series(PP + dataset['High'] - dataset['Low'])
S2 = pd.Series(PP - dataset['High'] + dataset['Low'])
R3 = pd.Series(dataset['High'] + 2 * (PP - dataset['Low']))
S3 = pd.Series(dataset['Low'] - 2 * (dataset['High'] - PP))
R4 = pd.Series(dataset['High'] + 3 * (PP - dataset['Low']))
S4 = pd.Series(dataset['Low'] - 3 * (dataset['High'] - PP))
R5 = pd.Series(dataset['High'] + 4 * (PP - dataset['Low']))
S5 = pd.Series(dataset['Low'] - 4 * (dataset['High'] - PP))
P = pd.Series((dataset['Open'] + (dataset['High'] + dataset['Low'] + dataset['Close'])) / 4) # Opening Price Formula
psr = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3,'R4':R4, 'S4':S4,'R5':R5, 'S5':S5}
PSR = pd.DataFrame(psr)
dataset = dataset.join(PSR)
print(dataset.head())
pivot_point = pd.concat([dataset['Adj Close'],P,R1,S1,R2,S2,R3,S3],axis=1).plot(figsize=(18,12),grid=True)
plt.title('Stock Pivot Point')
plt.legend(['Price','P','R1','S1','R2','S2','R3','S3'], loc=0)
plt.show()
dataset['Adj Close']['2018-05-01':'2018-06-01']
date_range = dataset[['Adj Close','P','R1','S1','R2','S2','R3','S3']]['2018-05-01':'2018-06-01']# Pick Date Ranges
P = pd.Series((dataset['High'] + dataset['Low'] + 2*dataset['Close']) / 4)
R1 = pd.Series(2 * P - dataset['Low'])
S1 = pd.Series(2 * P - dataset['High'])
R2 = pd.Series(P + dataset['High'] - dataset['Low'])
S2 = pd.Series(P - dataset['High'] + dataset['Low'])
wpp = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2}
WPP = pd.DataFrame(wpp)
print(WPP.head())
R1 = pd.Series((dataset['High'] - dataset['Low']) * 1.1 / (2+dataset['Close']))
R2 = pd.Series((dataset['High'] - dataset['Low']) * 1.1 / (4+dataset['Close']))
R3 = | pd.Series((dataset['High'] - dataset['Low']) * 1.1 / (6+dataset['Close'])) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
"""
Modelling: build reviewers clusters with K-means method
"""
#################################### Dependencies ##############################
import pandas as pd
import ast
import numpy as np
from sklearn.cluster import KMeans
from matplotlib.pyplot import cm
# import Elbow method visualizer
from yellowbrick.cluster import KElbowVisualizer
#from kmodes.kmodes import KModes
from sklearn.metrics import pairwise_distances_argmin_min
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from collections import Counter
import pickle
import seaborn as sns
##################################### Functions ##############################
def get_unique_elements(column_name:str) -> list:
""" Extract unique elements from a column as a list
"""
c=data_to_cluster[column_name].values.tolist()
cuisines_list=[]
for i in range(len(c)):
item=ast.literal_eval(c[i])
for j in range(len(item)):
cuisines_list.append(item[j][0])
c_s=set(cuisines_list)
cuisines=list(c_s)
return cuisines
def map_count_list(x,element) -> int:
""" Counts the number of occurence of a specific element within the list of cuisines"""
x_list=ast.literal_eval(str(x))
value_count=0
for item in x_list:
if item[0]==element:
value_count=item[1]
return value_count
def total_tags(x) -> int:
"""Sum the total number of tags within a list of cuisines"""
x_list=ast.literal_eval(str(x))
total_tags=0
for el in x_list:
total_tags=total_tags+el[1]
return total_tags
def kmeans(points,n_clusters):
"""
Cluster observations with K-means method
returns labels and fitted model
"""
# create kmeans object
kmeans = KMeans(n_clusters=n_clusters)
# fit kmeans object to data
kmeans.fit(points)
# print location of clusters learned by kmeans object
print(kmeans.cluster_centers_)
# save new clusters for chart
y_km = kmeans.fit_predict(points)
print('Clusters partition: ', Counter(y_km))
return y_km, kmeans
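# Illustrative call (the feature-matrix name is a placeholder):
# cluster_labels, kmeans_model = kmeans(reviewer_features, n_clusters=5)
# kmeans_model.predict(new_reviewer_features) then assigns unseen reviewers to the learned clusters.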
def plot_clusters_cuisines(i,cuisine_countries_clusters):
""" Visual analysis of the clusters with types of cuisine
"""
df= | pd.DataFrame(group_by_cluster.iloc[i,:]) | pandas.DataFrame |
from probatus.metric_volatility import BaseVolatilityEstimator, TrainTestVolatility, SplitSeedVolatility,\
BootstrappedVolatility, get_metric, sample_data, check_sampling_input
from sklearn.tree import DecisionTreeClassifier
import pytest
import numpy as np
import pandas as pd
from unittest.mock import patch
from probatus.stat_tests.distribution_statistics import DistributionStatistics
from probatus.utils import Scorer, NotFittedError
import os
import matplotlib.pyplot as plt
import matplotlib
# Turn off interactive mode in plots
plt.ioff()
matplotlib.use('Agg')
@pytest.fixture(scope='function')
def X_array():
return np.array([[2, 1], [3, 2], [4, 3], [1, 2], [1, 1]])
@pytest.fixture(scope='function')
def y_list():
return [1, 0, 0, 1, 1]
@pytest.fixture(scope='function')
def y_array(y_list):
return np.array(y_list)
@pytest.fixture(scope='function')
def X_df(X_array):
return pd.DataFrame(X_array, columns=['c1', 'c2'])
@pytest.fixture(scope='function')
def y_series(y_list):
return pd.Series(y_list)
@pytest.fixture(scope='function')
def iteration_results():
iterations_cols = ['metric_name', 'train_score', 'test_score', 'delta_score']
return pd.DataFrame([['roc_auc', 0.8, 0.7, 0.1], ['roc_auc', 0.7, 0.6, 0.1], ['roc_auc', 0.9, 0.8, 0.1],
['accuracy', 1, 0.9, 0.1], ['accuracy', 0.8, 0.7, 0.1], ['accuracy', 0.9, 0.8, 0.1]],
columns=iterations_cols)
@pytest.fixture(scope='function')
def report():
report_cols = ['train_mean', 'train_std', 'test_mean', 'test_std', 'delta_mean', 'delta_std']
report_index = ['roc_auc', 'accuracy']
return pd.DataFrame([[0.8, 0.08164, 0.7, 0.08164, 0.1, 0],
[0.9, 0.08164, 0.8, 0.08164, 0.1, 0]], columns=report_cols, index=report_index).astype(float)
@pytest.fixture(scope='function')
def iterations_train():
return pd.Series([0.8, 0.7, 0.9], name='train_score')
@pytest.fixture(scope='function')
def iterations_test():
return pd.Series([0.7, 0.6, 0.8], name='test_score')
@pytest.fixture(scope='function')
def iterations_delta():
return pd.Series([0.1, 0.1, 0.1], name='delta_score')
def test_inits(mock_model):
vol1 = SplitSeedVolatility(mock_model, scoring=['accuracy', 'roc_auc'], test_prc=0.3, n_jobs=2,
stats_tests_to_apply=['ES', 'KS'], random_state=1, iterations=20)
assert id(vol1.clf) == id(mock_model)
assert vol1.test_prc == 0.3
assert vol1.n_jobs == 2
assert vol1.stats_tests_to_apply == ['ES', 'KS']
assert vol1.random_state == 1
assert vol1.iterations == 20
assert len(vol1.stats_tests_objects) == 2
assert len(vol1.scorers) == 2
assert vol1.sample_train_test_split_seed is True
vol2 = BootstrappedVolatility(mock_model, scoring='roc_auc', stats_tests_to_apply='KS', test_sampling_fraction=0.8)
assert id(vol2.clf) == id(mock_model)
assert vol2.stats_tests_to_apply == ['KS']
assert len(vol2.stats_tests_objects) == 1
assert len(vol2.scorers) == 1
assert vol2.sample_train_test_split_seed is False
assert vol2.test_sampling_fraction == 0.8
assert vol2.fitted is False
assert vol2.iterations_results is None
assert vol2.report is None
def test_base_fit(mock_model, X_df, y_series):
vol = BaseVolatilityEstimator(mock_model, random_state=1)
with patch('numpy.random.seed') as mock_seed:
vol.fit(X_df, y_series)
mock_seed.assert_called_with(1)
assert vol.iterations_results is None
assert vol.report is None
assert vol.fitted is True
def test_compute(report, mock_model):
vol = BaseVolatilityEstimator(mock_model)
with pytest.raises(NotFittedError):
vol.compute()
vol.fit()
with pytest.raises(ValueError):
vol.compute()
vol.report = report
pd.testing.assert_frame_equal(vol.compute(), report)
pd.testing.assert_frame_equal(vol.compute(metrics=['roc_auc']), report.loc[['roc_auc']])
pd.testing.assert_frame_equal(vol.compute(metrics='roc_auc'), report.loc[['roc_auc']])
def test_plot(report, mock_model, iterations_train, iterations_test, iterations_delta):
with patch.object(BaseVolatilityEstimator, 'compute', return_value=report.loc[['roc_auc']]) as mock_compute:
with patch.object(BaseVolatilityEstimator, '_get_samples_to_plot',
return_value=(iterations_train, iterations_test, iterations_delta)) as mock_get_samples:
vol = BaseVolatilityEstimator(mock_model)
vol.fitted = True
vol.plot(metrics='roc_auc')
mock_compute.assert_called_with(metrics='roc_auc')
mock_get_samples.assert_called_with(metric_name='roc_auc')
def test_get_samples_to_plot(mock_model, iteration_results, iterations_train, iterations_test, iterations_delta):
vol = BaseVolatilityEstimator(mock_model)
vol.fitted = True
vol.iterations_results=iteration_results
train, test, delta = vol._get_samples_to_plot(metric_name='roc_auc')
pd.testing.assert_series_equal(train, iterations_train)
pd.testing.assert_series_equal(test, iterations_test)
pd.testing.assert_series_equal(delta, iterations_delta)
def test_create_report(mock_model, iteration_results, report):
vol = BaseVolatilityEstimator(mock_model)
vol.fitted = True
vol.iterations_results = iteration_results
vol._create_report()
pd.testing.assert_frame_equal(vol.report, report, check_less_precise=3)
def test_compute_mean_std_from_runs(mock_model, iteration_results):
vol = BaseVolatilityEstimator(mock_model)
results = vol._compute_mean_std_from_runs(iteration_results[iteration_results['metric_name'] == 'roc_auc'])
expected_results = [0.8, 0.08164, 0.7, 0.08164, 0.1, 0]
for idx, item in enumerate(results):
assert pytest.approx(item, 0.01) == expected_results[idx]
def test_compute_stats_tests_values(mock_model, iteration_results):
vol = BaseVolatilityEstimator(mock_model, stats_tests_to_apply=['KS'])
with patch.object(DistributionStatistics, 'compute', return_value=(0.1, 0.05)):
stats = vol._compute_stats_tests_values(iteration_results)
assert stats[0] == 0.1
assert stats[1] == 0.05
def test_fit_compute(mock_model, report, X_df, y_series):
vol = BaseVolatilityEstimator(mock_model)
with patch.object(BaseVolatilityEstimator, 'fit') as mock_fit:
with patch.object(BaseVolatilityEstimator, 'compute', return_value=report) as mock_compute:
result = vol.fit_compute(X_df, y_series)
mock_fit.assert_called_with(X_df, y_series)
mock_compute.assert_called_with()
pd.testing.assert_frame_equal(result, report)
def test_fit_train_test_sample_seed(mock_model, X_df, y_series, iteration_results):
vol = TrainTestVolatility(mock_model, scoring='roc_auc', iterations=3, sample_train_test_split_seed=True)
with patch.object(BaseVolatilityEstimator, 'fit') as mock_base_fit:
with patch.object(TrainTestVolatility, '_create_report') as mock_create_report:
with patch('probatus.metric_volatility.volatility.get_metric', side_effect=[iteration_results.iloc[[0]], iteration_results.iloc[[1]], iteration_results.iloc[[2]]]):
vol.fit(X_df, y_series)
mock_base_fit.assert_called_once()
mock_create_report.assert_called_once()
pd.testing.assert_frame_equal(vol.iterations_results, iteration_results.iloc[[0, 1, 2]])
def test_get_metric(mock_model, X_df, y_series):
split_seed = 1
test_prc = 0.6
with patch('probatus.metric_volatility.metric.train_test_split',
return_value=(X_df.iloc[[0, 1, 2]], X_df.iloc[[3, 4]], y_series.iloc[[0, 1, 2]],
y_series.iloc[[3, 4]])) as mock_split:
with patch('probatus.metric_volatility.metric.sample_data',
side_effect=[(X_df.iloc[[0, 1, 1]], y_series.iloc[[0, 1, 1]]),
(X_df.iloc[[3, 3]], y_series.iloc[[3, 3]])]) as mock_sample:
with patch.object(Scorer, 'score', side_effect=[0.8, 0.7]):
output = get_metric(X_df, y_series, mock_model, test_size=test_prc, split_seed=split_seed,
scorers=[Scorer('roc_auc')], train_sampling_type='bootstrap',
test_sampling_type='bootstrap', train_sampling_fraction=1,
test_sampling_fraction=1)
mock_split.assert_called_once()
mock_sample.assert_called()
mock_model.fit.assert_called()
expected_output = pd.DataFrame([['roc_auc', 0.8, 0.7, 0.1]],
columns=['metric_name', 'train_score', 'test_score', 'delta_score'])
pd.testing.assert_frame_equal(expected_output, output)
def test_sample_data_no_sampling(X_df, y_series):
with patch('probatus.metric_volatility.utils.check_sampling_input') as mock_sampling_input:
X_out, y_out = sample_data(X_df, y_series, sampling_type=None, sampling_fraction=1)
mock_sampling_input.assert_called_once()
pd.testing.assert_frame_equal(X_out, X_df)
pd.testing.assert_series_equal(y_out, y_series)
def test_sample_data_bootstrap(X_df, y_series):
with patch('probatus.metric_volatility.utils.check_sampling_input') as mock_sampling_input:
X_out, y_out = sample_data(X_df, y_series, sampling_type='bootstrap', sampling_fraction=0.8)
mock_sampling_input.assert_called_once()
assert X_out.shape == (4, 2)
assert y_out.shape == (4, )
def test_sample_data_sample(X_df, y_series):
with patch('probatus.metric_volatility.utils.check_sampling_input') as mock_sampling_input:
X_out, y_out = sample_data(X_df, y_series, sampling_type='subsample', sampling_fraction=1)
mock_sampling_input.assert_called_once()
pd.testing.assert_frame_equal(X_out, X_df)
| pd.testing.assert_series_equal(y_out, y_series) | pandas.testing.assert_series_equal |
import os
import wget
import yfinance as yf
from sqlalchemy import create_engine
import pandas_market_calendars as cal
import pandas as pd
from tqdm import tqdm
from arctic import Arctic
def chunks(l, n):
"""
Yield successive n-sized chunks from a list l.
https://stackoverflow.com/a/312464/4549682
"""
for i in range(0, len(l), n):
yield l[i:i + n]
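# Illustrative check: chunking 7 items into groups of 3 yields [[0, 1, 2], [3, 4, 5], [6]],
# e.g. list(chunks(list(range(7)), 3))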
class downloader():
def __init__(self, stocks=['QQQ', 'TQQQ', 'SQQQ'], db='sqlite', storage_dir=None, db_file='stock_data.sqlite'):
self.db = db
if storage_dir is None:
home_dir = os.path.expanduser("~")
self.storage_dir = os.path.join(home_dir, '.yfinance_data')
if not os.path.exists(self.storage_dir):
os.makedirs(self.storage_dir)
else:
self.storage_dir = storage_dir
if db == 'sqlite':
self.db_file = db_file
# 4 slashes for absolute path: https://docs.sqlalchemy.org/en/13/core/engines.html#sqlite
self.e = create_engine('sqlite:///{}/{}'.format(self.storage_dir, self.db_file))
self.con = self.e.connect()
else:
self.e = None
self.con = None
if db == 'arctic':
self.store = Arctic('localhost')
self.store.initialize_library('yfinance_stockdata')
self.library = self.store['yfinance_stockdata']
self.stocks = stocks
def get_stock_groups(self):
"""
gets latest downloaded dates for all stocks in list;
groups stocks by start date for multithreaded download
"""
print('getting start dates for existing data...')
start_dates = {}
if self.db == 'sqlite':
if self.e.dialect.has_table(self.e, 'data'):
for s in tqdm(self.stocks):
res = self.con.execute('select max(Date) from data where ticker = "{}"'.format(s))
date = pd.to_datetime(res.fetchone()[0])
start_dates[s] = pd.NaT
if date is not None:
start_dates[s] = date
else:
for s in self.stocks:
start_dates[s] = pd.NaT
elif self.db == 'arctic':
symbols_in_lib = set(self.library.list_symbols())
for s in self.stocks:
if s in symbols_in_lib:
item = self.library.read(s)
df = item.data
if df.shape[0] == 0:
start_dates[s] = pd.NaT
else:
start_dates[s] = df.index.max()
else:
start_dates[s] = pd.NaT
# group by start dates so we can multithread download
start_date_df = pd.DataFrame(data={'ticker': list(start_dates.keys()), 'start_date': list(start_dates.values())})
unique_dates = start_date_df['start_date'].dt.date.unique()
groups = []
for udate in unique_dates:
if pd.isnull(udate):
tickers = start_date_df[pd.isnull(start_date_df['start_date'])]['ticker'].tolist()
else:
tickers = start_date_df[start_date_df['start_date'] == pd.Timestamp(udate)]['ticker'].tolist()
groups.append(tickers)
return unique_dates, groups
def download_stock_data(self):
unique_dates, groups = self.get_stock_groups()
today = pd.to_datetime(pd.datetime.utcnow()).tz_localize('UTC')
ndq = cal.get_calendar('NASDAQ')
sched = ndq.schedule(start_date='01-01-1900', end_date=today)
end_date = today
# if before the close today, use yesterday as last day
if today < sched.iloc[-1]['market_close']:
end_date = sched.iloc[-2]['market_close']
for start, grp in zip(unique_dates, groups):
if pd.isnull(start) or start < today.date() and start != end_date.date():
# start is non-inclusive, end is inclusive
if pd.isnull(start):
start = None
data = yf.download(grp, auto_adjust=True, rounding=False, start=start, end=end_date)
dfs = []
for s in grp:
try:
df = data.xs(s, level=1, axis=1).copy()
except AttributeError:
df = data.copy()
if 'Adj Close' in df.columns:
df.drop(columns='Adj Close', inplace=True)
# on error some dfs have 0 rows, but adj close column...ignore these
if df.shape[0] == 0:
continue
df.dropna(inplace=True)
if self.db in ['sqlite']:
df['ticker'] = s
dfs.append(df)
elif self.db == 'arctic':
if start is None:
self.library.write(s, df)
else:
self.library.append(s, df)
# store data in sql
if self.db in ['sqlite']:
print('writing data to sql db...')
# write to db in chunks to avoid crashing on full memory
# only seem to be able to write about 30k rows at once with sqlite3
for c in tqdm(chunks(dfs, 100)):
                        full_df = pd.concat(c, axis=0)
full_df.to_sql(name='data', con=self.con, if_exists='append', index_label='date', method='multi', chunksize=30000)
else:
print('Stock group up to date, not downloading.')
def get_stocklists(self):
link = 'ftp://ftp.nasdaqtrader.com/symboldirectory/{}.txt'
for l in ['nasdaqlisted', 'otherlisted']:
filename = '{}.txt'.format(l)
if os.path.exists(filename):
os.remove(filename)
wget.download(link.format(l))
ndq = | pd.read_csv('nasdaqlisted.txt', sep='|') | pandas.read_csv |
import numpy as np
from sklearn import linear_model
import pandas as pd
import logging
from scipy import signal
reg = linear_model.LinearRegression(fit_intercept=True)
def make_relatinoal_data_struture():
path_save = '/mnt/368AE7F88AE7B313/Files_Programming/Git/ads_covid-19-sem/data/processed/COVID_relational_confirmed.csv'
data_path = '/mnt/368AE7F88AE7B313/Files_Programming/Git/ads_covid-19-sem/data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw = pd.read_csv(data_path).copy()
pd_data_base = pd_raw.rename(columns={'Country/Region': 'country', 'Province/State': 'state'})
pd_data_base = pd_data_base.drop(['Lat', 'Long'], axis=1)
test_pd = pd_data_base.set_index(['state', 'country']).T
pd_relational_model = test_pd.stack(level=[0, 1]).reset_index().rename(columns={'level_0': 'date', 0: 'confirmed'})
pd_relational_model['date'] = pd_relational_model.date.astype('datetime64[ns]')
logging.info("The dates are {}".format(pd_relational_model.date))
pd_relational_model.to_csv(path_save, sep=';')
def get_doubling_time_via_regression(in_array):
''' Use a linear regression to approximate the doubling rate
Parameters:
----------
in_array : pandas.series
Returns:
----------
Doubling rate: double
'''
y = np.array(in_array)
X = np.arange(-1, 2).reshape(-1, 1)
assert len(in_array) == 3
reg.fit(X, y)
intercept = reg.intercept_
slope = reg.coef_
return intercept / slope
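# Worked example (made-up counts): for in_array = [100, 200, 400] the regression over
# X = [-1, 0, 1] gives slope = 150 and intercept = 233.3, so the approximated
# doubling time is intercept / slope, roughly 1.56 days.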
def rolling_reg(df_input, col='confirmed'):
''' Rolling Regression to approximate the doubling time'
Parameters:
----------
df_input: pd.DataFrame
col: str
defines the used column
Returns:
----------
result: pd.DataFrame
'''
days_back = 3
result = df_input[col].rolling(
window=days_back,
min_periods=days_back).apply(get_doubling_time_via_regression, raw=False)
return result
def calc_doubling_rate(df_input, filter_on='confirmed'):
''' Calculate approximated doubling rate and return merged data frame
Parameters:
----------
df_input: pd.DataFrame
filter_on: str
defines the used column
Returns:
----------
df_output: pd.DataFrame
the result will be joined as a new column on the input data frame
'''
must_contain = set(['state', 'country', filter_on])
    assert must_contain.issubset(set(df_input.columns)), 'Error in calc_doubling_rate: not all required columns are in the data frame'
pd_DR_result = df_input[['state','country',filter_on]].groupby(['state', 'country']).apply(rolling_reg, filter_on).reset_index()
pd_DR_result = pd_DR_result.rename(columns={filter_on: filter_on +'_doubling_rate',
'level_2': 'index'})
df_input = df_input.reset_index()
df_output = pd.merge(df_input, pd_DR_result[['index', filter_on + '_doubling_rate']], on=['index'], how='left')
logging.warning("Doubling rate for {} calculated".format(filter_on))
return df_output
def savgol_filter(df_input, column='confirmed', window=5):
''' Savgol Filter which can be used in groupby apply function (data structure kept)
parameters:
----------
df_input : pandas.series
column : str
window : int
used data points to calculate the filter result
Returns:
----------
df_result: pd.DataFrame
the index of the df_input has to be preserved in result
'''
degree = 1
df_result = df_input
filter_in = df_input[column].fillna(0) # attention with the neutral element here
result = signal.savgol_filter(np.array(filter_in),
window, # window size used for filtering
1)
df_result[column + '_filtered'] = result
return df_result
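# Note: with window=5 and polynomial order 1 this acts as a centred five-point linear
# smoother; e.g. calling savgol_filter(group_df, column='confirmed') adds a
# 'confirmed_filtered' column while keeping the group's original index.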
def calc_filtered_data(df_input, filter_on='confirmed'):
''' Calculate savgol filter and return merged data frame
Parameters:
----------
df_input: pd.DataFrame
filter_on: str
defines the used column
Returns:
----------
df_output: pd.DataFrame
the result will be joined as a new column on the input data frame
'''
must_contain = set(['state', 'country', filter_on])
    assert must_contain.issubset(set(df_input.columns)), 'Error in calc_filtered_data: not all required columns are in the data frame'
pd_filtered_result = df_input[['state', 'country', filter_on]].groupby(['state', 'country']).apply(
savgol_filter).reset_index()
df_output = pd.merge(df_input, pd_filtered_result[['index', filter_on + '_filtered']], on=['index'], how='left')
    logging.warning("Savgol filter for {} calculated".format(filter_on))
return df_output
def main():
path_main = '/mnt/368AE7F88AE7B313/Files_Programming/Git/ads_covid-19-sem/data/processed/COVID_relational_confirmed.csv'
make_relatinoal_data_struture()
pd_JH_data = | pd.read_csv(path_main, sep=';', parse_dates=[0]) | pandas.read_csv |
import streamlit as st
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
import plotly.express as px
#in this one I'm letting people see all of the items for a portal. So they pic that, the data is filtered
#and then you get a chart with all of the items
def comparebar():
# Add histogram data
df = pd.read_csv("https://raw.githubusercontent.com/tyrin/info-topo-dash/master/data/freshdata.csv")
#define variables that the customer will input
portal=""
sdf=df.sort_values(by='Portal')
site= sdf['Portal'].unique()
portal = st.sidebar.multiselect(
'Portal:', site)
message = st.empty()
if len(portal) == 0:
message.text("Select a portal")
#filter the data by the selected portal
if len(portal) > 0:
dff = df.loc[df['Portal'].isin(portal)]
dfs = dff.sort_values(by='Group')
#group = dfs['Group'].unique()
# convert the 'Date' column to datetime format
dfs['Date']= | pd.to_datetime(df['Date']) | pandas.to_datetime |
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import math
from sklearn.metrics import f1_score
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
#get the training dataset
get_ipython().system('wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv')
#Some visualisation code
#df['loan_status'].value_counts()
#df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
#df.groupby(['education'])['loan_status'].value_counts(normalize=True)
#load the training dataset
df = pd.read_csv('loan_train.csv')
#pre-process the data
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
#define Feature set
Feature = df[['Principal','terms','age','Gender']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
#create feature set and labels
x = Feature
x[0:5]
y = df['loan_status'].values
y[0:5]
#create two dictionaries to hold the evaluation metrics
trainscores={}
testscores={}
#normalize the data before splitting so the models are trained on the scaled features
x = preprocessing.StandardScaler().fit(x).transform(x.astype(float))
#split the given training dataset
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y)
#start training and testing on the dataset
#KNN
#run the loop to find the best k
Ks = int(math.sqrt(x.shape[0]))
j_acc = np.zeros((Ks-1))
f_acc = np.zeros((Ks-1))
#ConfustionMx = [];
for n in range(1,Ks):
#Train Model and Predict
    neigh = KNeighborsClassifier(n_neighbors = n).fit(x_train,y_train)
#predict
y_hat=neigh.predict(x_test)
#jaccard similarity score
j_acc[n-1] = jaccard_similarity_score(y_test, y_hat)
#f1score
f_acc[n-1]=f1_score(y_test, y_hat, average='weighted',labels=np.unique(y_hat))
trainscores['KNN-jaccard']=j_acc.max()
trainscores['KNN-F1']=f_acc.max()
#Decision Tree
drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
drugTree.fit(x_train,y_train)
y__hat = drugTree.predict(x_test)
trainscores['Decision tree-Jaccard']= jaccard_similarity_score(y_test, y__hat)
trainscores['Decision tree- f1 score']=f1_score(y_test, y__hat, average='weighted', labels=np.unique(y__hat))
#Support Vector Machine
clf = svm.SVC(kernel='rbf', gamma="auto")
clf.fit(x_train, y_train)
y___hat = clf.predict(x_test)
trainscores['svm -jaccard']=jaccard_similarity_score(y_test, y___hat)
trainscores['svm-f1 score']=f1_score(y_test, y___hat, average='weighted', labels=np.unique(y___hat))
#Logistic Regression
LR = LogisticRegression(C=0.000001, solver='saga').fit(x_train,y_train)
y____hat = LR.predict(x_test)
yhat_prob = LR.predict_proba(x_test)
trainscores['logistic regr- jaccard']=jaccard_similarity_score(y_test, y____hat)
trainscores['logistic regr- f1 score']=f1_score(y_test, y____hat, average='weighted', labels=np.unique(y____hat))
trainscores['logistic regr- log loss']=log_loss(y_test, yhat_prob)
#####################################################
#Out-of-sample prediction and model evaluation
#####################################################
#get the test dataset for prediction and evaluation of the models previously trained
get_ipython().system('wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv')
#Prepare Test set for evaluation
test_df = pd.read_csv('loan_test.csv')
test_df.head()
test_df['due_date']=pd.to_datetime(test_df['due_date'])
test_df['effective_date']= | pd.to_datetime(test_df['effective_date']) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : <EMAIL>
# @Time : 2020/1/6 22:46
# @File : cross_feature.py
"""
Brute-force division crosses between the top-100 features
"""
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import numpy as np
cross_feature_num = 100
def LGB_test(train_x, train_y, test_x, test_y, cate_col=None):
if cate_col:
data = pd.concat([train_x, test_x])
for fea in cate_col:
data[fea] = data[fea].fillna('-1')
data[fea] = LabelEncoder().fit_transform(data[fea].apply(str))
train_x = data[:len(train_x)]
test_x = data[len(train_x):]
print("LGB test")
clf = lgb.LGBMClassifier(
boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,
max_depth=-1, n_estimators=3000, objective='binary',
subsample=0.7, colsample_bytree=0.7, subsample_freq=1, # colsample_bylevel=0.7,
learning_rate=0.01, min_child_weight=25, random_state=2018, n_jobs=50
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (test_x, test_y)], early_stopping_rounds=100)
feature_importances = sorted(zip(train_x.columns, clf.feature_importances_), key=lambda x: x[1])
return clf.best_score_['valid_1']['binary_logloss'], feature_importances
def off_test_split(org, cate_col=None):
data = org[org.is_trade > -1]
data = data.drop(
['hour48', 'hour', 'user_id', 'query1', 'query',
'instance_id', 'item_property_list', 'context_id', 'context_timestamp', 'predict_category_property'], axis=1)
data['item_category_list'] = LabelEncoder().fit_transform(data['item_category_list'])
y = data.pop('is_trade')
train_x, test_x, train_y, test_y = train_test_split(data, y, test_size=0.15, random_state=2018)
train_x.drop('day', axis=1, inplace=True)
test_x.drop('day', axis=1, inplace=True)
score = LGB_test(train_x, train_y, test_x, test_y, cate_col)
return score[1]
def LGB_predict(data, file):
data = data.drop(['hour48', 'hour', 'user_id', 'shop_id', 'query1', 'query',
'item_property_list', 'context_id', 'context_timestamp', 'predict_category_property'], axis=1)
data['item_category_list'] = LabelEncoder().fit_transform(data['item_category_list'])
train = data[data['is_trade'] > -1]
predict = data[data['is_trade'] == -2]
res = predict[['instance_id']]
train_y = train.pop('is_trade')
train_x = train.drop(['day', 'instance_id'], axis=1)
test_x = predict.drop(['day', 'instance_id', 'is_trade'], axis=1)
clf = lgb.LGBMClassifier(
boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,
max_depth=-1, n_estimators=3000, objective='binary',
subsample=0.7, colsample_bytree=0.7, subsample_freq=1, # colsample_bylevel=0.7,
learning_rate=0.01, min_child_weight=25, random_state=2018, n_jobs=50
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y)])
res['predicted_score'] = clf.predict_proba(test_x)[:, 1]
testb = pd.read_csv('../data/round2_ijcai_18_test_b_20180510.txt', sep=' ')[['instance_id']]
res = pd.merge(testb, res, on='instance_id', how='left')
res[['instance_id', 'predicted_score']].to_csv('../submit/' + file + '.txt', sep=' ', index=False)
def add(f1, f2):
for i in f2:
f1 = pd.merge(f1, i, on='instance_id', how='left')
return f1
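# Illustrative use (frame names as loaded below): add(org, [query, leak, comp])
# left-joins each feature frame onto the base frame by instance_id.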
if __name__ == '__main__':
org = pd.read_csv('../data/origion_concat.csv')
train = org[org.day == 7]
query = pd.read_csv('../data/query_all.csv')
leak = pd.read_csv('../data/leak_all.csv')
comp = pd.read_csv('../data/compare_all.csv')
day6_cvr = pd.read_csv('../data/6day_cvr_feature.csv')
days7_cvr = pd.read_csv('../data/7days_cvr_feature.csv')
day6_rank = pd.read_csv('../data/rank_feature_6day.csv')
days7_rank = pd.read_csv('../data/rank_feature_7days.csv')
nobuy = pd.read_csv('../data/nobuy_feature.csv')
trend = pd.read_csv('../data/trend_feature.csv')
trend = trend[[i for i in trend.columns if 'cnt6' not in i]]
var = pd.read_csv('../data/item_shop_var_feature.csv')
user_buy_click = pd.read_csv('../data/user_buy_click_feature.csv') # need proc caterory feature
property = | pd.read_csv('../data/property_feature.csv') | pandas.read_csv |
import sys
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.font_manager import FontProperties
from statsmodels.tsa import stattools
from statsmodels.graphics import tsaplots
from statsmodels.tsa.arima_model import ARIMA
import arch.unitroot as unitroot
class Aqt001(object):
def __init__(self):
self.name = 'Aqt001'
        # Data file format: index code, date, day of week, open price, high price,
        # low price, close price, return
# Indexcd Trddt Daywk Opnindex Hiindex
# Loindex Clsindex Retindex
self.data_file = 'data/pqb/aqt001_001.txt'
def startup(self):
print('ARMA模型...')
#self.simulate_ar2()
#self.simulate_arima_p_d_q()
self.arima_demo()
#self.adf_demo()
def simulate_ar2(self):
        print('Simulating AR(2)')
alpha1 = 0.666
alpha2 = -0.333
wt = np.random.standard_normal(size=1000)
x = wt
for t in range(2, len(wt)):
x[t] = alpha1 * x[t-1] + alpha2 * x[t-2] + wt[t]
plt.plot(x, c='b')
plt.show()
ar2 = stattools.ARMA(x, (2, 0)).fit(disp=False)
print('p={0} **** {1}; q={2}***{3}; {4} - {5} - {6}'.format(
ar2.k_ar, ar2.arparams, ar2.k_ma, ar2.maparams,
ar2.aic, ar2.bic, ar2.hqic)
)
arima2_0_0 = ARIMA(x, order=(2, 0, 0)).fit(disp=False)
print('ARIMA: p={0} **** {1}; q={2}***{3}; {4} - {5} - {6}'. \
format(arima2_0_0.k_ar, arima2_0_0.arparams,
arima2_0_0.k_ma, arima2_0_0.maparams,
arima2_0_0.aic, arima2_0_0.bic,
arima2_0_0.hqic)
)
resid = arima2_0_0.resid
        # Plot the ACF of the residuals
acfs = stattools.acf(resid)
print(acfs)
tsaplots.plot_acf(resid, use_vlines=True, lags=30)
plt.title('ACF figure')
plt.show()
pacfs = stattools.pacf(resid)
print(pacfs)
tsaplots.plot_pacf(resid, use_vlines=True, lags=30)
plt.title('PACF figure')
plt.show()
def simulate_arima_p_d_q(self):
        print('Simulating an ARIMA(p,d,q) process')
np.random.seed(8)
alpha1 = 1.2
alpha2 = -0.7
beta1 = -0.06
beta2 = -0.02
w = np.random.standard_normal(size=1000)
x = w
for t in range(2, len(w)):
x[t] = alpha1 * x[t-1] + alpha2*x[t-2] + w[t] + beta1 * w[t-1] + beta2*w[t-2]
plt.plot(x, c='b')
plt.title('ARIMA(p, d, q) Figure')
plt.show()
        # Inspect the ACF
acfs = stattools.acf(x)
print('ARIMA(q,d,q) ACFS:\r\n{0}'.format(acfs))
tsaplots.plot_acf(x, use_vlines=True, lags=30)
plt.title('ARIMA(p,d,q) ACF')
plt.show()
        # Fit ARIMA models
min_ABQIC = sys.float_info.max
arima_model = None
break_loop = False
'''
for p in range(0, 5):
if break_loop:
break
for q in range(0, 5):
print('try {0}, d, {1}...'.format(p, q))
try:
arima_p_d_q = ARIMA(x, order=(p, 0, q)).fit(disp=False)
print('..... fit ok')
if arima_p_d_q.aic < min_ABQIC:
print('..... record good model')
min_ABQIC = arima_p_d_q.aic
arima_model = arima_p_d_q
#if 1==p and 1==q:
# break_loop = True
except Exception as ex:
print('.....!!!!!! Exception')
print('ARIMA: p={0} **** {1}; q={2}***{3}; {4} - {5} - {6}'. \
format(arima_model.k_ar, arima_model.arparams,
arima_model.k_ma, arima_model.maparams,
arima_model.aic, arima_model.bic,
arima_model.hqic)
)
'''
arima_model = ARIMA(x, order=(2, 0, 2)).fit(disp=False)
print('God_View:ARIMA: p={0} **** {1}; q={2}***{3}; {4} - {5} - {6}'. \
format(arima_model.k_ar, arima_model.arparams,
arima_model.k_ma, arima_model.maparams,
arima_model.aic, arima_model.bic,
arima_model.hqic)
)
resid = arima_model.resid
        # Plot the ACF of the residuals
acfs = stattools.acf(resid)
print(acfs)
tsaplots.plot_acf(resid, use_vlines=True, lags=30)
plt.title('ARIMA(p,d,q) ACF figure')
plt.show()
pacfs = stattools.pacf(resid)
print(pacfs)
tsaplots.plot_pacf(resid, use_vlines=True, lags=30)
plt.title('ARIMA(p,d,q) PACF figure')
plt.show()
def arima_demo(self):
register_matplotlib_converters()
data = | pd.read_csv(self.data_file, sep='\t', index_col='Trddt') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Using kmeans to create a bag of visual words for SVM to predict
object recognition in images.
By: <NAME>, <NAME>, and <NAME>.
MLDM Master's Year 2
Fall Semester 2017
"""
import pymysql
import pymysql.cursors
import json
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import time
import pandas as pd
import os
#%%
def load_objects_desc(n_objects):
# Connect to the database.
conn = pymysql.connect(db='images_db', user='mldm_gangster',
passwd='<PASSWORD>', port=3306,
host='mldm-cv-project.cnpjv4qug6jj.us-east-2.rds.amazonaws.com')
#conn = pymysql.connect(db='images_db', user='root', passwd='', host='localhost')
# Query
sql_get_descriptors = "SELECT * FROM desc_obj d INNER JOIN objects o \
ON d.id_obj = o.ID_object;"
#specify number of objects to load
#n_objects = 100000
#Load data
with conn.cursor() as cursor:
        # Execute the sql request
cursor.execute(sql_get_descriptors)
conn.commit()
#iterators and stoarge lists
it = 0
list_conca=[]
obj_name=[]
obj_id=[]
#Read data from query
for row in cursor:
if it < n_objects:
json_desc = json.loads(row[2])
list_conca = list_conca + json_desc['sift']
print('check1')
obj_name = obj_name + np.repeat(row[5],len(
json_desc['sift'])).tolist()
print('check2')
obj_id = obj_id + np.repeat([json_desc['obj']],len(
json_desc['sift'])).tolist()
print("Passage n°",it)
it+=1
else:
break
#Combine into df
#df_conca = pd.concat([pd.Series(list_conca),pd.Series(obj_name),
# pd.Series(obj_id)],axis=1)
return list_conca, obj_id, obj_name
#%%
#Find optimal number of principal components
def pca_test(pca_conca):
#Create table to store PCA values
pca_results = pd.DataFrame(columns=['evr','n_comp'])
#load pca and run
for n in range(64):
pca = PCA(n_components=n)
pca.fit(pca_conca)
df = pd.DataFrame.from_dict({"evr":[pca.explained_variance_ratio_.sum()],
"n_comp":[n]})
pca_results = pd.concat([pca_results,df])
print(n)
print(pca.explained_variance_ratio_.sum())
return pca_results
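# Illustrative follow-up (the 0.9 threshold is an arbitrary example): pick the smallest
# number of components whose cumulative explained variance clears a target, e.g.
# best_n = pca_results.loc[pca_results.evr >= 0.9, 'n_comp'].min()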
#%%
def kmeans_test(pca_conca, low_k, high_k, step_k):
#Table to store kmean reults
kmeans_results = pd.DataFrame(columns=['k','obj_name','distortion','run_time'])
#distortions = []
centroids = {}
#Run kmeans for each object
for obj in pca_conca.obj_name.unique():
#Only cluster descriptors from this object
df = pca_conca[pca_conca.obj_name==obj].drop(['obj_name'],axis=1)
print("subset taken")
#iterate through number of k
for k in range(low_k,high_k,step_k):
#for k in [50]:
#Track run time
start=time.time()
kmeans = KMeans(n_clusters=k).fit(df)
end=time.time()
print(obj + ':complete')
print(end-start)
results_df = pd.DataFrame.from_dict({"k":[k],"run_time":[(end-start)],
"distortion":[sum(np.min(cdist(df, kmeans.cluster_centers_,
'euclidean'),axis=1))/pca_conca.shape[1]],"obj_name":[obj]})
kmeans_results = pd.concat([kmeans_results,results_df])
centroids[obj] = kmeans.cluster_centers_
#Total time
print('Total run time:')
print(kmeans_results.run_time.sum())
return kmeans_results, centroids
''''
#Write results files to csv's
pca_results.to_csv('pca_results.csv')
kmeans_results.to_csv('kmeans_results.csv')
#Centroids
for keys, values in centroids.items():
file = 'kmeans_centroids' + str(keys)+'.csv'
df = pd.DataFrame(values)
df.to_csv(file)
kmeans_results = pd.read_csv('kmeans_results.csv')
'''
def plot_kmeans_results(kmeans_results):
# Plot the elbow
for i in kmeans_results.obj_name.unique():
k = kmeans_results[kmeans_results.obj_name == i].k
distortions = kmeans_results[kmeans_results.obj_name == i].distortion
plt.plot(k, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title(i)
plt.show()
#run kmeans for each object
def kmeans_per_object(pca_conca,k):
#Table to store kmean reults
kmeans_results = pd.DataFrame(columns=['k','obj_name','distortion','run_time'])
#distortions = []
centroids = {}
#Run kmeans for each object
for obj in pca_conca.obj_name.unique():
#Only cluster descriptors from this object
df = pca_conca[pca_conca.obj_name==obj].drop(['obj_name'],axis=1)
print("subset taken")
#Track run time
start=time.time()
#create and fit kmeans
kmeans = KMeans(n_clusters=k).fit(df)
end=time.time()
print(obj + ':complete')
print(end-start)
results_df = pd.DataFrame.from_dict({"k":[k],"run_time":[(end-start)],
"distortion":[sum(np.min(cdist(df, kmeans.cluster_centers_,
'euclidean'),axis=1))/pca_conca.shape[1]],"obj_name":[obj]})
#save results
kmeans_results = pd.concat([kmeans_results,results_df])
centroids[obj] = kmeans.cluster_centers_
return kmeans_results, centroids
#%%
#Load centroid results for each object
def load_clusters(folder):
#folder="C:/Users/schwi/Google Drive/MLDM/Computer Vision Project/results"
centroids = {}
for filename in os.listdir(folder):
if filename != 'desktop.ini':
            #Import the centroid csv for this object
            cent = pd.DataFrame.from_csv(os.path.join(folder, filename))
obj = filename[16:].replace('.csv','')
centroids[obj] = cent
return centroids
#%%
def combine_clusters(centroids):
#Combine all object centroids into one df
centroid_total = pd.DataFrame(columns=list(range(len(centroids['aeroplane']))))
for keys, values in centroids.items():
centroid_total = pd.concat([centroid_total, | pd.DataFrame(values) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import json, time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
from scipy.signal import find_peaks
from scipy.special import expit
def mergeData(rssi, velo, disr):
rssi = rssi.loc[:, ['DateTime', 'PositionNoLeap', 'Latitude', 'Longitude',
'A1_ValidTel', 'A2_ValidTel', 'A2_RSSI']]
rssi.rename(columns={'PositionNoLeap':'Position'}, inplace=True)
# deltas
rssi['RSSI'] = rssi.A2_RSSI
rssi['deltaValidTel'] = (rssi.A1_ValidTel + rssi.A2_ValidTel).diff()
rssi.loc[0, 'deltaValidTel'] = 0
rssi.loc[rssi.deltaValidTel > 11, 'deltaValidTel'] = 5
rssi.loc[rssi.deltaValidTel < 0, 'deltaValidTel'] = 0
rssi.drop(['A2_RSSI', 'A1_ValidTel', 'A2_ValidTel'],
axis='columns',
inplace=True)
# import velocities
velo = velo.drop(['EmergencyStopLimit', 'ID'], axis='columns')
velo.rename(columns={'CurrentVelocity': 'Velocity'}, inplace=True)
velo = velo.loc[velo.Velocity!=0]
# disruptions
disr.loc[disr["DisruptionCode"]==960862267, ["Description"]] = "Zwangsbremse wurde aktiviert"  # emergency brake was activated
disr.loc[disr["DisruptionCode"]==960862258, ["Description"]] = "Keine Linienleitertelegramme empfangen"  # no line conductor telegrams received
disr["disr_connection"] = False
disr.loc[disr.DisruptionCode == 960862258, "disr_connection"] = True
# merge datasets
df = pd.merge(rssi, velo, on='DateTime', how='inner')
df = pd.merge(df, disr.loc[disr.disr_connection==True,['DateTime', 'disr_connection']].drop_duplicates(),
on='DateTime', how='outer', sort=True)
df.loc[df.disr_connection.isna(), 'disr_connection'] = False
df.fillna(method='pad', inplace=True)
df.fillna(0, inplace=True)
# create path chunks
nChunks = 300
chunkSize = (df.Position.max()-df.Position.min()) // nChunks
df["posChunk"] = (df.Position-df.Position.min())+1
df.posChunk = (df.posChunk//chunkSize).astype(int)
# create time chunks
# get signed speed
df["deltaS"] = pd.to_datetime(df.DateTime).diff().dt.total_seconds()
df.loc[df.deltaS.isna(), 'deltaS']=0
df["Position_D"] = df.Position.diff()/10000 / df.deltaS * 3600
df["Position_D"] = df.Position_D.rolling(window=300).mean()
# get direction
df["Direction"] = 0
df.loc[df.Position_D > 0, 'Direction'] = 1
df.loc[df.Position_D < 0, 'Direction'] = -1
df['TimeChunk'] = np.nan
df.loc[df.Direction.diff() != 0, 'TimeChunk'] = np.arange((df.Direction.diff() != 0).sum())
df.TimeChunk.fillna(method='pad', inplace=True)
df.TimeChunk.fillna(0, inplace=True)
# print("Number of time chunks: ", (df.Direction.diff() != 0).sum())
return df
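# Illustrative note (added, not in the original script): TimeChunk increments each
# time the sign of the smoothed speed changes, so a Direction series such as
#     [1, 1, -1, -1, 0, 1]  ->  TimeChunk [0, 0, 1, 1, 2, 3]
# i.e. every chunk is one uninterrupted run of driving in a single direction.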
# %%
deltaDays = 2
rssi = pd.read_csv("data/rssi.csv")
rssi_dt = pd.to_datetime(rssi.DateTime)
finalTime = rssi_dt.iloc[-1]
rssi_dt = finalTime - rssi_dt
velo = pd.read_csv("data/velocities.csv")
velo_dt = | pd.to_datetime(velo.DateTime) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 20:48:26 2018
@author: elcok
"""
import os
import sys
import numpy as np
import pandas as pd
import geopandas as gpd
import subprocess
import rasterio as rio
from rasterio.mask import mask
from shapely.geometry import mapping
from osgeo import ogr
from shapely.wkb import loads
from rasterstats import point_query
from itertools import product
sys.path.append(os.path.join( '..'))
from scripts.utils import load_config,get_num,int2date
from sklearn import metrics
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
pd.set_option('chained_assignment',None)
def region_exposure(region,include_storms=True,event_set=False,sens_analysis_storms=[],save=True):
"""Create a GeoDataframe with exposure and hazard information for each building in the specified region.
Arguments:
*region* (string) -- NUTS3 code of region to consider.
*include_storms* (bool) -- if set to False, it will only return a list of buildings and their characteristics (default: **True**)
*event_set* (bool) -- if set to True, we will calculate the exposure for the event set instead of the historical storms (default: **True**)
*sens_analysis_storms* (list) -- if empty, it will fill with default list
*save* (bool) -- boolean to decide whether you want to save the output to a csv file (default: **True**)
Returns:
*GeoDataFrame* with all **hazard** and **exposure** values.
"""
country = region[:2]
data_path = load_config()['paths']['data']
osm_path = os.path.join(data_path,'OSM','{}.osm.pbf'.format(country))
area_poly = os.path.join(data_path,country,'NUTS3_POLY','{}.poly'.format(region))
area_pbf = os.path.join(data_path,country,'NUTS3_OSM','{}.osm.pbf'.format(region))
if (region == 'UKN01') | (region == 'UKN02') | (region == 'UKN03') | (region == 'UKN04') | (region == 'UKN05'):
osm_path = os.path.join(data_path,'OSM','IE.osm.pbf')
clip_osm(data_path,osm_path,area_poly,area_pbf)
gdf_table = fetch_buildings(data_path,country,region,regional=True)
print ('Fetched all buildings from osm data for {}'.format(region))
# convert to european coordinate system for overlap
gdf_table = gdf_table.to_crs(epsg=3035)
print(len(gdf_table))
# Specify Country
gdf_table["COUNTRY"] = country
# give unique_id
gdf_table['ID_'] = [str(x)+'_'+region for x in gdf_table.index]
# Calculate area
gdf_table["AREA_m2"] = gdf_table.geometry.area
# Determine centroid
gdf_table["centroid"] = gdf_table.geometry.centroid
nuts_eu = gpd.read_file(os.path.join(data_path,'input_data','NUTS3_ETRS.shp'))
nuts_eu.loc[nuts_eu['NUTS_ID']==region].to_file(os.path.join(data_path,
country,'NUTS3_SHAPE','{}.shp'.format(region)))
# create geometry envelope outline for rasterstats. Use a buffer to make sure all buildings are in there.
geoms = [mapping(nuts_eu.loc[nuts_eu['NUTS_ID']==region].geometry.envelope.buffer(10000).values[0])]
# Get land use values
with rio.open(os.path.join(data_path,'input_data','g100_clc12_V18_5.tif')) as src:
out_image, out_transform = mask(src, geoms, crop=True)
out_image = out_image[0,:,:]
tqdm.pandas(desc='CLC_2012_'+region)
gdf_table['CLC_2012'] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))
# Obtain storm values for sensitivity analysis storms
if len(sens_analysis_storms) > 0:
storm_list = load_sens_analysis_storms(sens_analysis_storms)
for outrast_storm in storm_list:
storm_name = str(int2date(get_num(outrast_storm[-23:].split('_')[0][:-2])))
tqdm.pandas(desc=storm_name+'_'+region)
with rio.open(outrast_storm) as src:
out_image, out_transform = mask(src, geoms, crop=True)
out_image = out_image[0,:,:]
gdf_table[storm_name] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))
# Obtain storm values for historical storms
elif (include_storms == True) & (event_set == False):
storm_list = get_storm_list(data_path)
for outrast_storm in storm_list:
storm_name = str(int2date(get_num(outrast_storm[-23:].split('_')[0][:-2])))
tqdm.pandas(desc=storm_name+'_'+region)
with rio.open(outrast_storm) as src:
out_image, out_transform = mask(src, geoms, crop=True)
out_image = out_image[0,:,:]
gdf_table[storm_name] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))
gdf_table[storm_name].loc[gdf_table[storm_name] < 0] = 0
gdf_table[storm_name].loc[gdf_table[storm_name] > 500] = 0
# Obtain storm values for event set storms
elif (include_storms == True) & (event_set == True):
#geoms = [mapping(nuts_eu.loc[nuts_eu['NUTS_ID']==region].to_crs({'init': 'epsg:4326'}).geometry.envelope.buffer(0.1).values[0])]
storm_list = get_event_storm_list(data_path)[:10]
for outrast_storm in tqdm(storm_list,total=len(storm_list),desc=region):
storm_name = str(int2date(get_num(outrast_storm[-24:].split('_')[0][:-4])))
with rio.open(outrast_storm) as src:
out_image = src.read(1)
out_transform = src.transform
gdf_table[storm_name] = gdf_table.centroid.apply(lambda x: get_raster_value(x,out_image,out_transform))
if save == True:
df_exposure = pd.DataFrame(gdf_table)
df_exposure.to_csv(os.path.join(data_path,'output_exposure',country,'{}_exposure.csv'.format(region)))
print ('Obtained all storm information for {}'.format(region))
return gdf_table
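# Illustrative usage sketch (added; not part of the original module). 'FR101' is
# only a placeholder NUTS3 code and the call assumes the paths configured via
# load_config() are in place:
#
#     buildings = region_exposure('FR101', include_storms=True, event_set=False, save=False)
#     print(buildings[['ID_', 'AREA_m2', 'CLC_2012']].head())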
def region_losses(region,storm_event_set=False,sample=(5, 0,95,20,80)):
"""Estimation of the losses for all buildings in a region for a pre-defined list of storms.
Arguments:
*region* (string) -- nuts code of region to consider.
*storm_event_set* (bool) -- calculates all regions within a country parallel. Set to **False** if you have little capacity on the machine (default: **True**).
*sample* (tuple) -- tuple of parameter values. This is a dummy placeholder, should be filled with either **load_sample(country)** values or **sens_analysis_param_list**.
Returns:
*pandas Dataframe* -- pandas dataframe with all buildings of the region and their **losses** for each wind storm.
"""
data_path = load_config()['paths']['data']
country = region[:2]
#load storms
if storm_event_set == False:
storm_list = get_storm_list(data_path)
storm_name_list = [str(int2date(get_num(x[-23:].split('_')[0][:-2]))) for x in storm_list]
else:
storm_list = get_event_storm_list(data_path)
storm_name_list = [str(int2date(get_num(x[-24:].split('_')[0][:-4]))) for x in storm_list]
#load max dam
max_dam = load_max_dam(data_path)
#load curves
curves = load_curves(data_path)
output_table = region_exposure(region,include_storms=True,event_set=storm_event_set)
no_storm_columns = list(set(output_table.columns).difference(list(storm_name_list)))
write_output = pd.DataFrame(output_table[no_storm_columns])
## Calculate losses for buildings in this NUTS region
for storm in storm_name_list:
write_output[storm] = loss_calculation(storm,country,output_table,max_dam,curves,sample)
df_losses = pd.DataFrame(write_output)
## save this regional file
if storm_event_set == False:
df_losses.to_csv(os.path.join(data_path,'output_losses',country,'{}_losses.csv'.format(region)))
print ('Finished with loss calculation for {}'.format(region))
return(gpd.GeoDataFrame(write_output))
else:
#Numpify data
pdZ = np.array(df_losses[storm_name_list],dtype=int)
write_output.drop(storm_name_list, axis=1, inplace=True)
output_ =[]
for row in pdZ:
H, X1 = np.histogram(row, bins=100, density=True)
dx = X1[1] - X1[0]
F1 = np.cumsum(np.append(0,H))*dx
output_.append(metrics.auc(X1, F1))
df_losses['Risk'] = output_
df_losses.to_csv(os.path.join(data_path,'output_risk',country,'{}_risk.csv'.format(region)))
print ('Finished with risk calculation for {}'.format(region))
return(gpd.GeoDataFrame(write_output))
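# Small self-contained restatement of the risk metric used above (added sketch,
# based on the event-set branch): risk is taken as the area under the empirical
# CDF of one building's losses across all event-set storms.
def _risk_from_losses(loss_row, bins=100):
    H, X1 = np.histogram(loss_row, bins=bins, density=True)
    dx = X1[1] - X1[0]
    F1 = np.cumsum(np.append(0, H)) * dx
    return metrics.auc(X1, F1)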
def region_sens_analysis(region,samples,sens_analysis_storms=[],save=True):
"""Perform a sensitivity analysis for the specified region, based on a predefined list of storms.
Arguments:
*region* (string) -- nuts code of region to consider.
*samples* (list) -- list of tuples, where each tuple is a **unique** set of parameter values.
*sens_analysis_storms* (list) -- if empty, it will fill with default list
*save* (bool) -- boolean to decide whether you want to save the output to a csv file (default: **True**)
Returns:
*list* -- list with the total losses per storm for all parameter combinations
"""
data_path = load_config()['paths']['data']
country = region[:2]
# select storms to assess
if len(sens_analysis_storms) == 0:
sens_analysis_storms = ['19991203','19900125','20090124','20070118','19991226']
storm_list = sens_analysis_storms
all_combis = list(product(samples,storm_list))
#load max dam
max_dam = load_max_dam(data_path)
#load curves
curves = load_curves(data_path)
# get exposure table
output_table = region_exposure(region,include_storms=True,event_set=False,sens_analysis_storms=storm_list,save=True)
# calculate losses for all combinations
output_file = pd.DataFrame(index=list(range(len(samples))),columns=sens_analysis_storms)
for iter_,(sample,storm) in enumerate(all_combis):
output_file.loc[iter_,storm] = list(loss_calculation(storm,country,output_table,max_dam,curves,sample).sum())[0]
if save == True:
output_file.to_csv(os.path.join(data_path,'output_sens','{}_sens_analysis'.format(region)))
return(output_file)
def loss_calculation(storm,country,output_table,max_dam,curves,sample):
"""Calculate the losses per storm.
Arguments:
*storm* (string) -- date of the storm.
*region* (string) -- NUTS3 code of region to consider.
*output_table* (GeoDataFrame) -- GeoDataFrame with all buildings and the wind speed values for each storm.
*max_dam* (numpy array) -- table with maximum damages per building type/land-use class.
*curves* (pandas dataframe) -- fragility curves for the different building types.
*sample* (list) -- ratios of different curves used in this study. See the **Sensitivity analysis documentation** for an explanation.
Returns:
*pandas Series* -- losses to all buildings for the specified storm
"""
max_dam_country = np.asarray(max_dam[max_dam['CODE'].str.contains(country)].iloc[:,1:],dtype='int16')
df_C2 = pd.DataFrame(output_table[['AREA_m2','CLC_2012',storm]])
df_C3 = | pd.DataFrame(output_table[['AREA_m2','CLC_2012',storm]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
| assert_copy(actual.labels, expected.labels) | pandas.util.testing.assert_copy |
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try: # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Download the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attribute for a selected id are fetched and provided to the
user using the method `fetch`.
Attributes
-----------
- ds_dir str/path: diretory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this category.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
station/gauge_ids or a specified station. It can also be used to
fetch all attributes of a number of station ids either by providing
their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
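Example
-------
An illustrative sketch (the subclass name below is hypothetical; any
concrete subclass implementing `stations` works the same way):
>>> ds = CAMELS_BR()
>>> stns = ds.stations()[:3]
>>> dyn = ds.fetch(stations=stns, dynamic_features='all', as_dataframe=True)
>>> static = ds.fetch_static_features(stns[0], features='all')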
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
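# Illustrative example of as_ts=True (added sketch of the assumed behaviour):
#
#     static = pd.DataFrame([[0.3, 42.0]], columns=['aridity', 'area'])
#     ts = self.to_ts(static, '2000-01-01', '2000-01-03', as_ts=True)
#     # -> 3 daily rows, each equal to [0.3, 42.0]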
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will be under
the datasets directory."""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user want data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetches. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("module xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one station.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are by default returned as xr.Dataset unless `as_dataframe` is True, in
such a case, it is a pandas dataframe with multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en` i.e length of `DataArray`.
When the returned object is a pandas DataFrame, the first index
is `time` and the second index is `dynamic_features`. Static attributes
are always returned as a pandas DataFrame with the shape
`(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
dyn = xr.load_dataset(self.dyn_fname)  # dataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError("Either dynamic_features or static_features must be provided.")
return stns
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
as_ts : whether static attributes are to be converted into a time
series or not. If yes then the returned time series will be of
same length as that of dynamic attributes.
st : starting point from which the data is to be fetched. By default
the data will be fetched from where it is available.
en : end point of data to be fetched. By default the data will be fetched
till the end of the available record.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = | pd.DataFrame() | pandas.DataFrame |
import argparse
import csv
import json
import pandas as pd
import os
import re
def get_example_split_set_from_id(question_id):
return question_id.split('_')[1]
def preprocess_input_file(input_file, lexicon_file=None, model=None):
if lexicon_file:
lexicon = [
json.loads(line)
for line in open(lexicon_file, "r").readlines()
]
else:
lexicon = None
examples = []
with open(input_file, encoding='utf-8') as f:
lines = csv.reader(f)
header = next(lines, None)
num_fields = len(header)
assert num_fields == 5
for i, line in enumerate(lines):
assert len(line) == num_fields, "read {} fields, and not {}".format(len(line), num_fields)
question_id, source, target, _, split = line
split = get_example_split_set_from_id(question_id)
target = process_target(target)
example = {'annotation_id': '', 'question_id': question_id,
'source': source, 'target': target, 'split': split}
if model:
parsed = model(source)
example['source_parsed'] = parsed
if lexicon:
assert example['source'] == lexicon[i]['source']
example['allowed_tokens'] = lexicon[i]['allowed_tokens']
examples.append(example)
return examples
def fix_references(string):
return re.sub(r'#([1-9][0-9]?)', '@@\g<1>@@', string)
def process_target(target):
# replace multiple whitespaces with a single whitespace.
target_new = ' '.join(target.split())
# replace semi-colons with @@SEP@@ token, remove 'return' statements.
parts = target_new.split(';')
new_parts = [re.sub(r'return', '', part.strip()) for part in parts]
target_new = ' @@SEP@@ '.join([part.strip() for part in new_parts])
# replacing references with special tokens, for example replacing #2 with @@2@@.
target_new = fix_references(target_new)
return target_new.strip()
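# Illustrative example of the two helpers above (added; output derived from the
# regexes as written): semicolon-separated steps become '@@SEP@@'-separated,
# 'return' tokens are dropped and '#<n>' references become '@@<n>@@'.
#
#     process_target('return flights ;return #1 from boston')
#     # -> 'flights @@SEP@@ @@1@@ from boston'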
def write_output_files(base_path, examples, dynamic_vocab):
# Output file is suitable for the allennlp seq2seq reader and predictor.
with open(base_path + '.tsv', 'w', encoding='utf-8') as fd:
for example in examples:
if dynamic_vocab:
output = example['source'] + '\t' + example['allowed_tokens'] + '\t' + example['target'] + '\n'
else:
output = example['source'] + '\t' + example['target'] + '\n'
fd.write(output)
with open(base_path + '.json', 'w', encoding='utf-8') as fd:
for example in examples:
output_dict = {'source': example['source']}
if dynamic_vocab:
output_dict['allowed_tokens'] = example['allowed_tokens']
fd.write(json.dumps(output_dict) + '\n')
print(base_path + '.tsv')
print(base_path + '.json')
def sample_examples(examples, configuration):
df = | pd.DataFrame(examples) | pandas.DataFrame |
"""
This module tests high level dataset API functions which require entire datasets, indices, etc
"""
from collections import OrderedDict
import pandas as pd
import pandas.testing as pdt
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
def test_dataset_get_indices_as_dataframe_partition_keys_only(
dataset_with_index, store_session
):
expected = pd.DataFrame(
OrderedDict([("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
result = ds.get_indices_as_dataframe(columns=dataset_with_index.partition_keys)
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe(dataset_with_index, store_session):
expected = pd.DataFrame(
OrderedDict([("L", [1, 2]), ("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
ds = ds.load_index("L", store_session)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_duplicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict(
[
("p_external_code", ["1", "1", "2", "2"]),
("l_external_code", ["1", "2", "1", "2"]),
]
),
index=pd.Index(["part1", "part1", "part2", "part2"], name="partition"),
)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_predicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict([("p_external_code", ["1"])]),
index=pd.Index(["part1"], name="partition"),
)
result = ds.get_indices_as_dataframe(
columns=["p_external_code"], predicates=[[("p_external_code", "==", "1")]]
)
pdt.assert_frame_equal(result, expected)
result = ds.get_indices_as_dataframe(
columns=["l_external_code"], predicates=[[("l_external_code", "==", "1")]]
)
expected = pd.DataFrame(
OrderedDict([("l_external_code", "1")]),
index=pd.Index(["part1", "part2"], name="partition"),
)
pdt.assert_frame_equal(result, expected)
result = ds.get_indices_as_dataframe(
columns=["l_external_code"],
predicates=[[("l_external_code", "==", "1"), ("p_external_code", "==", "1")]],
)
expected = pd.DataFrame(
OrderedDict([("l_external_code", "1")]),
index= | pd.Index(["part1"], name="partition") | pandas.Index |
import pandas as pd
def merge_train_test(train_file, test_file, output_file):
train_set = pd.read_csv(train_file, encoding='gb18030')
print("train_set rows :", train_set.Idx.count())
test_set = pd.read_csv(test_file, encoding='gb18030')
print("test_set rows :", test_set.Idx.count())
train_set = train_set.append(test_set, ignore_index=True)
train_set.fillna({"target":-1}, inplace=True)
print("total rows :", train_set.Idx.count())
train_set.to_csv(output_file, index=False, encoding='utf-8')
def FeatureSplit(feature_type, data_file, categorical_output_file, numerical_output_file):
data = | pd.read_csv(data_file) | pandas.read_csv |
"""Alpha Vantage Model"""
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import requests
from alpha_vantage.fundamentaldata import FundamentalData
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import long_number_format
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_overview(ticker: str) -> pd.DataFrame:
"""Get alpha vantage company overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of fundamentals
"""
# Request OVERVIEW data from Alpha Vantage API
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
# Parse json data to dataframe
if "Note" in result.json():
console.print(result.json()["Note"], "\n")
return pd.DataFrame()
df_fa = pd.json_normalize(result.json())
# Keep json data sorting in dataframe
df_fa = df_fa[list(result.json().keys())].T
df_fa.iloc[5:] = df_fa.iloc[5:].applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Revenue per share t t m": "Revenue per share TTM",
"Operating margin t t m": "Operating margin TTM",
"Return on assets t t m": "Return on assets TTM",
"Return on equity t t m": "Return on equity TTM",
"Revenue t t m": "Revenue TTM",
"Gross profit t t m": "Gross profit TTM",
"Diluted e p s t t m": "Diluted EPS TTM",
"Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
"Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
"Trailing p e": "Trailing PE",
"Forward p e": "Forward PE",
"Price to sales ratio t t m": "Price to sales ratio TTM",
"E v to revenue": "EV to revenue",
"E v to e b i t d a": "EV to EBITDA",
}
)
return df_fa
return pd.DataFrame()
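# Illustrative usage sketch (added; not part of the original module). Requires a
# valid cfg.API_KEY_ALPHAVANTAGE and uses "AAPL" purely as a placeholder ticker:
#
#     overview = get_overview("AAPL")
#     if not overview.empty:
#         console.print(overview.loc[["PE ratio", "EPS", "Market capitalization"]])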
@log_start_end(log=logger)
def get_key_metrics(ticker: str) -> pd.DataFrame:
"""Get key metrics from overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of key metrics
"""
# Request OVERVIEW data
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
df_fa = pd.json_normalize(result.json())
df_fa = df_fa[list(result.json().keys())].T
df_fa = df_fa.applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Return on equity t t m": "Return on equity TTM",
"Price to sales ratio t t m": "Price to sales ratio TTM",
}
)
as_key_metrics = [
"Market capitalization",
"EBITDA",
"EPS",
"PE ratio",
"PEG ratio",
"Price to book ratio",
"Return on equity TTM",
"Price to sales ratio TTM",
"Dividend yield",
"50 day moving average",
"Analyst target price",
"Beta",
]
return df_fa.loc[as_key_metrics]
return pd.DataFrame()
@log_start_end(log=logger)
def get_income_statements(
ticker: str, number: int, quarterly: bool = False
) -> pd.DataFrame:
"""Get income statements for company
Parameters
----------
ticker : str
Stock ticker
number : int
Number of past to get
quarterly : bool, optional
Flag to get quarterly instead of annual, by default False
Returns
-------
pd.DataFrame
Dataframe of income statements
"""
url = f"https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
r = requests.get(url)
if r.status_code == 200:
statements = r.json()
df_fa = pd.DataFrame()
if quarterly:
if "quarterlyReports" in statements:
df_fa = pd.DataFrame(statements["quarterlyReports"])
else:
if "annualReports" in statements:
df_fa = pd.DataFrame(statements["annualReports"])
if df_fa.empty:
return pd.DataFrame()
df_fa = df_fa.set_index("fiscalDateEnding")
df_fa = df_fa.head(number)
df_fa = df_fa.applymap(lambda x: long_number_format(x))
return df_fa[::-1].T
return pd.DataFrame()
@log_start_end(log=logger)
def get_balance_sheet(
ticker: str, number: int, quarterly: bool = False
) -> pd.DataFrame:
"""Get balance sheets for company
Parameters
----------
ticker : str
Stock ticker
number : int
Number of past to get
quarterly : bool, optional
Flag to get quarterly instead of annual, by default False
Returns
-------
pd.DataFrame
Dataframe of income statements
"""
url = f"https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
r = requests.get(url)
if r.status_code == 200:
statements = r.json()
df_fa = pd.DataFrame()
if quarterly:
if "quarterlyReports" in statements:
df_fa = pd.DataFrame(statements["quarterlyReports"])
else:
if "annualReports" in statements:
df_fa = pd.DataFrame(statements["annualReports"])
if df_fa.empty:
return pd.DataFrame()
df_fa = df_fa.set_index("fiscalDateEnding")
df_fa = df_fa.head(number)
df_fa = df_fa.applymap(lambda x: long_number_format(x))
return df_fa[::-1].T
return pd.DataFrame()
@log_start_end(log=logger)
def get_cash_flow(ticker: str, number: int, quarterly: bool = False) -> pd.DataFrame:
"""Get cash flows for company
Parameters
----------
ticker : str
Stock ticker
number : int
Number of past to get
quarterly : bool, optional
Flag to get quarterly instead of annual, by default False
Returns
-------
pd.DataFrame
Dataframe of income statements
"""
url = f"https://www.alphavantage.co/query?function=CASH_FLOW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
r = requests.get(url)
if r.status_code == 200:
statements = r.json()
df_fa = pd.DataFrame()
if quarterly:
if "quarterlyReports" in statements:
df_fa = pd.DataFrame(statements["quarterlyReports"])
else:
if "annualReports" in statements:
df_fa = | pd.DataFrame(statements["annualReports"]) | pandas.DataFrame |
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import logging
import os
import subprocess
from pathlib import Path
from typing import Any
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml, load_svmlight_file
from sklearn.model_selection import train_test_split
from .loader_utils import retrieve
def a_nine_a(dataset_dir: Path) -> bool:
"""
Author: <NAME>, <NAME>
libSVM, AAD group
Source: original - Date unknown
Site: http://archive.ics.uci.edu/ml/datasets/Adult
Classification task. n_classes = 2.
a9a X train dataset (39073, 123)
a9a y train dataset (39073, 1)
a9a X test dataset (9769, 123)
a9a y test dataset (9769, 1)
"""
dataset_name = 'a9a'
os.makedirs(dataset_dir, exist_ok=True)
X, y = fetch_openml(name='a9a', return_X_y=True,
as_frame=False, data_home=dataset_dir)
X = pd.DataFrame(X.todense())
y = pd.DataFrame(y)
y[y == -1] = 0
logging.info(f'{dataset_name} is loaded, started parsing...')
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=11)
for data, name in zip((x_train, x_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
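# Added sketch (not in the original file): a small reader for the split files the
# loaders in this module write; assumes the <name>_{x,y}_{train,test}.npy naming
# convention used above.
def _load_saved_split(dataset_dir: Path, dataset_name: str):
    return tuple(
        np.load(os.path.join(dataset_dir, f'{dataset_name}_{part}.npy'),
                allow_pickle=True)
        for part in ('x_train', 'x_test', 'y_train', 'y_test')
    )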
def airline(dataset_dir: Path) -> bool:
"""
Airline dataset
http://kt.ijs.si/elena_ikonomovska/data.html
TaskType:binclass
NumberOfFeatures:13
NumberOfInstances:115M
"""
dataset_name = 'airline'
os.makedirs(dataset_dir, exist_ok=True)
url = 'http://kt.ijs.si/elena_ikonomovska/datasets/airline/airline_14col.data.bz2'
local_url = os.path.join(dataset_dir, os.path.basename(url))
if not os.path.isfile(local_url):
logging.info(f'Started loading {dataset_name}')
retrieve(url, local_url)
logging.info(f'{dataset_name} is loaded, started parsing...')
cols = [
"Year", "Month", "DayofMonth", "DayofWeek", "CRSDepTime",
"CRSArrTime", "UniqueCarrier", "FlightNum", "ActualElapsedTime",
"Origin", "Dest", "Distance", "Diverted", "ArrDelay"
]
# load the data as int16
dtype = np.int16
dtype_columns = {
"Year": dtype, "Month": dtype, "DayofMonth": dtype, "DayofWeek": dtype,
"CRSDepTime": dtype, "CRSArrTime": dtype, "FlightNum": dtype,
"ActualElapsedTime": dtype, "Distance":
dtype,
"Diverted": dtype, "ArrDelay": dtype,
}
df: Any = pd.read_csv(local_url, names=cols, dtype=dtype_columns)
# Encode categoricals as numeric
for col in df.select_dtypes(['object']).columns:
df[col] = df[col].astype("category").cat.codes
# Turn into binary classification problem
df["ArrDelayBinary"] = 1 * (df["ArrDelay"] > 0)
X = df[df.columns.difference(["ArrDelay", "ArrDelayBinary"])
].to_numpy(dtype=np.float32)
y = df["ArrDelayBinary"].to_numpy(dtype=np.float32)
del df
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
for data, name in zip((X_train, X_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
def airline_ohe(dataset_dir: Path) -> bool:
"""
Dataset from szilard benchmarks: https://github.com/szilard/GBM-perf
TaskType:binclass
NumberOfFeatures:700
NumberOfInstances:10100000
"""
dataset_name = 'airline-ohe'
os.makedirs(dataset_dir, exist_ok=True)
url_train = 'https://s3.amazonaws.com/benchm-ml--main/train-10m.csv'
url_test = 'https://s3.amazonaws.com/benchm-ml--main/test.csv'
local_url_train = os.path.join(dataset_dir, os.path.basename(url_train))
local_url_test = os.path.join(dataset_dir, os.path.basename(url_test))
if not os.path.isfile(local_url_train):
logging.info(f'Started loading {dataset_name} train')
retrieve(url_train, local_url_train)
if not os.path.isfile(local_url_test):
logging.info(f'Started loading {dataset_name} test')
retrieve(url_test, local_url_test)
logging.info(f'{dataset_name} is loaded, started parsing...')
sets = []
labels = []
categorical_names = ["Month", "DayofMonth",
"DayOfWeek", "UniqueCarrier", "Origin", "Dest"]
for local_url in [local_url_train, local_url_test]:
        # read only the first 1e6 rows of the 10M-row training file; the test file is read fully
        nrows = 1000000 if local_url.endswith('train-10m.csv') else None
        df = pd.read_csv(local_url, nrows=nrows)
        X = df.drop(columns='dep_delayed_15min')
y: Any = df["dep_delayed_15min"]
y_num = np.where(y == "Y", 1, 0)
sets.append(X)
labels.append(y_num)
n_samples_train = sets[0].shape[0]
X_final: Any = pd.concat(sets)
X_final = pd.get_dummies(X_final, columns=categorical_names)
sets = [X_final[:n_samples_train], X_final[n_samples_train:]]
for data, name in zip((sets[0], sets[1], labels[0], labels[1]),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data) # type: ignore
logging.info(f'dataset {dataset_name} is ready.')
return True
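# Hedged sketch of why the train and test frames are concatenated before
# pd.get_dummies above: encoding the splits separately can produce different
# dummy columns when a category appears in only one split, while encoding the
# concatenation keeps the column sets aligned. Toy data, illustrative only.
def _demo_dummy_alignment():
    train = pd.DataFrame({"Origin": ["SFO", "JFK"]})
    test = pd.DataFrame({"Origin": ["SFO", "ORD"]})  # 'ORD' is absent from train
    both = pd.get_dummies(pd.concat([train, test]), columns=["Origin"])
    return both[:len(train)], both[len(train):]  # identical columns in both parts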
def bosch(dataset_dir: Path) -> bool:
"""
Bosch Production Line Performance data set
https://www.kaggle.com/c/bosch-production-line-performance
Requires Kaggle API and API token (https://github.com/Kaggle/kaggle-api)
Contains missing values as NaN.
TaskType:binclass
NumberOfFeatures:968
NumberOfInstances:1.184M
"""
dataset_name = 'bosch'
os.makedirs(dataset_dir, exist_ok=True)
filename = "train_numeric.csv.zip"
local_url = os.path.join(dataset_dir, filename)
if not os.path.isfile(local_url):
logging.info(f'Started loading {dataset_name}')
args = ["kaggle", "competitions", "download", "-c",
"bosch-production-line-performance", "-f", filename, "-p", str(dataset_dir)]
_ = subprocess.check_output(args)
logging.info(f'{dataset_name} is loaded, started parsing...')
X = pd.read_csv(local_url, index_col=0, compression='zip', dtype=np.float32)
y = X.iloc[:, -1].to_numpy(dtype=np.float32)
X.drop(X.columns[-1], axis=1, inplace=True)
X_np = X.to_numpy(dtype=np.float32)
X_train, X_test, y_train, y_test = train_test_split(X_np, y, random_state=77,
test_size=0.2,
)
for data, name in zip((X_train, X_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
def census(dataset_dir: Path) -> bool:
"""
    # TODO: add a loading instruction
"""
return False
def codrnanorm(dataset_dir: Path) -> bool:
"""
Abstract: Detection of non-coding RNAs on the basis of predicted secondary
structure formation free energy change.
Author: <NAME>,<NAME>,<NAME>.
Source: [original](http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets)
Classification task. n_classes = 2.
codrnanorm X train dataset (390852, 8)
codrnanorm y train dataset (390852, 1)
codrnanorm X test dataset (97713, 8)
codrnanorm y test dataset (97713, 1)
"""
dataset_name = 'codrnanorm'
os.makedirs(dataset_dir, exist_ok=True)
X, y = fetch_openml(name='codrnaNorm', return_X_y=True,
as_frame=False, data_home=dataset_dir)
X = pd.DataFrame(X.todense())
y = pd.DataFrame(y)
logging.info(f'{dataset_name} is loaded, started parsing...')
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
for data, name in zip((x_train, x_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
def epsilon(dataset_dir: Path) -> bool:
"""
Epsilon dataset
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
TaskType:binclass
NumberOfFeatures:2000
NumberOfInstances:500K
"""
dataset_name = 'epsilon'
os.makedirs(dataset_dir, exist_ok=True)
url_train = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
'/epsilon_normalized.bz2'
url_test = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
'/epsilon_normalized.t.bz2'
local_url_train = os.path.join(dataset_dir, os.path.basename(url_train))
local_url_test = os.path.join(dataset_dir, os.path.basename(url_test))
if not os.path.isfile(local_url_train):
logging.info(f'Started loading {dataset_name}, train')
retrieve(url_train, local_url_train)
if not os.path.isfile(local_url_test):
logging.info(f'Started loading {dataset_name}, test')
retrieve(url_test, local_url_test)
logging.info(f'{dataset_name} is loaded, started parsing...')
X_train, y_train = load_svmlight_file(local_url_train,
dtype=np.float32)
X_test, y_test = load_svmlight_file(local_url_test,
dtype=np.float32)
X_train = X_train.toarray()
X_test = X_test.toarray()
y_train[y_train <= 0] = 0
y_test[y_test <= 0] = 0
for data, name in zip((X_train, X_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
def fraud(dataset_dir: Path) -> bool:
"""
Credit Card Fraud Detection contest
https://www.kaggle.com/mlg-ulb/creditcardfraud
Requires Kaggle API and API token (https://github.com/Kaggle/kaggle-api)
Contains missing values as NaN.
TaskType:binclass
NumberOfFeatures:28
NumberOfInstances:285K
"""
dataset_name = 'fraud'
os.makedirs(dataset_dir, exist_ok=True)
filename = "creditcard.csv"
local_url = os.path.join(dataset_dir, filename)
if not os.path.isfile(local_url):
logging.info(f'Started loading {dataset_name}')
args = ["kaggle", "datasets", "download", "mlg-ulb/creditcardfraud", "-f",
filename, "-p", str(dataset_dir)]
_ = subprocess.check_output(args)
logging.info(f'{dataset_name} is loaded, started parsing...')
df = pd.read_csv(local_url + ".zip", dtype=np.float32)
X = df[[col for col in df.columns if col.startswith('V')]].to_numpy(dtype=np.float32)
y = df['Class'].to_numpy(dtype=np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
test_size=0.2,
)
for data, name in zip((X_train, X_test, y_train, y_test),
('x_train', 'x_test', 'y_train', 'y_test')):
filename = f'{dataset_name}_{name}.npy'
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
def gisette(dataset_dir: Path) -> bool:
"""
GISETTE is a handwritten digit recognition problem.
The problem is to separate the highly confusable digits '4' and '9'.
This dataset is one of five datasets of the NIPS 2003 feature selection challenge.
Classification task. n_classes = 2.
gisette X train dataset (6000, 5000)
gisette y train dataset (6000, 1)
gisette X test dataset (1000, 5000)
gisette y test dataset (1000, 1)
"""
dataset_name = 'gisette'
os.makedirs(dataset_dir, exist_ok=True)
cache_dir = os.path.join(dataset_dir, '_gisette')
os.makedirs(cache_dir, exist_ok=True)
    base_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/'
    gisette_train_data_url = base_url + '/gisette/GISETTE/gisette_train.data'
    filename_train_data = os.path.join(cache_dir, 'gisette_train.data')
    if not os.path.exists(filename_train_data):
        retrieve(gisette_train_data_url, filename_train_data)
    gisette_train_labels_url = base_url + '/gisette/GISETTE/gisette_train.labels'
    filename_train_labels = os.path.join(cache_dir, 'gisette_train.labels')
    if not os.path.exists(filename_train_labels):
        retrieve(gisette_train_labels_url, filename_train_labels)
    gisette_test_data_url = base_url + '/gisette/GISETTE/gisette_valid.data'
    filename_test_data = os.path.join(cache_dir, 'gisette_valid.data')
    if not os.path.exists(filename_test_data):
        retrieve(gisette_test_data_url, filename_test_data)
    gisette_test_labels_url = base_url + '/gisette/gisette_valid.labels'
    filename_test_labels = os.path.join(cache_dir, 'gisette_valid.labels')
    if not os.path.exists(filename_test_labels):
        retrieve(gisette_test_labels_url, filename_test_labels)
logging.info(f'{dataset_name} is loaded, started parsing...')
num_cols = 5000
df_train = | pd.read_csv(filename_train_data, header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 21:26:21 2021
@author: Gary
"""
import pandas as pd
import numpy as np
large_perc_value = 50
lower_perc_tolerance = 95
upper_perc_tolerance = 105
massComp_upper_limit = 0.1
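# Hedged helper sketch (an assumption about intent, not part of the original
# script): a disclosure's valid-percentage total is acceptable when it lies
# strictly between the tolerances above, mirroring the c1/c2 test in
# calc_overall_percentages below.
def is_within_total_tolerance(total_percent):
    return (total_percent > lower_perc_tolerance) & (total_percent < upper_perc_tolerance)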
# def rec_has_it(rec,place):
# if 'within_total_tolerance' in list(rec.columns):
# print(f'rec_df has "wi_tot_tol" at <{place}>')
def calc_overall_percentages(rec_df,disc_df):
# valid CAS here must include proprietary and carriers because they should be included in valid
# percentages.
rec_df['is_valid_cas'] = rec_df.bgCAS.str[0].isin(['0','1','2','3','4',
'5','6','7','8','9'])
rec_df.is_valid_cas = np.where(rec_df.bgCAS.isin(['proprietary','conflictingID']),
True,rec_df.is_valid_cas)
valid = rec_df[rec_df.is_valid_cas|rec_df.is_water_carrier]\
.groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
valid.columns = ['UploadKey','total_percent_of_valid']
    c1 = valid.total_percent_of_valid > lower_perc_tolerance
    c2 = valid.total_percent_of_valid < upper_perc_tolerance
    valid['within_total_tolerance'] = c1 & c2
if 'within_total_tolerance' in disc_df.columns:
disc_df = disc_df.drop(['within_total_tolerance'],axis=1)
allrecs = rec_df.groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
allrecs.columns = ['UploadKey','total_percent_all_records']
disc_df = pd.merge(disc_df,valid,on='UploadKey',how='left')
disc_df = | pd.merge(disc_df,allrecs,on='UploadKey',how='left') | pandas.merge |
import pandas as pd
import json
from jsonschema import validate
from os import path
import sys
import PySimpleGUI as sg
raw_data = None
class_list = None
DayInfo = None
delta_max = None
UUID_to_email = None
def set_data_vars(data_path, json_path):
"""
Sets global data variables to be used in ``ElementCollection.py`` and ``Main.py``, including
* raw_data: the dataframe read from the raw data's csv
* class_list: list of unique emails from raw_data (we assume this is acceptable as a list of everyone in the class)
* DayInfo: the JSON data imported into the program, if the user uses a JSON input
:param data_path: path to the raw_data csv
:param json_path: path to the JSON file, or -1 if the user isn't using a JSON file to provide H5P IDs
"""
global raw_data
global class_list
global DayInfo
global delta_max
global UUID_to_email
raw_data = | pd.read_csv(data_path) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ast
from IPython.display import display
from matplotlib.patches import Ellipse
import math
# path imports
from astropy.coordinates import SkyCoord
from astropath import path
from astropath import localization
from astropath import chance
from astropath import bayesian
def get_candidates(frb_loc, r, true_gal=-1, gal_cat=None):
    '''Helper function for single_path: grabs all galaxies within a square box of half-width r (arcsec) around a central location'''
radius = r/3600
dec = gal_cat[np.logical_and(gal_cat['dec'] < frb_loc[1]+radius, gal_cat['dec'] > frb_loc[1]-radius)]
candidates = dec[np.logical_and(dec['ra'] < frb_loc[0]+radius, dec['ra'] > frb_loc[0]-radius)]
return candidates
def single_path(frb, cand_info, offset_info, search_rad=15, plot=False, gal_cat=None):
'''
Runs PATH for a single frb given assumed priors, p_u, and search radius.
Plotting returns all the candidates colored by their association probability.
The orange ring is the frb ellipse, green ring means there was a correct association
if there is an incorrect association the guessed galaxy will be red and the true will be cyan
Parameters:
frb (arr) containing the values of a single row from the output of sim_frbs
cand_info (tuple) (unknown probability (float), keyword for Aggarwal cand_priors)
offset_info (tuple) (maximum size (int), keyword for Aggarwal off_priors (str))
search_rad (int) radius to search around frb in arcsec
plot (boolean) True plots
Returns:
dataframe of candidates, and thier association probabilities
Example:
candidates = single_path(frbs.iloc[22], (0., 'inverse'), (6, 'exp'), search_rad=7, plot=True)
'''
Path = path.PATH()
# init frb
frb_coord = SkyCoord(frb[0], frb[1], unit='deg')
eellipse = dict(a=frb[2], b=frb[2], theta=0.)
Path.init_localization('eellipse', center_coord=frb_coord, eellipse=eellipse)
# init candidates
candidates = get_candidates((frb_coord.ra.value, frb_coord.dec.value), search_rad, gal_cat=gal_cat)
Path.init_candidates(candidates.ra.values,
candidates.dec.values,
candidates.diskSize.values,
mag=candidates.Rc.values,
sfr=candidates.sfr.values)
# init priors
P_u, cand_pdf = cand_info
mx, off_pdf = offset_info
Path.init_cand_prior(cand_pdf, P_U=P_u)
Path.init_theta_prior(off_pdf, mx)
# calculating
Path.calc_priors()
P_Ox, P_Ux = Path.calc_posteriors('fixed', box_hwidth=30., max_radius=30)
# adding true galaxy index to results df
Path.candidates['gal_Index'] = candidates.index
# adding probabilities to candidates for easier plotting
candidates['pOx'] = Path.candidates['P_Ox'].values
if plot:
figure, axes = plt.subplots(figsize=(10, 10))
display(candidates[candidates['pOx']>.05])
# plotting galaxies based on probability
for i in candidates.sort_values('diskSize', ascending=False).values:
axes.add_patch(plt.Circle((i[0], i[1]), i[3]/3600, facecolor=plt.cm.Blues(i[-1]), alpha=1, edgecolor='k'))
# circle outlines for frbs, true_gal, guessed_gal
axes.add_patch(plt.Circle((frb[0], frb[1]), frb[2]/3600, fill=False, color='tab:orange', linewidth=2))
tru = gal_cat[gal_cat.index == frb[-1]].values[0] # getting tru_gal variables
axes.add_patch(plt.Circle((tru[0], tru[1]), tru[3]/3600, fill=False, edgecolor='tab:cyan', linewidth=2))
best_index = Path.candidates[Path.candidates.P_Ox == Path.candidates.P_Ox.max()]['gal_Index'].values[0]
best = gal_cat[gal_cat.index == best_index].values[0] # getting best_gal variables
if frb[-1]==best_index:
axes.add_patch(plt.Circle((best[0], best[1]), best[3]/3600, fill=False, edgecolor='tab:green', linewidth=2))
else:
axes.add_patch(plt.Circle((best[0], best[1]), best[3]/3600, fill=False, edgecolor='tab:red', linewidth=2))
# making color map
colors = candidates.pOx.values
colors[-1] = 1.
plt.scatter(candidates.ra.values, candidates.dec.values, c=colors, alpha=1)
plt.gca().set_aspect('equal', adjustable='box')
plt.colorbar()
return Path.candidates
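# Hedged sketch of the galaxy-catalog layout that get_candidates/single_path assume:
# a DataFrame with 'ra'/'dec' in degrees plus the per-galaxy columns referenced
# above ('diskSize', presumably an angular size in arcsec, 'Rc' magnitude, 'sfr').
# The values below are placeholders, not real catalog entries.
def _toy_gal_cat():
    return pd.DataFrame({
        'ra': [150.001, 150.002],      # deg
        'dec': [2.201, 2.203],         # deg
        'diskSize': [0.8, 1.2],        # angular size
        'Rc': [21.5, 22.1],            # magnitude
        'sfr': [0.5, 1.0],             # star-formation rate
    })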
def multiple_path(frbs, cand_info, offset_info, search_rad=15, save=None, plot=False, gal_cat=None):
'''
Runs path for an entire catalog of frbs, saves in csv
Parameters:
frbs (arr) output of sim_frbs
cand_info (tuple) (unknown probability (float), keyword for Aggarwal cand_priors)
offset_info (tuple) (maximum size (int), keyword for Aggarwal off_priors (str))
search_rad (int) radius to search around frb in arcsec
save (str) filename which will be appended with the length of the input frb cat
plot (boolean) True plots
Returns:
dataframe of important statistics for analysis
Example:
multiple_path(frbs, (0.05, 'inverse'), (6, 'exp'), search_rad=7, save='inverse', gal_cat=galaxies)
'''
stats = []
count = 0
for i, r in frbs.iterrows():
results = single_path(r, cand_info, offset_info, search_rad=search_rad, plot=False, gal_cat=gal_cat)
pox = results.P_Ox.values
true_gal = r[-1]
best_gal = results[results.P_Ox == results.P_Ox.max()]['gal_Index'].values[0]
stats.append([pox[pox > .01], max(pox), best_gal==true_gal, true_gal, len(results)])
count += 1
if count%500==0:
print('{} '.format(count), end='')
stat = | pd.DataFrame(stats, columns=['all_pOx', 'max_pOx', 'correct', 'gal_Index', 'num_cand']) | pandas.DataFrame |
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import scatter_matrix
import yfinance as yf
#import talib
#%matplotlib inline
start = '2017-02-19'
end = '2022-2-19'
sp500 = yf.download('^GSPC', start, end)
# Moving Averages https://www.analyticsvidhya.com/blog/2021/07/stock-prices-analysis-with-python/#h2_5
def MA(data_frame, days):
    name = 'MA' + str(days)
    # yfinance returns a capitalised 'Close' column
    data_frame[name] = data_frame['Close'].rolling(days).mean()
    return data_frame
# RSI https://wire.insiderfinance.io/calculate-rsi-with-python-and-yahoo-finance-c8fb78b1c199
def RSI(data, window = 14, adjust = False):
delta = data['Close'].diff(1).dropna()
loss = delta.copy()
gains = delta.copy()
gains[gains < 0] = 0
loss[loss > 0] = 0
gain_ewm = gains.ewm(com = window - 1, adjust = adjust).mean()
loss_ewm = abs(loss.ewm(com = window - 1, adjust = adjust).mean())
RS = gain_ewm / loss_ewm
RSI = 100 - 100/ (1 + RS)
return RSI
reversed_df = sp500.iloc[::-1]
#sp500['RSI'] = talib.RSI(reversed_df['Close'], 14)
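# Hedged alternative to the commented-out talib call above: the pure-pandas RSI
# defined in this script produces a comparable 14-period column without the talib
# dependency. Illustrative; remove if talib is preferred.
sp500['RSI'] = RSI(sp500, window=14)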
locator = mdates.MonthLocator(interval = 3)
fmt = mdates.DateFormatter('%b')
#KDJ https://github.com/Abhay64/KDJ-Indicator
array_close = np.array(sp500['Close'])
array_high = np.array(sp500['High'])
array_low = np.array(sp500['Low'])
z = 0
y = 0
highest = 0
lowest = 0
kperiods = 13  # 14 periods in total; the array index starts at 0
array_highest = []
array_lowest = []
for i in range(0, array_high.size - kperiods):
highest = array_high[y]
for j in range(0, kperiods):
if(highest < array_high[y + 1]):
highest = array_high[y + 1]
y = y + 1
# creating list highest of k periods
array_highest.append(highest)
y = y - (kperiods - 1)
for i in range(0, array_low.size - kperiods):
lowest = array_low[z]
for j in range(0, kperiods):
if(lowest > array_low[z + 1]):
lowest = array_low[z + 1]
z = z + 1
# creating list lowest of k periods
array_lowest.append(lowest)
# skip one from starting after each iteration
z = z - (kperiods - 1)
#KDJ (K line, D line, J line)
# K line
Kvalue = []
for i in range(kperiods,array_close.size):
k = ((array_close[i] - array_lowest[i - kperiods]) * 100 / (array_highest[i - kperiods] - array_lowest[i - kperiods]))
Kvalue.append(k)
sp500['K'] = pd.Series(Kvalue)
# D line
x = 0
# dperiods for calculate d values
dperiods = 3
Dvalue = [None, None]
mean = 0
for i in range(0, len(Kvalue) - dperiods + 1):
sum = 0
for j in range(0, dperiods):
sum = Kvalue[x] + sum
x = x + 1
mean = sum / dperiods
# d values for %d line adding in the list Dvalue
Dvalue.append(mean)
# skip one from starting after each iteration
x = x - (dperiods - 1)
sp500['D'] = pd.Series(Dvalue)
# J line
Jvalue = [None, None]
for i in range(0, len(Dvalue) - dperiods + 1):
j = (Dvalue[i + 2] * 3) - (Kvalue[i + 2] * 2)
# j values for %j line
Jvalue.append(j)
sp500['J'] = | pd.Series(Jvalue) | pandas.Series |
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(patient, slide, tumor, label, root_dir):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('.p', id.split('y-', 1)[1])[0]) / fac)
ids.append([patient, slide, tumor, label, level, dirr + '/' + id, x, y])
ids = pd.DataFrame(ids, columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level', 'path', 'x', 'y'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L1path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L2path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L3path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
print('Pass: ', root_dir)
idsa = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
return idsa
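# Hedged worked example of the coordinate pairing above (assumed tiling scheme):
# a level-1 tile is matched to the level-2 tile at the same (x, y), and to the
# level-3 tile after flooring (x, y) to even values, since one level-3 tile
# presumably covers a 2x2 block of finer coordinates.
def _align_to_level3(x: int, y: int) -> tuple:
    """Illustrative only: (5, 7) -> (4, 6)."""
    return x - (x % 2), y - (y % 2)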
# Prepare label at per patient level
def big_image_sum(label_col, path, ref_file, pdmd='tumor', exclude=None):
ref = pd.read_csv(ref_file, header=0)
big_images = []
ref = ref.loc[ref[label_col].notna()]
for idx, row in ref.iterrows():
big_images.append([row['Patient_ID'], row['Slide_ID'], row['Tumor'],
path + "/{}/{}/{}/".format(str(row['Tumor']), str(row['Patient_ID']),
row['Slide_ID'].split('-')[-1]), row[label_col]])
datapd = pd.DataFrame(big_images, columns=['Patient_ID', 'Slide_ID', 'Tumor', 'path', 'label'])
datapd = datapd.dropna()
if exclude:
datapd = datapd[~datapd['Tumor'].isin(exclude)]
if pdmd != 'origin':
rm = []
for tu in list(datapd.Tumor.unique()):
for lb in list(datapd.label.unique()):
if datapd.loc[(datapd['Tumor'] == tu) & (datapd['label'] == lb)].shape[0] < 3:
rm.append(tu)
datapd = datapd[~datapd['Tumor'].isin(rm)]
print('Remove rare case cancer types if any: ', rm, flush=True)
return datapd
# seperate into training and testing; each type is the same separation ratio on big images
# test and train csv files contain tiles' path.
def set_sep(alll, path, cut=0.3):
trlist = []
telist = []
valist = []
for tm in list(alll.Tumor.unique()):
sub = alll[alll['Tumor'] == tm]
unq = list(sub.Patient_ID.unique())
np.random.shuffle(unq)
validation = unq[:np.max([int(len(unq) * cut / 2), 1])]
valist.append(sub[sub['Patient_ID'].isin(validation)])
test = unq[np.max([int(len(unq) * cut / 2), 1]):np.max([int(len(unq) * cut), 2])]
telist.append(sub[sub['Patient_ID'].isin(test)])
train = unq[np.max([int(len(unq) * cut), 2]):]
trlist.append(sub[sub['Patient_ID'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test.to_csv(path + '/te_sample_raw.csv', header=True, index=False)
train.to_csv(path + '/tr_sample_raw.csv', header=True, index=False)
validation.to_csv(path + '/va_sample_raw.csv', header=True, index=False)
test_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
train_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
validation_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['Patient_ID'], row['Slide_ID'], row['Tumor'], row['label'], row['path'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['Patient_ID'], row['Slide_ID'], row['Tumor'], row['label'], row['path'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['Patient_ID'], row['Slide_ID'], row['Tumor'], row['label'], row['path'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
test_tiles = test_tiles.sort_values(by=['Tumor', 'Slide_ID'], ascending=True)
test_tiles.to_csv(path+'/te_sample_full.csv', header=True, index=False)
train_tiles.to_csv(path+'/tr_sample_full.csv', header=True, index=False)
validation_tiles.to_csv(path+'/va_sample_full.csv', header=True, index=False)
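# Hedged usage sketch of the preparation pipeline (paths and the label column are
# placeholder assumptions): collect per-slide labels, then write the train,
# validation and test CSVs.
def _example_prepare(label_col='label', img_root='images', ref_csv='labels.csv', out_dir='.'):
    alll = big_image_sum(label_col, img_root, ref_csv)
    set_sep(alll, out_dir, cut=0.3)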
# TO KEEP SPLIT SAME AS BASELINES. seperate into training and testing; each type is the same separation
# ratio on big images test and train csv files contain tiles' path.
def set_sep_secondary(path, label_col, splitfile, exclude=None):
split = pd.read_csv(splitfile, header=0)
test = split[split['set'] == 'test']
test = test.drop(columns=['set'])
train = split[split['set'] == 'train']
train = train.drop(columns=['set'])
validation = split[split['set'] == 'validation']
validation = validation.drop(columns=['set'])
test.to_csv(path + '/te_sample_raw.csv', header=True, index=False)
train.to_csv(path + '/tr_sample_raw.csv', header=True, index=False)
validation.to_csv(path + '/va_sample_raw.csv', header=True, index=False)
test_tiles = | pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path']) | pandas.DataFrame |
from numpy.ma import add
import pandas as pd
import numpy as np
np.seterr(divide='ignore')
import scipy.signal as signal
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.multitest as multi
from scipy.optimize import curve_fit
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from scipy.stats import percentileofscore
from scipy.stats import circstd, circmean
import copy
import itertools
from matplotlib.lines import Line2D
from random import sample
import os
from skopt.space import Space
from skopt.sampler import Lhs
def periodogram_df(df, folder = '', **kwargs):
names = list(df.test.unique())
names.sort()
for name in names:
x, y = np.array(df[df.test == name].x), np.array(df[df.test == name].y)
if folder:
save_to = os.path.join(folder, "per_" + name)
else:
save_to = ""
periodogram(x,y, save_to = save_to, name=name, **kwargs)
def periodogram(X, Y, per_type='per', sampling_f = '', logscale = False, name = '', save_to = '', prominent = False, max_per = 240):
if per_type == 'per' or per_type == 'welch':
X_u = np.unique(X)
Y_u = []
for x_u in X_u:
#y_u.append(np.mean(y[t == x]))
Y_u.append(np.median(Y[x_u == X]))
if not sampling_f:
sampling_f = 1/(X[1]-X[0])
Y = Y_u
if per_type == 'per':
# Fourier
f, Pxx_den = signal.periodogram(Y,sampling_f)
elif per_type =='welch':
# Welch
f, Pxx_den = signal.welch(Y,sampling_f)
elif per_type == 'lombscargle':
# Lomb-Scargle
min_per = 2
#max_per = 50
f = np.linspace(1/max_per, 1/min_per, 10)
Pxx_den = signal.lombscargle(X, Y, f)
else:
print("Invalid option")
return
# significance
# Refinetti et al. 2007
p_t = 0.05
N = len(Y)
T = (1 - (p_t/N)**(1/(N-1))) * sum(Pxx_den) # threshold for significance
if f[0] == 0:
per = 1/f[1:]
Pxx = Pxx_den[1:]
else:
per = 1/f
Pxx = Pxx_den
Pxx = Pxx[per <= max_per]
per = per[per <= max_per]
try:
if logscale:
plt.semilogx(per, Pxx, 'ko')
plt.semilogx(per, Pxx, 'k--', linewidth=0.5)
plt.semilogx([min(per), max(per)], [T, T], 'k--', linewidth=1)
else:
plt.plot(per, Pxx, 'ko')
plt.plot(per, Pxx, 'k--', linewidth=0.5)
plt.plot([min(per), max(per)], [T, T], 'k--', linewidth=1)
except:
print("Could not plot!")
return
peak_label = ''
if prominent:
locs, heights = signal.find_peaks(Pxx, height = T)
if any(locs):
heights = heights['peak_heights']
s = list(zip(heights, locs))
s.sort(reverse=True)
heights, locs = zip(*s)
heights = np.array(heights)
locs = np.array(locs)
peak_label = ', max peak=' + str(per[locs[0]])
else:
locs = Pxx >= T
if any(locs):
heights, locs = Pxx[locs], per[locs]
HL = list(zip(heights, locs))
HL.sort(reverse = True)
heights, locs = zip(*HL)
peak_label = ', peaks=\n'
locs = locs[:11]
for loc in locs[:-1]:
peak_label += "{:.2f}".format(loc) + ','
peak_label += "{:.2f}".format(locs[-1])
plt.xlabel('period [hours]')
plt.ylabel('PSD')
plt.title(name + peak_label)
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
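# Hedged usage sketch: run the periodogram on synthetic measurements with a known
# 24-hour rhythm; the significance threshold computed above should be exceeded
# near a period of 24 h. Purely illustrative, not part of the original module.
def _demo_periodogram(save_to=''):
    X = np.arange(0, 96, 2)  # 4 days sampled every 2 hours
    Y = 10 + 3*np.cos(2*np.pi*X/24) + np.random.normal(0, 0.5, len(X))
    periodogram(X, Y, per_type='per', name='synthetic 24 h rhythm', save_to=save_to)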
def remove_lin_comp_df(df, n_components = 0, period = 24, summary_file=""):
df2 = pd.DataFrame(columns=df.columns)
if summary_file:
df_fit = pd.DataFrame(columns=['test', 'k', 'CI', 'p', 'q'])
for test in df.test.unique():
x,y = df[df['test']==test].x,df[df['test']==test].y
x,y,fit = remove_lin_comp(x,y,n_components=n_components, period=period, return_fit=True)
df_tmp = pd.DataFrame(columns=df.columns)
df_tmp['x'] = x
df_tmp['y'] = y
df_tmp['test'] = test
df2 = df2.append(df_tmp, ignore_index=True)
if summary_file:
fit['test'] = test
df_fit=df_fit.append(fit, ignore_index=True)
if summary_file:
df_fit.q = multi.multipletests(df_fit.p, method = 'fdr_bh')[1]
if summary_file.endswith("csv"):
df_fit.to_csv(summary_file, index=False)
elif summary_file.endswith("xlsx"):
df_fit.to_excel(summary_file, index=False)
return df2
def remove_lin_comp(X, Y, n_components = 0, period = 24, return_fit=False):
X = np.array(X)
Y = np.array(Y)
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = True)
model = sm.OLS(Y, X_fit)
results = model.fit()
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
CI = CIs[1]
#A = results.params[0]
k = results.params[1]
"""
X_lin = np.zeros(X_fit.shape)
X_lin[:,1] = X_fit[:,1]
Y_lin = results.predict(X_lin)
Y = Y-Y_lin
"""
#Y_fit = results.predict(X_fir)
#Y = Y - Y_fit
#Y = Y - A - k*X
    if CI[0] * CI[1] > 0: # if both bounds of the CI have the same sign, i.e. the linear trend is significant
Y = Y - k*X
if return_fit:
fit = {}
fit['k'] = results.params[1]
fit['CI'] = CI
fit['p'] = results.pvalues[1]
return X,Y,fit
"""
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = False)
model = sm.OLS(Y, X_fit)
results = model.fit()
plt.plot(X, results.fittedvalues, color="black")
"""
return X, Y
# prepare the independent variables
def generate_independents(X, n_components = 3, period = 24, lin_comp = False, remove_lin_comp = False):
if n_components == 0:
X_fit = X
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if lin_comp and n_components:
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
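# Hedged worked example of the design matrix built above: with n_components=1,
# period=24 and lin_comp=False each time point x contributes the row
# [1, sin(2*pi*x/24), cos(2*pi*x/24)]; every additional component appends the same
# sine/cosine pair at periods 24/2, 24/3, ... Illustrative only.
def _demo_design_matrix():
    X = np.array([0.0, 6.0, 12.0])
    D = generate_independents(X, n_components=1, period=24, lin_comp=False)
    # D is approximately [[1, 0, 1], [1, 1, 0], [1, 0, -1]]
    return D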
# prepare the independent variables for limorhyde
def generate_independents_compare(X1, X2, n_components1 = 3, period1 = 24, n_components2 = 3, period2 = 24, lin_comp = False, non_rhythmic=False, remove_lin_comp=False):
H1 = np.zeros(X1.size)
H2 = np.ones(X2.size)
X = np.concatenate((X1, X2))
H_i = np.concatenate((H1, H2))
X_i = H_i * X
for i in np.arange(n_components1):
n = i+1
A = np.sin((X/(period1/n))*np.pi*2)
B = np.cos((X/(period1/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if non_rhythmic:
X_fit = np.column_stack((X_fit, H_i))
else:
for i in np.arange(n_components2):
n = i+1
A_i = H_i * np.sin((X/(period2/n))*np.pi*2)
B_i = H_i * np.cos((X/(period2/n))*np.pi*2)
X_fit = np.column_stack((X_fit, np.column_stack((A_i, B_i))))
X_fit = np.column_stack((X_fit, H_i))
if lin_comp:
X_fit = np.column_stack((X_i, X_fit))
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit[:,1] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
"""
*****************************
* start of finding the best *
*****************************
"""
def get_best_fits(df_results, criterium = 'R2_adj', reverse = False, n_components = []):
df_best = pd.DataFrame(columns = df_results.columns, dtype=float)
names = np.unique(df_results.test)
for name in names:
if n_components:
for n_comp in n_components:
if reverse:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].min()
else:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results.n_components == n_comp) & (df_results[criterium] == M)], ignore_index = True)
else:
M = df_results[df_results.test == name][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results[criterium] == M)], ignore_index = True)
return df_best
def get_best_models_population(df, df_models, n_components = [1,2,3], lin_comp = False, criterium = 'RSS', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, criterium = criterium, reverse = reverse, n_components=n_components)
for test in names:
        n_points = df[df.test.str.startswith(test)].x.shape[0] # difference from get_best_models: count the points across all replicates of the test
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
# compare two models according to the F-test
# http://people.reed.edu/~jones/Courses/P24.pdf
# https://www.graphpad.com/guides/prism/7/curve-fitting/index.htm?reg_howtheftestworks.htm
def get_best_models(df, df_models, n_components = [1,2,3], lin_comp = False, criterium='p', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, n_components = n_components, criterium=criterium, reverse = reverse)
for test in names:
n_points = df[df.test == test].x.shape[0]
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
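# Hedged sketch of the nested-model F-test that compare_models (defined elsewhere
# in this module) presumably performs, following the references above:
# F = ((RSS_reduced - RSS_full)/(DF_reduced - DF_full)) / (RSS_full/DF_full),
# with the p-value taken from the F distribution. This is an illustrative
# reimplementation, not the original function.
def _f_test_p_value(RSS_reduced, RSS_full, DF_reduced, DF_full):
    F = ((RSS_reduced - RSS_full)/(DF_reduced - DF_full)) / (RSS_full/DF_full)
    return 1 - stats.f.cdf(F, DF_reduced - DF_full, DF_full)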
"""
***************************
* end of finding the best *
***************************
"""
"""
************
* plotting *
************
"""
def plot_data(df, names = [], folder = '', prefix = '', color='black'):
if not names:
names = np.unique(df.test)
for test in names:
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
plt.plot(X,Y,'o', markersize=1, color=color)
plt.title(test)
#test = test.replace("$","")
#fig = plt.gcf()
#fig.set_size_inches(11,8)
if folder:
plt.savefig(os.path.join(folder, prefix+test+'.png'))
plt.savefig(os.path.join(folder, prefix+test+'.pdf'))
plt.close()
else:
plt.show()
def plot_data_pairs(df, names, folder = '', prefix ='', color1='black', color2='red'):
for test1, test2 in names:
X1, Y1 = np.array(df[df.test == test1].x), np.array(df[df.test == test1].y)
X2, Y2 = np.array(df[df.test == test2].x), np.array(df[df.test == test2].y)
plt.plot(X1,Y1,'o', color=color1, markersize=1, label=test1)
plt.plot(X2,Y2,'o', color=color2, markersize=1, label=test2)
plt.legend()
plt.title(test1 + ' vs. ' + test2)
if folder:
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.png'))
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.pdf'))
plt.close()
else:
plt.show()
def plot_components(X, Y, n_components = 3, period = 24, name = '', save_to = ''):
A = np.sin((X/period)*np.pi*2)
B = np.cos((X/period)*np.pi*2)
C = np.sin((X/(period/2))*np.pi*2)
D = np.cos((X/(period/2))*np.pi*2)
E = np.sin((X/(period/3))*np.pi*2)
F = np.cos((X/(period/3))*np.pi*2)
#G = np.sin((X/(period/4))*np.pi*2)
#H = np.cos((X/(period/4))*np.pi*2)
fig, axs = plt.subplots(n_components, 2, constrained_layout=True)
fig.suptitle(name, fontsize=16)
axs[0,0].plot(A, Y,'.')
axs[0,0].set(xlabel = 'sin((x/'+str(period)+') * 2$\pi$)')
axs[0,1].plot(B, Y,'.')
axs[0,1].set(xlabel = 'cos((x/'+str(period)+') * 2$\pi$)')
if n_components >= 2:
axs[1,0].plot(C, Y,'.')
axs[1,0].set(xlabel = 'sin((x/'+str(period/2)+') * 2$\pi$)')
axs[1,1].plot(D, Y,'.')
axs[1,1].set(xlabel = 'cos((x/'+str(period/2)+') * 2$\pi$)')
if n_components == 3:
axs[2,0].plot(E, Y,'.')
axs[2,0].set(xlabel = 'sin((x/'+str(period/3)+') * 2$\pi$)')
axs[2,1].plot(F, Y,'.')
axs[2,1].set(xlabel = 'cos((x/'+str(period/3)+') * 2$\pi$)')
if n_components == 4:
axs[3,0].plot(E, Y,'.')
axs[3,0].set(xlabel = 'sin((x/'+str(period/4)+') * 2$\pi$)')
axs[3,1].plot(F, Y,'.')
axs[3,1].set(xlabel = 'cos((x/'+str(period/4)+') * 2$\pi$)')
for ax in axs.flat:
ax.set(ylabel = 'y')
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
def plot_phases(acrs, amps, tests, period=24, colors = ("black", "red", "green", "blue"), folder = "", prefix="", legend=True, CI_acrs = [], CI_amps = [], linestyles = [], title = "", labels = []):#, plot_measurements = False, measurements=None):
acrs = np.array(acrs, dtype = float)
amps = np.array(amps, dtype = float)
if colors and len(colors) < len(tests):
colors += ("black",) * (len(tests)-len(colors))
x = np.arange(0, 2*np.pi, np.pi/4)
x_labels = list(map(lambda i: 'CT ' + str(i) + " ", list((x/(2*np.pi) * period).astype(int))))
x_labels[1::2] = [""]*len(x_labels[1::2])
ampM = np.max(amps)
amps /= ampM
acrs = -acrs
fig = plt.figure()
ax = fig.add_subplot(projection='polar')
ax.set_theta_offset(0.5*np.pi)
ax.set_theta_direction(-1)
lines = []
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
"""
if "LDL" in test:
color = "#FF0000"
elif "HDL" in test:
color = "#0000FF"
elif "CHOL" in test:
color = "#00FF00"
elif "control" in test.lower():
color = "#000000"
else:
color = "#0000FF"
"""
if linestyles:
#ax.plot([acr, acr], [0, amp], label=test, color=color, linestyle = linestyles[i])
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2, linestyle = linestyles[i]) )
lines.append(Line2D([0], [0], color=color, linewidth=2, linestyle=linestyles[i]))
else:
#ax.plot([acr, acr], [0, amp], label=test, color=color)
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2) )
lines.append(Line2D([0], [0], color=color, linewidth=2))
#ax.plot([acr, acr], [0, amp], label=test, color=color)
#ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, linewidth=2) )
if CI_acrs and CI_amps:
amp_l, amp_u = np.array(CI_amps[i])/ampM
amp_l = max(0, amp_l)
amp_u = min(1, amp_u)
acr_l, acr_u = -np.array(CI_acrs[i])
if acr_l - acr_u > 2*np.pi:
plt.fill_between(np.linspace(0, np.pi*2, 1000), amp_l, amp_u, color=color, alpha=0.1)
elif acr_u < acr_l:
acr_l, acr_u = acr_u, acr_l
plt.fill_between(np.linspace(acr_l, acr_u, 1000), amp_l, amp_u, color=color, alpha=0.1)
ax.set_rmax(1)
ax.set_rticks([0.5]) # Less radial ticks
ax.set_yticklabels([""])
ax.set_xticks(x)
ax.set_xticklabels(x_labels)
ax.grid(True)
ax.set_facecolor('#f0f0f0')
"""
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
if plot_measurements:
try:
x,y = measurements
except:
df = measurements
x,y=df[df.test == test].x, df[df.test == test].y
plt.plot(x,y,'o',markersize=1, alpha = 0.75, color=color)
"""
name = "_".join(tests)
#ax.set_title(name, va='bottom')
if title:
ax.set_title(title, va='bottom')
else:
ax.set_title(name, va='bottom')
if legend:
if labels:
plt.legend(lines, labels, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
else:
plt.legend(lines, tests, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
#ax.legend()
if folder:
plt.savefig(os.path.join(folder,prefix+name+"_phase.pdf"))
plt.savefig(os.path.join(folder,prefix+name+"_phase.png"))
plt.close()
else:
plt.show()
"""
*******************
* end of plotting *
*******************
"""
"""
*****************************
* start of fitting wrappers *
*****************************
"""
def fit_group(df, n_components = 2, period = 24, names = "", folder = '', prefix='', **kwargs):
df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'R2', 'R2_adj', 'log-likelihood', 'amplitude', 'acrophase', 'mesor', 'peaks', 'heights', 'troughs', 'heights2'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
for test in names:
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if folder:
save_to = os.path.join(folder,prefix+test+'_compnts='+str(n_comps) +'_per=' + str(per))
else:
save_to = ''
results, statistics, rhythm_param, _, _ = fit_me(X, Y, n_components = n_comps, period = per, name = test, save_to = save_to, **kwargs)
try:
R2, R2_adj = results.rsquared,results.rsquared_adj
except:
R2, R2_adj = np.nan, np.nan
df_results = df_results.append({'test': test,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'R2': R2,
'R2_adj': R2_adj,
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'log-likelihood': results.llf,
'amplitude': rhythm_param['amplitude'],
'acrophase': rhythm_param['acrophase'],
'mesor': rhythm_param['mesor'],
'peaks': rhythm_param['peaks'],
'heights': rhythm_param['heights'],
'troughs': rhythm_param['troughs'],
'heights2': rhythm_param['heights2']
}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
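# Hedged usage sketch of the expected input layout (inferred from the code above,
# not documented here): a long-format DataFrame with a 'test' column naming each
# series and 'x'/'y' columns holding time points and measurements; replicates for
# the population fits use names ending in '_rep1', '_rep2', ...
def _demo_fit_group():
    X = np.arange(0, 48, 2, dtype=float)
    Y = 5 + 2*np.cos(2*np.pi*X/24) + np.random.normal(0, 0.3, len(X))
    df = pd.DataFrame({'test': 'demo_series', 'x': X, 'y': Y})
    return fit_group(df, n_components=[1, 2], period=24, plot=False, plot_phase=False)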
def population_fit_group(df, n_components = 2, period = 24, folder = '', prefix='', names = [], **kwargs):
df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'amplitude', 'acrophase', 'mesor'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
names = list(set(list(map(lambda x:x.split('_rep')[0], names))))
names.sort()
    for name in names:
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
df_pop = df[df.test.str.startswith(name)]
if folder:
save_to=os.path.join(folder,prefix+name+'_compnts='+str(n_comps) +'_per=' + str(per))
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, save_to = save_to, **kwargs)
else:
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, **kwargs)
df_results = df_results.append({'test': name,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'amplitude': rhythm_params['amplitude'],
'acrophase': rhythm_params['acrophase'],
'mesor': rhythm_params['mesor']}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
"""
***************************
* end of fitting wrappers *
***************************
"""
"""
******************************
* start of fitting functions *
******************************
"""
def population_fit(df_pop, n_components = 2, period = 24, lin_comp= False, model_type = 'lin', plot = True, plot_measurements=True, plot_individuals=True, plot_margins=True, hold = False, save_to = '', x_label='', y_label='', return_individual_params = False, params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type = "LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], color="black", **kwargs):
if return_individual_params:
ind_params = {}
for param in parameters_to_analyse:
ind_params[param] = []
params = -1
tests = df_pop.test.unique()
k = len(tests)
#X_test = np.linspace(0, 2*period, 1000)
#X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_fit_eval_params[:,1] = 0
min_X = np.min(df_pop.x.values)
max_X = np.max(df_pop.x.values)
min_Y = np.min(df_pop.y.values)
max_Y = np.max(df_pop.y.values)
if plot:
if plot_measurements:
X_plot = np.linspace(min(min_X,0), 1.1*max(max_X,period), 1000)
else:
X_plot = np.linspace(0, 1.1*period, 1000)
X_plot_fits = generate_independents(X_plot, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_plot_fits[:,1] = 0
"""
min_X = 1000
max_X = 0
min_Y = 1000
max_Y = 0
min_X_test = np.min(X_test)
"""
min_Y_test = 1000
max_Y_test = 0
for test in tests:
x,y = np.array(df_pop[df_pop.test == test].x), np.array(df_pop[df_pop.test == test].y)
"""
min_X = min(min_X, np.min(x))
max_X = max(max_X, np.max(x))
min_Y = min(min_Y, np.min(y))
max_Y = max(max_Y, np.max(y))
"""
results, statistics, rhythm_params, X_test, Y_test, model = fit_me(x, y, n_components = n_components, period = period, plot = False, return_model = True, lin_comp=lin_comp, **kwargs)
X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp, remove_lin_comp=True)
if lin_comp:
X_fit_eval_params[:,1] = 0
if return_individual_params:
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_ind_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
for param in parameters_to_analyse:
ind_params[param].append(rhythm_ind_params[param])
if (plot and plot_individuals):
#Y_eval_params = results.predict(X_fit_eval_params)
Y_plot_fits = results.predict(X_plot_fits)
if (plot and plot_individuals):
if not hold:
plt.plot(X_plot,Y_plot_fits,color=color, alpha=0.25, label=test)
else:
plt.plot(X_plot,Y_plot_fits,color=color, alpha=0.25)
min_Y_test = min(min_Y_test, np.min(Y_plot_fits))
max_Y_test = max(max_Y_test, np.max(Y_plot_fits))
if plot and plot_measurements:
plt.plot(x,y,'o', color=color, markersize=1)
if type(params) == int:
params = results.params
if plot and plot_margins:
#_, lowers, uppers = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
Y_plot_fits_all = Y_plot_fits
else:
params = np.vstack([params, results.params])
if plot and plot_margins:
#_, l, u = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
#lowers = np.vstack([lowers, l])
#uppers = np.vstack([uppers, u])
Y_plot_fits_all = np.vstack([Y_plot_fits_all, Y_plot_fits])
# parameter statistics: means, variances, stadndard deviations, confidence intervals, p-values
#http://reliawiki.com/index.php/Multiple_Linear_Regression_Analysis
if k > 1:
means = np.mean(params, axis=0)
        variances = np.sum((params-np.mean(params, axis=0))**2, axis = 0)/(k-1) # equivalent to np.var(params, axis=0, ddof=1)
sd = variances**0.5
se = sd/((k-1)**0.5)
T0 = means/se
p_values = 2 * (1 - stats.t.cdf(abs(T0), k-1))
t = abs(stats.t.ppf(0.05/2,df=k-1))
lower_CI = means - ((t*sd)/((k-1)**0.5))
upper_CI = means + ((t*sd)/((k-1)**0.5))
results.initialize(model, means)
else:
means = params
sd = np.zeros(len(params))
sd[:] = np.nan
se = np.zeros(len(params))
se[:] = np.nan
lower_CI = means
upper_CI = means
p_values = np.zeros(len(params))
p_values[:] = np.nan
x,y = df_pop.x, df_pop.y
xy = list(zip(x,y))
xy.sort()
x,y = zip(*xy)
x,y = np.array(x), np.array(y)
X_fit = generate_independents(x, n_components = n_components, period = period, lin_comp = lin_comp)
Y_fit = results.predict(X_fit)
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
if plot:
pop_name = "_".join(test.split("_")[:-1])
Y_plot_fits = results.predict(X_plot_fits)
if not hold:
plt.plot(X_plot,Y_plot_fits, color=color, label="population fit")
else:
plt.plot(X_plot,Y_plot_fits, color=color, label=pop_name)
plt.legend()
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('time [h]')
if y_label:
plt.ylabel(y_label)
else:
plt.ylabel('measurements')
min_Y_test = min(min_Y_test, np.min(Y_eval_params))
max_Y_test = max(max_Y_test, np.max(Y_eval_params))
if plot and plot_margins:
if k == 1:
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
else:
#lower = np.mean(lowers, axis=0)
#upper = np.mean(uppers, axis=0)
var_Y = np.var(Y_plot_fits_all, axis=0, ddof = k-1)
sd_Y = var_Y**0.5
lower = Y_plot_fits - ((t*sd_Y)/((k-1)**0.5))
upper = Y_plot_fits + ((t*sd_Y)/((k-1)**0.5))
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
if plot:
if plot_measurements:
if model_type == 'lin':
plt.axis([min(min_X,0), 1.1*max(max_X,period), 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([min(min_X,0), max_X, 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([0, period, min_Y_test*0.9, max_Y_test*1.1])
if plot:
#pop_name = "_".join(test.split("_")[:-1])
if not hold:
plt.title(pop_name + ', p-value=' + "{0:.5f}".format(statistics['p']))
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
statistics = calculate_statistics(x, y, Y_fit, n_components, period, lin_comp)
statistics_params = {'values': means,
'SE': se,
'CI': (lower_CI, upper_CI),
'p-values': p_values}
if params_CI:
population_eval_params_CI(X_test, X_fit_eval_params, results, statistics_params, rhythm_params, samples_per_param=samples_per_param_CI, max_samples = max_samples_CI, k=k, sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_individual_params:
return params, statistics, statistics_params, rhythm_params, results, ind_params
else:
return params, statistics, statistics_params, rhythm_params, results
def fit_me(X, Y, n_components = 2, period = 24, lin_comp = False, model_type = 'lin', alpha = 0, name = '', save_to = '', plot=True, plot_residuals=False, plot_measurements=True, plot_margins=True, return_model = False, color = False, plot_phase = True, hold=False, x_label = "", y_label = "", rescale_to_period=False, bootstrap=False, bootstrap_size=1000, bootstrap_type="std", params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type="LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase']):
#print(lin_comp)
"""
###
# prepare the independent variables
###
"""
"""
if n_components == 0:
X_fit = X
X_fit_test = X_test
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
A_test = np.sin((X_test/(period/n))*np.pi*2)
B_test = np.cos((X_test/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
X_fit_test = np.column_stack((A_test, B_test))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
X_fit_test = np.column_stack((X_fit_test, np.column_stack((A_test, B_test))))
"""
X_fit = generate_independents(X, n_components=n_components, period=period, lin_comp=lin_comp)
#X_fit_eval_params = X_fit_test
#if lin_comp and n_components:
# X_fit = np.column_stack((X, X_fit))
# X_fit_eval_params = np.column_stack((np.zeros(len(X_test)), X_fit_test))
# X_fit_test = np.column_stack((X_test, X_fit_test))
#X_fit = sm.add_constant(X_fit, has_constant='add')
#X_fit_test = sm.add_constant(X_fit_test, has_constant='add')
#X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')
"""
###
# fit
###
"""
if model_type == 'lin':
model = sm.OLS(Y, X_fit)
results = model.fit()
elif model_type == 'poisson':
#model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
model = statsmodels.discrete.discrete_model.Poisson(Y, X_fit)
results = model.fit(disp=0)
elif model_type =='gen_poisson':
#model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)
model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit, p=1)
results = model.fit(disp=0)
elif model_type == 'nb':
# https://towardsdatascience.com/negative-binomial-regression-f99031bb25b4
# https://dius.com.au/2017/08/03/using-statsmodels-glms-to-model-beverage-consumption/#cameron
# if not alpha:
# train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
# train_results = train_model.fit()
# df_train = pd.DataFrame()
# df_train['Y'] = Y
# df_train['mu'] = train_results.mu
# df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)
# ols_expr = """AUX_OLS_DEP ~ mu - 1"""
# aux_olsr_results = smf.ols(ols_expr, df_train).fit()
# alpha=aux_olsr_results.params[0]
#model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))
model = statsmodels.discrete.discrete_model.NegativeBinomialP(Y, X_fit, p=1)
results = model.fit(disp=0)
else:
print("Invalid option")
return
if model_type =='lin':
Y_fit = results.fittedvalues
else:
Y_fit = results.predict(X_fit)
if model_type in ['lin', 'poisson', 'nb']:
statistics = calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp)
if model_type in ['poisson', 'nb']:
statistics['count'] = np.sum(Y)
else:
RSS = sum((Y - Y_fit)**2)
p = results.llr_pvalue
statistics = {'p':p, 'RSS':RSS, 'count': np.sum(Y)}
#Y_test = results.predict(X_fit_test)
X_test = np.linspace(0, 2*period, 1000)
X_fit_test = generate_independents(X_test, n_components=n_components, period=period, lin_comp=lin_comp, remove_lin_comp = True)
Y_fit_test = results.predict(X_fit_test)
rhythm_params = evaluate_rhythm_params(X_test, Y_fit_test, period=period)
if lin_comp:
rhythm_params['lin_comp'] = results.params[1]
CIs = results.conf_int()
if type(CIs) != np.ndarray:
rhythm_params['CI(lin_comp)'] = CIs.values[1]
else:
rhythm_params['CI(lin_comp)'] = CIs[1]
rhythm_params['p(lin_comp)'] = results.pvalues[1]
#print(rhythm_params['p(lin_comp)'])
"""
###
# plot
###
"""
if plot:
if plot_measurements:
min_X = np.min(X)
max_X = np.max(X)
else:
min_X = 0
max_X = period
X_plot = np.linspace(min_X, max_X, 1000)
X_plot_fits = generate_independents(X_plot, n_components=n_components, period=period, lin_comp=lin_comp)
Y_plot = results.predict(X_plot_fits)
###
if not color:
color = 'black'
if plot_measurements:
if not hold:
plt.plot(X,Y, 'ko', markersize=1, label = 'data', color=color)
else:
plt.plot(X,Y, 'ko', markersize=1, color=color)
if not hold:
plt.plot(X_plot, Y_plot, 'k', label = 'fit', color=color)
else:
plt.plot(X_plot, Y_plot, 'k', label = name, color=color)
# plot measurements
if plot_measurements:
if rescale_to_period:
X = X % period
if model_type == 'lin':
plt.axis([min_X, max_X, 0.9*min(min(Y), min(Y_plot)), 1.1*max(max(Y), max(Y_plot))])
else:
plt.axis([min_X, max_X, 0.9*min(min(Y), min(Y_plot)), 1.1*max(max(Y), max(Y_plot))])
else:
plt.axis([min_X, max_X, min(Y_plot)*0.9, max(Y_plot)*1.1])
if name:
plt.title(name)
"""
if model_type == 'lin':
if name:
plt.title(name + ', p-value=' + "{0:.5f}".format(statistics['p']))
else:
plt.title('p-value=' + "{0:.5f}".format(statistics['p']))
else:
if name:
plt.title(name + ', p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
else:
plt.title('p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
"""
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('Time [h]')
if y_label:
plt.ylabel(y_label)
elif model_type == 'lin':
plt.ylabel('Measurements')
else:
plt.ylabel('Count')
# plot confidence intervals
if plot_margins:
if model_type == 'lin':
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
if color:
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
else:
plt.fill_between(X_plot, lower, upper, color='#888888', alpha=0.1)
else:
# calculate and draw plots from the combinations of parameters from the 95 % confidence intervals of assessed parameters
res2 = copy.deepcopy(results)
params = res2.params
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
#N = 512
N = 1024
if n_components == 1:
N2 = 10
elif n_components == 2:
N2 = 8
else:
N2 = 10 - n_components
P = np.zeros((len(params), N2))
for i, CI in enumerate(CIs):
P[i,:] = np.linspace(CI[0], CI[1], N2)
n_param_samples = P.shape[1]**P.shape[0]
N = n_param_samples #min(max_samples_CI, n_param_samples)
if n_param_samples < 10**6:
params_samples = np.random.choice(n_param_samples, size=N, replace=False)
else:
params_samples = my_random_choice(max_val=n_param_samples, size=N)
for i,idx in enumerate(params_samples):
p = lazy_prod(idx, P)
res2.initialize(results.model, p)
Y_test_CI = res2.predict(X_plot_fits)
if plot and plot_margins:
if color and color != '#000000':
plt.plot(X_plot, Y_test_CI, color=color, alpha=0.05)
else:
plt.plot(X_plot, Y_test_CI, color='#888888', alpha=0.05)
if not hold:
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
if plot_residuals:
resid = results.resid
sm.qqplot(resid)
plt.title(name)
if save_to:
plt.savefig(save_to+'_resid.pdf', bbox_inches='tight')
plt.savefig(save_to+'_resid.png')
plt.close()
else:
plt.show()
if plot_phase:
per = rhythm_params['period']
amp = rhythm_params['amplitude']
phase = rhythm_params['acrophase']
if save_to:
folder = os.path.join(*os.path.split(save_to)[:-1])
plot_phases([phase], [amp], [name], period=per, folder=folder)
else:
plot_phases([phase], [amp], [name], period=per)#, plot_measurements=True, measurements=[X,Y])
if bootstrap:
eval_params_bootstrap(X, X_fit, X_test, X_fit_test, Y, model_type = model_type, rhythm_params=rhythm_params, bootstrap_size=bootstrap_size, bootstrap_type=bootstrap_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if params_CI:
eval_params_CI(X_test, X_fit_test, results, rhythm_params, samples_per_param = samples_per_param_CI, max_samples = max_samples_CI, k=len(X), sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_model:
return results, statistics, rhythm_params, X_test, Y_fit_test, model
else:
return results, statistics, rhythm_params, X_test, Y_fit_test
"""
****************************
* end of fitting functions *
****************************
"""
"""
***********************
* start of assessment *
***********************
"""
# rhythm params
def evaluate_rhythm_params(X,Y, project_acrophase=True, period=0):
#plt.plot(X,Y)
#plt.show()
m = min(Y)
M = max(Y)
A = M - m
MESOR = m + A/2
AMPLITUDE = abs(A/2)
PHASE = 0
PHASE_LOC = 0
H = M - 0.01*M if M >= 0 else M + 0.01*M
locs, heights = signal.find_peaks(Y, height = H)
heights = heights['peak_heights']
if len(locs) >= 2:
period2 = X[locs[1]] - X[locs[0]]
period2 = int(round(period2))
else:
period2 = np.nan
if not period:
period = period2
if len(locs) >= 1:
PHASE = X[locs[0]]
PHASE_LOC = locs[0]
if period:
ACROPHASE = phase_to_radians(PHASE, period)
if project_acrophase:
ACROPHASE = project_acr(ACROPHASE)
else:
ACROPHASE = np.nan
# peaks and heights
#Y = Y[X < 24]
#X = X[X < 24]
locs, heights = signal.find_peaks(Y, height = MESOR)
heights = heights['peak_heights']
peaks = X[locs]
heights = Y[locs]
idxs1 = peaks <= period
peaks = peaks[idxs1]
heights = heights[idxs1]
Y2 = M - Y
locs2, heights2 = signal.find_peaks(Y2, height = MESOR-m)
heights2 = heights2['peak_heights']
troughs = X[locs2]
heights2 = Y[locs2]
idxs2 = troughs <= period
troughs = troughs[idxs2]
heights2 = heights2[idxs2]
# rhythm_params
return {'period':period, 'amplitude':AMPLITUDE, 'acrophase':ACROPHASE, 'mesor':MESOR, 'peaks': peaks, 'heights': heights, 'troughs': troughs, 'heights2': heights2, 'max_loc': PHASE_LOC, 'period2':period2}
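# --- Added illustrative note (not part of the original code) ---
# evaluate_rhythm_params expects a densely evaluated *fitted* curve, not raw measurements.
# A minimal hypothetical call (variable names are assumptions, numpy imported as np):
# X_dense = np.linspace(0, 48, 1000)
# Y_dense = 10 + 3*np.cos(2*np.pi*(X_dense - 6)/24) # mesor 10, amplitude 3, peak at t = 6
# rp = evaluate_rhythm_params(X_dense, Y_dense, period=24)
# rp['mesor'], rp['amplitude'], rp['acrophase'] # approx. 10, 3, and the peak phase in radians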
def calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp = False):
# statistics according to Cornelissen (eqs (8) - (9))
MSS = sum((Y_fit - Y.mean())**2)
RSS = sum((Y - Y_fit)**2)
n_params = n_components * 2 + 1
if lin_comp:
n_params += 1
N = Y.size
F = (MSS/(n_params - 1)) / (RSS/(N - n_params))
p = 1 - stats.f.cdf(F, n_params - 1, N - n_params)
#print("p-value(Cornelissen): {}".format(p))
# statistics of GOF according to Cornelissen (eqs (14) - (15))
# TODO: should this be corrected for LumiCycle data, i.e. when there are several consecutive measurements at the same time point?
#X_periodic = (X % period).astype(int)
X_periodic = np.round_(X % period,2)
X_unique = np.unique(X_periodic)
n_T = len(X_unique)
SSPE = 0
for x in X_unique:
Y_i_avg = np.mean(Y[X_periodic == x])
SSPE += sum((Y[X_periodic == x] - Y_i_avg)**2)
SSLOF = RSS-SSPE
#print('RSS: ', RSS)
#print('SSPE: ', SSPE)
#print('SSLOF: ', SSLOF)
if lin_comp:
try:
F = (SSLOF/(n_T-1-(2*n_components + 1)))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-(2*n_components + 1), N-n_T)
except:
F = np.nan
p_reject = np.nan
else:
try:
F = (SSLOF/(n_T-1-2*n_components))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-2*n_components, N-n_T)
except:
F = np.nan
p_reject = np.nan
# Another measure that describes goodness of fit
# How well does the curve describe the data?
# signal to noise ratio
# fitted curve: signal
# noise:
stdev_data = np.std(Y, ddof = 1)
stdev_fit = np.std(Y_fit, ddof = 1)
SNR = stdev_fit / stdev_data
# Standard Error of residuals, margin of error
# https://stats.stackexchange.com/questions/57746/what-is-residual-standard-error
DoF = N - n_params
resid_SE = np.sqrt(RSS/DoF)
# https://scientificallysound.org/2017/05/16/independent-t-test-python/
# https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/hypothesis-testing/margin-of-error/
critical_value = stats.t.ppf(1-0.025, DoF)
ME = critical_value * resid_SE
return {'p':p, 'p_reject':p_reject, 'SNR':SNR, 'RSS': RSS, 'resid_SE': resid_SE, 'ME': ME}
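# --- Added illustrative note (not part of the original code) ---
# The overall p value above comes from F = (MSS/(n_params-1)) / (RSS/(N-n_params)),
# i.e. an F test of the cosinor fit against a constant (mesor-only) model, with the
# lack-of-fit test and SNR computed alongside it.
# A minimal hypothetical call (names are assumptions, numpy imported as np):
# X_obs = np.arange(0, 48, 2.0)
# Y_hat = 10 + 3*np.cos(2*np.pi*X_obs/24)
# Y_obs = Y_hat + np.random.normal(0, 0.5, len(X_obs))
# fit_stats = calculate_statistics(X_obs, Y_obs, Y_hat, n_components=1, period=24)
# fit_stats['p'], fit_stats['SNR'], fit_stats['ME']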
"""
*********************
* end of assessment *
*********************
"""
"""
*****************************
* start of compare wrappers *
*****************************
"""
# compare pairs using a given number of components and period
# analysis - options (from best to worst) (ADDITIONAL ANALYSIS)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_limo(df, pairs, n_components = 3, period = 24, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
if folder:
save_to = os.path.join(folder,prefix + test1 + '-' + test2 + '_per=' + str(per) + '_comps=' + str(n_comps))
else:
save_to = ''
#pvalues, params, results = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, lin_comp = lin_comp, model_type = model_type, alpha=alpha, save_to = save_to, plot_measurements=plot_measurements)
#p_overall, pvalues, params, _ = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, **kwargs)
p_overall, p_params, p_F, _, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
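# --- Added illustrative usage sketch (not part of the original code) ---
# Hypothetical call; df_measurements is assumed to be in the long format used by the
# fitting functions above (one row per measurement, with 'test', 'x' and 'y' columns):
# pairs = [('WT', 'KO')]
# df_cmp = compare_pairs_limo(df_measurements, pairs, n_components=[1, 2], period=24, analysis='CI1')
# df_cmp[['test', 'n_components', 'p', 'q', 'p(F test)']]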
# compare pairs using the best models as stored in df_best_models
# Basic analysis: first analysis according to LimoRhyde (Singer 2019), extended with the extra sum-of-squares F test that compares two nested models
# compare pairs with the presumption that the same model is used in both cases
# the same model: the same period and the same number of cosinor components
#
# analysis - options (from best to worst)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_best_models_limo(df, df_best_models, pairs, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
# if models have different number of components always start with the simpler model
# model is simpler if number of components is smaller
if n_components1 > n_components2:
test1, test2 = test2, test1
n_components1, n_components2 = n_components2, n_components1
period1, period2 = period2, period1
if folder:
save_to = os.path.join(folder, prefix + test1 + '-' + test2 + '_per1=' + str(period1) + '_comps1=' + str(n_components1) + '_per2=' + str(period2) + '_comps2=' + str(n_components2))
else:
save_to = ''
p_overall, p_params, p_F, params, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
#d['CI(d_amplitude)'] = rhythm_params['CI(d_amplitude)']
#d['p(d_amplitude)'] = rhythm_params['p(d_amplitude)']
#d['CI(d_acrophase)'] = rhythm_params['CI(d_acrophase)']
#d['p(d_acrophase)'] = rhythm_params['p(d_acrophase)']
df_results = df_results.append(d, ignore_index=True)
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using a given number of components and period
# analysis - options (from best to worst)
# - bootstrap: independent bootstrap analysis
# - CI: independent analysis of confidence intervals of two models
# To speed things up, specify df_results_extended, which holds the confidence intervals for amplitude and acrophase of all analysed models (the result of cosinor.analyse_models)
def diff_p_t_test_from_CI(X1, X2, CI1, CI2, DoF, angular = False):
dX = X2 - X1
if angular:
dX = project_acr(dX)
t = abs(stats.t.ppf(0.05/2,df=DoF))
dev1 = (CI1[1] - CI1[0])/2
dev2 = (CI2[1] - CI2[0])/2
if angular:
dev1 = abs(project_acr(dev1))
dev2 = abs(project_acr(dev2))
else:
dev1 = abs(dev1)
dev2 = abs(dev2)
dev = dev1+dev2
se = (dev1 + dev2)/t
CI = [dX-dev, dX+dev]
T0 = dX/se
p_val = 2 * (1 - stats.t.cdf(abs(T0), DoF))
return dX, p_val, CI
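# --- Added illustrative worked example (not part of the original code) ---
# Hypothetical numbers: amplitude estimates 3.0 and 4.0 with 95 % CIs (2.5, 3.5) and
# (3.4, 4.6) and 20 degrees of freedom. Each CI half-width is turned back into a
# standard error via the t quantile, the two errors are summed, and a two-sided
# t test is run on the difference d = 4.0 - 3.0 = 1.0:
# d_amp, p_amp, CI_d_amp = diff_p_t_test_from_CI(3.0, 4.0, (2.5, 3.5), (3.4, 4.6), DoF=20)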
def compare_pairs(df, pairs, n_components = 3, period = 24, analysis = "bootstrap", df_results_extended = | pd.DataFrame(columns=["test"]) | pandas.DataFrame |
import unittest
import pandas as pd
from ira.utils.utils import mstruct
| pd.set_option('display.width', 1000) | pandas.set_option |
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
def perform_fit_scaling(x):
"""
Scales all the values between [0,1]. \
It often speeds up the learning process.
:param x: the feature values
:return: the scaled x and the fitted scaler
"""
scaler = MinMaxScaler() # Default behavior is to scale to [0,1]
columns = x.columns
index = x.index
x = scaler.fit_transform(x)
# keeping the column names
x = pd.DataFrame(x, columns=columns, index=index)
return x, scaler
def perform_scaling(x, scaler):
"""
Scales all the values between [0,1].\
It often speeds up the learning process.
:param x: the feature values
:param scaler: a predefined and fitted scaler, e.g. a MinMaxScaler
:return: x, scaled
"""
columns = x.columns
index = x.index
x = scaler.transform(x)
# keeping the column names
x = | pd.DataFrame(x, columns=columns, index=index) | pandas.DataFrame |
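# --- Added illustrative usage of the two helpers above (not part of the original code) ---
# Hypothetical names; the scaler fitted on the training features is reused on new data:
# X_train_scaled, fitted_scaler = perform_fit_scaling(X_train)
# X_test_scaled = perform_scaling(X_test, fitted_scaler)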
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 17:32:59 2020
@author: ashu0
"""
import pandas as pd
from glob import glob
filenames = glob(r'C:\Users\ashu0\OneDrive\Desktop\Ok\Modeling\Three Way Match\*.csv')
dataframes = [ | pd.read_csv(f) | pandas.read_csv |
"""
Python tools and algorithms gathered throughout the development projects and tutorials.
Sections:
1. File Read/Write/Convert/Save Operations
2. Pandas Utils
3. Path Operations for File/Folder/System
4. Algorithms for Hierarchical Structures
5. Utility functions for xlrd library and read_spir function
"""
import collections
import os
import re
import warnings # xlsx writer warning is eliminated
from tkinter import Tk, filedialog, messagebox
import pandas as pd
import xlrd as xl
import xlsxwriter
from six import string_types
from advanced_tools.IO_path_utils import checkfile, get_filepaths
##############################################################################################
### Pandas Utils & Excel Utils
##############################################################################################
def combine_multiple_csv_into_excel(full_path_to_folder=None, sep='\t', encoding='latin1'):
r"""
Combine CSV files that can be converted to a DataFrame and share the exact same structure.
:param full_path_to_folder: folder containing the CSV files to combine
:param sep: Text separator, default is '\t'
:param encoding: Text encoding, default is 'latin1'
:return: excel file with one extra column showing the name of the file.
"""
csv_files = sorted(get_filepaths(full_path_to_folder))
folder_name = os.path.split(full_path_to_folder)[1] # For folder location and folder name
df_base = pd.read_csv(csv_files[0], sep=sep, encoding=encoding, low_memory=False)
df_base['File_Name'] = os.path.splitext(os.path.split(csv_files[0])[1])[0]
for i in csv_files[1:]:
df_temp = pd.read_csv(i, sep=sep, encoding=encoding, low_memory=False)
file_name = os.path.splitext(os.path.split(i)[1])[0]
df_temp['File_Name'] = file_name
df_base = df_base.append(df_temp)
df_base.to_excel('{}\\{}.xlsx'.format(full_path_to_folder, folder_name))
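# --- Added illustrative usage (not part of the original code; the path is a placeholder) ---
# combine_multiple_csv_into_excel(r"C:\exports\monthly_reports", sep=';', encoding='latin1')
# would write C:\exports\monthly_reports\monthly_reports.xlsx, with the extra
# 'File_Name' column identifying the source CSV of every row.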
def split_worksheets(file):
"""
:param file: Excel file to be split by its worksheets.
:return:
"""
dfs_to_split = pd.read_excel(file, None, encoding='latin1')
# 'None' used as worksheet kwarg thus it could be read as Dataframe dict.
dfs_to_split = collections.OrderedDict(sorted(dfs_to_split.items()))
for k, v in dfs_to_split.items():
export_file_name = os.path.join(os.path.split(file)[0], "{}.xlsx".format(k))
writer = pd.ExcelWriter(export_file_name, engine='xlsxwriter')
v.to_excel(excel_writer=writer, sheet_name=k, index=False)
writer.save()
writer.close()
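# --- Added illustrative usage (not part of the original code; the file name is a placeholder) ---
# split_worksheets(r"C:\exports\yearly_summary.xlsx")
# writes one .xlsx per worksheet into the same folder, each file named after its sheet.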
def dataframe_diff(df1, df2):
"""
Give the difference between two pandas dataframes.
Date Fruit Num Color
9 2013-11-25 Orange 8.6 Orange
8 2013-11-25 Apple 22.1 Red
"""
df = | pd.concat([df1, df2]) | pandas.concat |
# coding: utf-8
# # SF Crime
# ## W207 Final Project
# ### Basic Modeling
#
#
# ### Environment and Data
# In[1]:
# Import relevant libraries:
import time
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
# Set random seed and format print output:
np.random.seed(0)
np.set_printoptions(precision=3)
# #### DDL to construct table for SQL transformations:
#
# ```sql
# CREATE TABLE kaggle_sf_crime (
# dates TIMESTAMP,
# category VARCHAR,
# descript VARCHAR,
# dayofweek VARCHAR,
# pd_district VARCHAR,
# resolution VARCHAR,
# addr VARCHAR,
# X FLOAT,
# Y FLOAT);
# ```
# #### Getting training data into a locally hosted PostgreSQL database:
# ```sql
# \copy kaggle_sf_crime FROM '/Users/Goodgame/Desktop/MIDS/207/final/sf_crime_train.csv' DELIMITER ',' CSV HEADER;
# ```
#
# #### SQL Query used for transformations:
#
# ```sql
# SELECT
# category,
# date_part('hour', dates) AS hour_of_day,
# CASE
# WHEN dayofweek = 'Monday' then 1
# WHEN dayofweek = 'Tuesday' THEN 2
# WHEN dayofweek = 'Wednesday' THEN 3
# WHEN dayofweek = 'Thursday' THEN 4
# WHEN dayofweek = 'Friday' THEN 5
# WHEN dayofweek = 'Saturday' THEN 6
# WHEN dayofweek = 'Sunday' THEN 7
# END AS dayofweek_numeric,
# X,
# Y,
# CASE
# WHEN pd_district = 'BAYVIEW' THEN 1
# ELSE 0
# END AS bayview_binary,
# CASE
# WHEN pd_district = 'INGLESIDE' THEN 1
# ELSE 0
# END AS ingleside_binary,
# CASE
# WHEN pd_district = 'NORTHERN' THEN 1
# ELSE 0
# END AS northern_binary,
# CASE
# WHEN pd_district = 'CENTRAL' THEN 1
# ELSE 0
# END AS central_binary,
# CASE
# WHEN pd_district = 'BAYVIEW' THEN 1
# ELSE 0
# END AS pd_bayview_binary,
# CASE
# WHEN pd_district = 'MISSION' THEN 1
# ELSE 0
# END AS mission_binary,
# CASE
# WHEN pd_district = 'SOUTHERN' THEN 1
# ELSE 0
# END AS southern_binary,
# CASE
# WHEN pd_district = 'TENDERLOIN' THEN 1
# ELSE 0
# END AS tenderloin_binary,
# CASE
# WHEN pd_district = 'PARK' THEN 1
# ELSE 0
# END AS park_binary,
# CASE
# WHEN pd_district = 'RICHMOND' THEN 1
# ELSE 0
# END AS richmond_binary,
# CASE
# WHEN pd_district = 'TARAVAL' THEN 1
# ELSE 0
# END AS taraval_binary
# FROM kaggle_sf_crime;
# ```
# #### Load the data into training, development, and test:
# In[2]:
data_path = "./data/train_transformed.csv"
df = pd.read_csv(data_path, header=0)
x_data = df.drop('category', 1)
y = df.category.as_matrix()
# Impute missing values with mean values:
x_complete = x_data.fillna(x_data.mean())
X_raw = x_complete.as_matrix()
# Scale the data between 0 and 1:
X = MinMaxScaler().fit_transform(X_raw)
# Shuffle data to remove any underlying pattern that may exist:
shuffle = np.random.permutation(np.arange(X.shape[0]))
X, y = X[shuffle], y[shuffle]
# Separate training, dev, and test data:
test_data, test_labels = X[800000:], y[800000:]
dev_data, dev_labels = X[700000:800000], y[700000:800000]
train_data, train_labels = X[:700000], y[:700000]
mini_train_data, mini_train_labels = X[:75000], y[:75000]
mini_dev_data, mini_dev_labels = X[75000:100000], y[75000:100000]
# In[3]:
#the submission format requires that we list the ID of each example?
#this is to remember the order of the IDs after shuffling
#(not used for anything right now)
allIDs = np.array(list(df.axes[0]))
allIDs = allIDs[shuffle]
testIDs = allIDs[800000:]
devIDs = allIDs[700000:800000]
trainIDs = allIDs[:700000]
#this is for extracting the column names for the required submission format
sampleSubmission_path = "./data/sampleSubmission.csv"
sampleDF = pd.read_csv(sampleSubmission_path)
allColumns = list(sampleDF.columns)
featureColumns = allColumns[1:]
#this is for extracting the test data for our baseline submission
real_test_path = "./data/test_transformed.csv"
testDF = pd.read_csv(real_test_path, header=0)
real_test_data = testDF
test_complete = real_test_data.fillna(real_test_data.mean())
Test_raw = test_complete.as_matrix()
TestData = MinMaxScaler().fit_transform(Test_raw)
#here we remember the ID of each test data point
#(in case we ever decide to shuffle the test data for some reason)
testIDs = list(testDF.axes[0])
# In[4]:
#copied the baseline classifier from below,
#but made it return prediction probabilities for the actual test data
def MNB():
mnb = MultinomialNB(alpha = 0.0000001)
mnb.fit(train_data, train_labels)
#print("\n\nMultinomialNB accuracy on dev data:", mnb.score(dev_data, dev_labels))
return mnb.predict_proba(real_test_data)
MNB()
baselinePredictionProbabilities = MNB()
#here is my rough attempt at putting the results (prediction probabilities)
#in a .csv in the required format
#first we turn the prediction probabilities into a data frame
resultDF = | pd.DataFrame(baselinePredictionProbabilities,columns=featureColumns) | pandas.DataFrame |
from operator import index
from jinja2 import Environment, FileSystemLoader
from bokeh.plotting import curdoc
from dotenv import load_dotenv
import pandas as pd
import panel as pn
import holoviews as hv
import numpy as np
import urllib
import urllib.parse as p
from tabulate import tabulate
import requests
import codecs
import sys
import os
from tech.tech import read_impact_hour_data, read_cstk_data, TECH
from tech.tech import ImpactHoursData, ImpactHoursFormula, Hatch, DandelionVoting
from template.config_tooltips import tooltips
#import tech.config_bounds as config_bounds
import data
load_dotenv()
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template('template/index.html')
# API settings
HCTI_API_ENDPOINT = "https://hcti.io/v1/image"
HCTI_API_USER_ID = os.environ.get('HCTI_API_USER_ID')
HCTI_API_KEY = os.environ.get('HCTI_API_KEY')
def load_app(config_file):
pn.config.sizing_mode = 'stretch_both'
impact_hour_data = read_impact_hour_data()
# ImpactHoursData
i = ImpactHoursData()
# TECH
t = TECH(total_impact_hours = impact_hour_data['Assumed IH'].sum(),
impact_hour_data=impact_hour_data, total_cstk_tokens=1000000,
config=config_file['tech'])
# ImpactHoursFormula
#impact_hours_rewards = ImpactHoursFormula(i.total_impact_hours, impact_hour_data_1)
#impact_rewards_view = pn.Column(impact_hours_rewards.impact_hours_rewards,
# impact_hours_rewards.redeemable,
# impact_hours_rewards.cultural_build_tribute)
# Hatch
cstk_data = read_cstk_data()
#hatch = Hatch(cstk_data, impact_hours_rewards.target_raise,
# i.total_impact_hours,
# impact_hours_rewards.target_impact_hour_rate)
# DandelionVoting
dandelion = DandelionVoting(17e6,config=config_file['dandelion_voting'])
# Import Params Button
import_params_button = pn.widgets.Button(name='Import params', button_type = 'primary')
import_description = pn.pane.Markdown('<h4>To import the parameters, click on the button below:</h4>')
# Share Button
comments = pn.widgets.TextAreaInput(name='Comments', max_length=1024, placeholder='Explain your thoughts on why you choose the params...')
share_button = pn.widgets.Button(name='Share your results on GitHub!', button_type = 'primary')
url = pn.widgets.TextInput(name='URL', value = '')
share_button.js_on_click(args={'target': url}, code='window.open(target.value)')
results_button = pn.widgets.Button(name='See your results', button_type = 'success')
def update_params_by_url_query():
queries = curdoc().session_context.request.arguments
queries = { i: j[0] for i, j in queries.items() }
if queries:
if 'ihminr' in queries:
t.min_raise = int(queries['ihminr'])
if 'ihmaxr' in queries:
t.max_raise = int(queries['ihmaxr'])
if 'hs' in queries:
t.impact_hour_slope = float(queries['hs'])
if 'maxihr' in queries:
t.maximum_impact_hour_rate = float(queries['maxihr'])
if 'ihtr' in queries:
t.target_raise = int(queries['ihtr'])
if 'hor' in queries:
t.hatch_oracle_ratio = float(queries['hor'])
if 'hpd' in queries:
t.hatch_period_days = int(queries['hpd'])
if 'her' in queries:
t.hatch_exchange_rate = float(queries['her'])
if 'ht' in queries:
t.hatch_tribute_percentage = int(queries['ht'])
if 'sr' in queries:
dandelion.support_required_percentage = int(queries['sr'])
if 'maq' in queries:
dandelion.minimum_accepted_quorum_percentage = int(queries['maq'])
if 'vdd' in queries:
dandelion.vote_duration_days = int(queries['vdd'])
if 'vbh' in queries:
dandelion.vote_buffer_hours = int(queries['vbh'])
if 'rqh' in queries:
dandelion.rage_quit_hours = int(queries['rqh'])
if 'tfx' in queries:
dandelion.tollgate_fee_xdai = float(queries['tfx'])
t.param.trigger('action') # Update dashboard
dandelion.param.trigger('action')
@pn.depends(results_button)
def update_input_output_pane(results_button_on):
if results_button_on:
input_output_pane = pn.pane.GIF('media/inputs_outputs.gif')
else:
input_output_pane = pn.pane.Markdown('')
return input_output_pane
@pn.depends(results_button)
def update_result_score(results_button_on):
if results_button_on:
t.param.trigger('action') # Update dashboard
dandelion.param.trigger('action')
data_table = {'Parameters': ["Target raise (wxDai)", "Maximum raise (wxDai)", "Minimum raise (wxDai)",
"Impact hour slope (wxDai/IH)", "Maximum impact hour rate (wxDai/IH)",
"Hatch oracle ratio (wxDai/CSTK)", "Hatch period (days)",
"Hatch exchange rate (TECH/wxDai)", "Hatch tribute (%)", "Support required (%)",
"Minimum accepted quorum (%)", "Vote duration (days)", "Vote buffer (hours)",
"Rage quit (hours)", "Tollgate fee (wxDai)"],
'Values': [int(t.target_raise), int(t.max_raise),
int(t.min_raise), t.impact_hour_slope,
t.maximum_impact_hour_rate, t.hatch_oracle_ratio,
t.hatch_period_days, t.hatch_exchange_rate, t.hatch_tribute_percentage,
dandelion.support_required_percentage, dandelion.minimum_accepted_quorum_percentage, dandelion.vote_duration_days,
dandelion.vote_buffer_hours, dandelion.rage_quit_hours, dandelion.tollgate_fee_xdai]}
df = | pd.DataFrame(data=data_table) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 14:24:09 2020
@author: hsauro
@author: joseph-hellerstein
"""
from SBstoat.namedTimeseries import NamedTimeseries, mkNamedTimeseries, TIME
import SBstoat.namedTimeseries as namedTimeseries
from SBstoat import _plotOptions as po
from SBstoat.timeseriesPlotter import TimeseriesPlotter
from SBstoat import timeseriesPlotter as tp
import numpy as np
import os
import pandas as pd
import unittest
import matplotlib
import matplotlib.pyplot as plt
IGNORE_TEST = False
IS_PLOT = False
DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_PATH = os.path.join(DIR, "tst_data.txt")
DEFAULT_NUM_ROW = 2
DEFAULT_NUM_COL = 3
DEFAULT_NUM_PLOT = 5
class TestTimeseriesPlotter(unittest.TestCase):
def setUp(self):
self.timeseries = NamedTimeseries(csvPath=TEST_DATA_PATH)
self.plotter = TimeseriesPlotter(isPlot=IS_PLOT)
def testConstructor1(self):
if IGNORE_TEST:
return
self.assertTrue(isinstance(self.plotter.isPlot, bool))
def testInitializeRowColumn(self):
if IGNORE_TEST:
return
def test(maxCol, **kwargs):
options = self.plotter._mkPlotOptionsMatrix(self.timeseries,
maxCol=maxCol, **kwargs)
if po.NUM_ROW in kwargs:
self.assertGreaterEqual(options.numRow, kwargs[po.NUM_ROW])
if po.NUM_COL in kwargs:
self.assertEqual(options.numCol, kwargs[po.NUM_COL])
#
test(3, **{})
test(3, **{po.NUM_COL: 3})
test(4, **{po.NUM_ROW: 2})
test(5, **{po.NUM_ROW: 2})
def testPlotSingle1(self):
if IGNORE_TEST:
return
self.plotter.plotTimeSingle(self.timeseries,
timeseries2=self.timeseries,
numCol=4,
marker=[None, '*'], alpha=[0.1, 0.8], color=["red", "g"],
titlePosition=[0.8, 0.5], titleFontsize=10)
self.plotter.plotTimeSingle(self.timeseries, numCol=4,
marker=[None, '*'])
self.plotter.plotTimeSingle(self.timeseries, numCol=4,
subplotWidthSpace=0.2, yticklabels=[])
self.plotter.plotTimeSingle(self.timeseries, columns=["S1", "S2", "S3"], numRow=2)
self.plotter.plotTimeSingle(self.timeseries, numCol=4)
self.plotter.plotTimeSingle(self.timeseries, numCol=2)
self.plotter.plotTimeSingle(self.timeseries, numRow=2, numCol=3, ylabel="xxx")
self.plotter.plotTimeSingle(self.timeseries, columns=["S1", "S2"])
def testPlotSingle5(self):
if IGNORE_TEST:
return
timeseries = self.timeseries.subsetColumns(["S1"])
dct = {}
indices = [i for i in range(len(timeseries)) if i % 4 == 0]
for col in timeseries.allColnames:
dct[col] = timeseries[col][indices]
df = | pd.DataFrame(dct) | pandas.DataFrame |
from itertools import cycle
from typing import List
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.base import RegressorMixin
from sklearn.linear_model import ElasticNet, LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools import add_constant
from tqdm import tqdm
__all__ = ['obtain_independent_variables', 'obtain_independent_variables_vif', 'obtain_important_variables_loo',
'obtain_uncorrelated_variables', 'predictor_importance_loo', 'elasticnet_plot']
def _print_if_verbose(message, verbose):
if verbose:
print(message)
def obtain_independent_variables(data, method='variance_inflation_factor', threshold=None, **kwargs):
"""Obtains independent variables recursively
Recursively selects features from the dataset so that the resulting
dataset does only consist of independent features. Independence is defined
by method which is passed as a parameter. Note that reasonable values
for threshold parameter strongly depend on the method used.
Args:
data: Dataframe
method: Method used to measure independence. Currently, supports computing
"variance_inflation_factor", "leave_one_out", "recursive_inclusion" or "correlation"
threshold: Threshold used to flag features as dependent (in some way) if
value of method exceeds this value. Default: 0.5 for "correlation",
1 for "leave_one_out", 0.1 for "recursive_inclusion" and 5 for "variance_inflation_factor"
kwargs: Only used for "leave_one_out" and "recursive_inclusion". In that case please specify
the dependent variable "y=..." as well as the model to use "model=..."
Returns:
List of all independent variables
"""
if method in ['variance_inflation_factor', 'vif']:
threshold = threshold or 5
return obtain_independent_variables_vif(data, threshold)
if method == 'correlation':
threshold = threshold or 0.5
return obtain_uncorrelated_variables(data, correlation_threshold=threshold)
if method in ['leave_one_out', 'loo']:
threshold = threshold or 1
return obtain_important_variables_loo(X=data, threshold=threshold, **kwargs)
if method in ['recursive_inclusion', 'ri']:
threshold = threshold or 0.1
return obtain_important_variables_ri(X=data, threshold=threshold, **kwargs)
raise ValueError("""Method not understood. Please specify either
'variance_inflation_factor', 'leave_one_out', 'recursive_inclusion' or 'correlation'.""")
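# --- Added illustrative usage sketch (not part of the original code; 'df' and 'target' are hypothetical) ---
# keep_vif = obtain_independent_variables(df, method='vif') # default threshold 5
# keep_corr = obtain_independent_variables(df, method='correlation', threshold=0.6)
# keep_loo = obtain_independent_variables(df.drop(columns='target'), method='loo', y=df['target'])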
def obtain_independent_variables_vif(data, threshold=5):
"""Obtains non-multicollinear variables by recursively computing variance_inflation_factors
Recursively selects features from the dataset so that the resulting
dataset does not have features with variance inflation factor larger than
specified threshold. Note that VIF(x) = \frac{1}{1 - R(x)^2}, where R(x)^2
is the coefficient of determination computed by regressing the feature x
onto the other features.
Args:
data: Dataframe
threshold: Features will be excluded if VIF exceeds this value
Returns:
List of all independent variables
"""
indep_variables = list()
for column_name in data.select_dtypes('number').columns:
variables = [column_name] + indep_variables
exog = add_constant(data[variables]).values
vif = variance_inflation_factor(exog, 1)
if vif <= threshold:
indep_variables.append(column_name)
return indep_variables
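# Added note: with VIF(x) = 1 / (1 - R(x)^2), the default threshold of 5 corresponds to
# R(x)^2 = 0.8, i.e. a feature is rejected once 80 % of its variance is explained by the
# features already kept; a threshold of 10 would correspond to R(x)^2 = 0.9.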
def obtain_uncorrelated_variables(data, correlation_threshold=0.5):
"""Computes pairwise correlation coefficients and determines uncorrelated variables
Recursively selects features from the dataset so that the resulting
dataset does not have features with pairwise correlation larger than
specified correlation_threshold.
Args:
data: Dataframe
correlation_threshold: Features will be excluded if pairwise correlation
exceeds this value
Returns:
List of all uncorrelated variables
"""
indep_variables = list()
for column_name in data.select_dtypes('number').columns:
variables = indep_variables + [column_name]
corrs = data[variables].corr()[column_name]
if (abs(corrs) > correlation_threshold).sum() <= 1:
indep_variables.append(column_name)
return indep_variables
def obtain_important_variables_loo(X: pd.DataFrame,
y: np.ndarray,
threshold: float = 1,
model: RegressorMixin = LinearRegression(),
verbose=False) -> List[str]:
"""Performs a leave-one-out ("loo") analysis to determine important features
Recursively drops features which the model supplied deems as irrelevant (to be precise,
the score loss in percent must be less than threshold) until no feature is irrelevant
enough to be omitted.
Args:
X: Data frame containing independent features.
y: Array containing the dependent feature
threshold: Minimum percentage that a feature must contribute to model score
to be contained in the final set
model: Model to use for the analysis. Note that the results may heavily depend
on the model chosen.
verbose: Print progress
Returns:
List of the feature names that remain after the analysis
"""
_print_if_verbose(f"CV score start: {cross_val_score(model, X, y).mean()}.", verbose)
while True:
loo_scores = predictor_importance_loo(X, y, model, verbose=verbose)
lowest_score = loo_scores.loo_score.iat[-1]
least_important_feature = loo_scores.feature.iat[-1]
if lowest_score > threshold:
_print_if_verbose(f"Not removing feature {least_important_feature} with score {lowest_score}.", verbose)
break
_print_if_verbose(f"Removed feature {least_important_feature} with score {lowest_score}.", verbose)
X = X.drop(columns=least_important_feature)
_print_if_verbose(f"CV score end: {cross_val_score(model, X, y).mean()}.", verbose)
return list(X.columns)
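# --- Added illustrative usage sketch (not part of the original code; names are hypothetical) ---
# selected = obtain_important_variables_loo(X_train, y_train, threshold=1, model=ElasticNet(alpha=0.1), verbose=True)
# keeps only the features whose removal would cost at least about 1 % of the
# cross-validated score relative to the model that still contains them.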
def predictor_importance_loo(X: pd.DataFrame,
y: np.ndarray,
model: RegressorMixin,
verbose: bool = True) -> pd.DataFrame:
"""Performs a leave-one-out ("loo") analysis to determine important features
Iterates over all features (column names in X) and determines the scores (R^2-values)
of the model fitted on the data without the feature. Features will then be ranked
according to the score loss that removing this feature implied, relative to the
score of the model on the full dataset. Scores are computed using k-fold cross-validation
The result dataset contains values equal to the percentage decrease in score, i.e.
100*(<score of model with feature> / <score of model without feature> -1)
(bigger is better).
Args:
X: Data frame containing independent features.
y: Array containing the dependent feature
model: Model to use for the analysis. Note that the results may heavily depend
on the model chosen.
verbose: print progress bar
Returns:
Data frame containing features and score quotients for both train and test set
"""
results = {
"feature": [],
"loo_score": [],
}
score_with_feature = cross_val_score(model, X, y).mean()
iter_set = tqdm(X.columns) if verbose else X.columns
for feature_name in iter_set:
X_wo_feature = X.drop(columns=[feature_name])
score_without_feature = cross_val_score(model, X_wo_feature, y).mean()
if score_without_feature > 0:
loo_score = 100 * (score_with_feature / score_without_feature - 1)
else:
loo_score = np.inf
results["feature"].append(feature_name)
results["loo_score"].append(loo_score)
return | pd.DataFrame(data=results) | pandas.DataFrame |
from collections import Counter
from mypymatch.Matcher import Matcher
import numpy as np
import pandas as pd
from datetime import date, timedelta
import datetime
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing as mp
from collections import defaultdict
import os
import re
import sys
import scipy.stats as stats
import sys
sys.path.append(sys.argv[0])
sys.setrecursionlimit(1000000)
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import warnings
warnings.filterwarnings(
"error",
message='Maximum number of iterations has been exceeded',
category=ConvergenceWarning)
#######################
def turnover_class(Y):
"""
Y: Rank_Turn / DayOut
Generate rules converting y to y': the nominal outcome value is converted into sub-classes:
heavy sell (100,000 dollar sell); sell; zero; buy; heavy buy.
Return Y with an added 'cls' column giving the sub-class assigned to each outcome value.
"""
col = Y.name
Y = pd.DataFrame(Y)
Y = Y.reset_index().sort_values(by=col) # to fast the speed
if col == 'Rank_Turn' or col =='ref':
turnover_threshold = 0.1
mask_z = (Y[col] == 1.0) # rank 1.0 means turnover = 0
mask_b = (Y[col] < 1.0) & (Y[col] >= turnover_threshold)
mask_hb = Y[col] < turnover_threshold
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_z, 'Zero')
Y.cls = Y.cls.mask(mask_b, 'Norm')
Y.cls = Y.cls.mask(mask_hb, 'Popular')
elif col == 'PopRef':
mask_p = (Y[col] == 1.0) # value=1 means Poppular
mask_n = (Y[col] == 0.0)
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_n, 'NonPopular')
Y.cls = Y.cls.mask(mask_p, 'Popular')
elif col == 'DayOut':
turnover_threshold = 50
mask_hs = Y[col] < -1 * turnover_threshold
mask_s = (Y[col] < 0) & (Y[col] >= (-1 * turnover_threshold))
mask_z = Y[col] == 0
mask_b = (Y[col] > 0) & (Y[col] <= turnover_threshold)
mask_hb = Y[col] > turnover_threshold
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_hs, 'HeavySell')
Y.cls = Y.cls.mask(mask_s, 'Sell')
Y.cls = Y.cls.mask(mask_z, 'Zero')
Y.cls = Y.cls.mask(mask_b, 'Buy')
Y.cls = Y.cls.mask(mask_hb, 'HeavyBuy')
return Y
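# --- Added illustrative note (not part of the original code) ---
# For the 'DayOut' outcome the masks above translate to:
# DayOut < -50 -> 'HeavySell', -50 <= DayOut < 0 -> 'Sell', DayOut == 0 -> 'Zero',
# 0 < DayOut <= 50 -> 'Buy', DayOut > 50 -> 'HeavyBuy'. A hypothetical call:
# labelled = turnover_class(df['DayOut']) # returns the outcome values plus a 'cls' column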
class MyRows:
def __init__(self, rows, outcome_col):
self.value = rows
self.dependent_name = outcome_col
self.dependent = rows[self.dependent_name]
self.dep_val = self.dependent
self.ref_name = None
global args
dep = args.dep
if dep =='mix':
self.ref_name = 'ref'
self.dep_val = rows['ref']
self.corr_data = self.value
def get_correlated_features(self, alpha=np.nan):
"""
Get un-correlated feature rows out from the data sample
Parameters:
df: pd.Dataframe, features columns + outcome columns
outcome_col: object, the column name of outcome
alpha: float, choice of significant level for t-test to keep the correlated variables.
----
return: df : pd.DataFrame ; correlated features + outcome col
"""
if np.isnan(alpha):
global args
alpha = args.alpha
df = self.value
outcome_col = self.dependent_name
#df = pd.get_dummies(df)
if pd.DataFrame.isna(df).any().any():
raise ValueError('Input feature dataframe contains NaN.')
if len(df) < 3:
return df
# change '-' in the column names into '_'
df.columns = df.columns.str.strip().str.replace('-', '_')
# only get numerical columns to check if
no_col = df.select_dtypes(
include=['int', 'float',
'int64',
'float64']).columns
if outcome_col in no_col:
no_col = no_col.drop(outcome_col)
if 'ref' in no_col:
no_col = no_col.drop('ref')
for col in no_col:
arr = df[col]
outcome = df[outcome_col]
corr, pvalue = stats.pearsonr(arr, outcome)
if pvalue > alpha:
# if fail to reject the null hypothesis that the correlation
# coefficient IS NOT significantly different from 0.
df = df.drop(col, axis=1) # remove the column
df = df.reset_index(drop=True)
self.corr_data = df
return df
def find_best_question(Rows, question_excluded):
"""Find the best question to ask by iterating over every feature / value
and calculating the information gain.
para: question_excluded, questions already asked during building the tree"""
Rows.get_correlated_features()
rows, outcome_col = Rows.corr_data, Rows.dependent_name
best_ceffect = 0 # keep track of the best information gain
best_question = None # keep train of the feature / value that produced it
question_list = rows.columns.drop(Rows.dependent_name)
if Rows.ref_name:
question_list = question_list.drop(Rows.ref_name)
qkeys, qcount = np.unique(question_excluded, return_counts=True)
qdict = defaultdict(list)
maxAskCount = 2 # delete the cols(questions) that are asked twice
for (c, k) in zip(qcount, qkeys):
qdict[c].append(k)
if maxAskCount in qdict.keys():
# if the col is used more than maxAskCount,
# remove from the questionlist
for item in qdict[maxAskCount]:
if item in question_list:
question_list = question_list.drop(item)
if len(question_list) == 0:
import warnings
warnings.warn('Find Best Question: Rows is empty')
return best_ceffect, best_question
# get the question list for processing
testQlist = []
for col in question_list: # for each feature
values = list(set(rows[col])) # unique values in the column
if is_numeric(
values[0]): # if too many numeric value in ValueSet,deduct some
global args
SplitCount = args.sp
if len(values) > SplitCount:
values = np.linspace(min(rows[col]), max(
rows[col]), SplitCount)[1:-1]
else:
if len(values) == 2:
values = values[:-1]
# if not a numeric question set and with unique set number of 2,
# it means one is the complimentary of the other, so cancel one
# here.
for val in values: # for each value
testQlist += [Question(col, val)]
# Start multiprocessing
cpu_cores = mp.cpu_count()
if cpu_cores > 10:
num_cores = int(np.floor(mp.cpu_count() / 4))
else:
num_cores = 1
# for q in tqdm(testQlist, desc='calculating p_value and gini for {} rows'.format(len(Rows.value))):
# q.cal_pvalue_gain(Rows)
def para_run(Q): # Q is a Question Instance
global args
alpha = args.alpha
Q.cal_causal_effect_gain(Rows, alpha)
return Q
resQlist = Parallel(n_jobs=num_cores)(delayed(para_run)(q)for q in tqdm(
testQlist, desc='calculating p_value and gini for {} rows'.format(len(Rows.value))))
res_df = pd.DataFrame(columns=['col', 'val', 'ceffect', 'gain'])
for q in resQlist:
res_df.loc[len(res_df), ['col', 'val', 'ceffect', 'gain']] = [
q.column, q.value, q.ceffect, q.gain]
#weights = [0.15, 0.85]
weights = [0.8, 0.2]
# pvalue the smaller the better; gain the bigger the better,here the
# q.gain is the 1-gain
# weighted rank over Causal Effect (bigger the better) and gini info gain
res_df['ranks'] = res_df[['ceffect', 'gain']].mul(weights).sum(1)
res_df = res_df.sort_values(
by=['ranks'],
ascending=False).reset_index(
drop=True)
best_question = Question(
res_df.col[0],
res_df.val[0])
best_ceffect = res_df.ceffect[0]
return best_ceffect, best_question
def is_numeric(value):
"""Test if a value is numeric."""
return isinstance(value, int) or isinstance(value, float)
class Question:
"""A Question is used to partition a dataset.
This class just records a 'column number' (e.g., 0 for Color) and a
'column value' (e.g., Green). The 'match' method is used to compare
the feature value in an example to the feature value stored in the
question. See the demo below.
"""
def __init__(self, column, value):
self.column = column
self.value = value
def match(self, example):
# Compare the feature value in an example to the
# feature value in this question.
val = example[self.column]
if is_numeric(val):
return val < self.value
else:
return val == self.value
def cal_causal_effect_gain(self, Rows, alpha):
Rows.get_correlated_features()
rows, outcome_col = Rows.corr_data, Rows.dependent_name
# try splitting the dataset
true_rows, false_rows = partition(rows, self)
TrRows = MyRows(true_rows, Rows.dependent_name)
FlRows = MyRows(false_rows, Rows.dependent_name)
# Skip this split if it doesn't divide the
# dataset.
if len(true_rows) == 0 or len(false_rows) == 0:
self.gain = 0
self.ceffect = 0
return
try:
# Get Prospensity_matched dataset
matchdf = prospensity_match(rows, self, outcome_col)
except BaseException: # if failed match, then use the original dataset
Yvar = self.column
Yvar_new = varnameQuestion(self)
true_rows, false_rows = partition(rows, self)
true_rows = true_rows.rename(columns={Yvar: Yvar_new})
false_rows = false_rows.rename(columns={Yvar: Yvar_new})
true_rows[Yvar_new] = 1
false_rows[Yvar_new] = 0
matchdf = true_rows.append(false_rows)
try:
# Calculate the p-value from this split
cau_effect, pvalue = match_ttest(matchdf, self, outcome_col)
except BaseException:
pvalue = 1.0
cau_effect = 0
# Calculate the information gain from this split
current_uncertainty = gini_r(Rows)
gain = info_gain(TrRows, FlRows, current_uncertainty)
self.gain = gain
# if pass the significant test ( two groups are significantly
# different)
if pvalue <= alpha:
self.ceffect = cau_effect
else:
self.ceffect = 0
def __repr__(self):
# This is just a helper method to print
# the question in a readable format.
condition = "=="
if is_numeric(self.value):
condition = "<"
return "{0} {1} {2:.2f}" .format(
self.column, condition, self.value)
return "%s %s %s" % (
self.column, condition, str(self.value))
def partition(rows, question):
"""Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
"""
true_rows = | pd.DataFrame(columns=rows.columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Analyzing Student's Behavior and Model suggestion for classification levels
# ### <NAME>
# > #### This Data Science project was made under Capstone Data Science IBM Certification Program.
# ## Table of contents
# * [Introduction: Business Problem](#introduction)
# * [Data](#data)
# * [Methodology](#methodology)
# * [Analysis](#analysis)
# * [Results and Discussion](#results)
# * [Conclusion](#conclusion)
# # 1. Introduction <a name="introduction"></a>
# A description of the problem and a discussion of the background
#
# The Internet revolution brought more than social media and faster information exchange. It also brought a generation of people who study through digital environments. In this context, online education evolved quickly and a real transformation of societies began. Nowadays, people in distant places and poorer countries can use technology to access information, and Massive Open Online Courses (MOOCs) have played a major role in this.
# MOOCs can bring people from all around the world together to gain understanding in a wide range of areas, delivering science and culture.
#
# It is also known that online learning suffers from massive unenrollment. Logistical barriers and a lack of motivation can make students leave. In this context, which features are related to this behavior? How can we understand the student's situation and predict churn or low grades?
# I think that is a relevant point. If MOOC platforms can understand and predict their students, I think it is possible to manage student churn and find a way to give them the motivation they need.
#
# With this in mind, I searched for MOOC-generated student data to investigate and draw some conclusions about the theme.
#
# # 2. Data
# A description of the data and how it will be used to solve the problem
#
# To guide my investigation, I was looking for a dataset that helps to understand the students' behavior, motivation and related characteristics, in order to better understand why and how an enrollment ends with a given result. So, it is important to find a dataset with some key features like grade, gender, enrollment level, and so on. Location data is also important to understand cultural traits, which could be explored with location APIs.
# Guided by the exploratory analysis, I'll be able to build a model to predict the students' behavior or results.
# After querying related datasets in order to find those with the most useful columns, I found a nice dataset on Kaggle called "Students' Academic Performance Dataset". You can check it here: https://www.kaggle.com/aljarah/xAPI-Edu-Data.
# The data comprises 16 columns with aggregated information about over 480 students of a learning platform called Kalboard360. The details are shown in the next section.
#
# ## 2.1 Data Structure
# As previously mentioned, this dataset includes 16 columns:
#
# 1. Gender - student's gender (nominal: 'Male' or 'Female’)
#
# 2. Nationality- student's nationality (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 3. Place of birth- student's Place of birth (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 4. Educational Stages- educational level student belongs (nominal: ‘lowerlevel’,’MiddleSchool’,’HighSchool’)
#
# 5. Grade Levels- grade student belongs (nominal: ‘G-01’, ‘G-02’, ‘G-03’, ‘G-04’, ‘G-05’, ‘G-06’, ‘G-07’, ‘G-08’, ‘G-09’, ‘G-10’, ‘G-11’, ‘G-12 ‘)
#
# 6. Section ID- classroom student belongs (nominal:’A’,’B’,’C’)
#
# 7. Topic- course topic (nominal:’ English’,’ Spanish’, ‘French’,’ Arabic’,’ IT’,’ Math’,’ Chemistry’, ‘Biology’, ‘Science’,’ History’,’ Quran’,’ Geology’)
#
# 8. Semester- school year semester (nominal:’ First’,’ Second’)
#
# 9. Parent responsible for student (nominal:’mom’,’father’)
#
# 10. Raised hand- how many times the student raises his/her hand on classroom (numeric:0-100)
#
# 11. Visited resources- how many times the student visits a course content(numeric:0-100)
#
# 12. Viewing announcements-how many times the student checks the new announcements(numeric:0-100)
#
# 13. Discussion groups- how many times the student participate on discussion groups (numeric:0-100)
#
# 14. Parent Answering Survey- parent answered the surveys which are provided from school or not (nominal:’Yes’,’No’)
#
# 15. Parent School Satisfaction- the Degree of parent satisfaction from school(nominal:’Yes’,’No’)
#
# 16. Student Absence Days-the number of absence days for each student (nominal: above-7, under-7)
#
# The most important characteristic of this dataset is that it includes the parents' data, which is a nice approach to understanding the student.
# # 3. Methodology
#
# The first step is data exploration and insight gathering in order to better understand the data and its columns. The purpose of this exploratory analysis is to identify hidden features and understand the relations between the features.
# Next, I'll do a descriptive analysis by building a dataset for a clustering algorithm. This way, the understanding of the data supports more powerful decision making focused on the students' behaviors.
# Finally, I'll do the predictive analysis by building a dataset with the best features for a supervised learning algorithm that predicts the students' behavior under certain conditions, which achieves my final objective.
# # 4. Analysis
# As mentioned, this section explores the data in order to compose the clustering dataset.
# ### 4.1 Exploratory Analysis
# In[110]:
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# In[111]:
dataset = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
dataset.head(5)
# In the context to understand the student and his results, setting up a dataframe with certain columns
# In[112]:
df = dataset[['gender','PlaceofBirth','StageID','Topic','raisedhands','VisITedResources','AnnouncementsView','Discussion', 'ParentAnsweringSurvey','ParentschoolSatisfaction','StudentAbsenceDays', 'Class']]
df.head()
# Try to understand the results from countries
# In[113]:
df.groupby(['ParentschoolSatisfaction'])['Class'].value_counts(normalize=True)
# In[114]:
df.groupby(['ParentAnsweringSurvey'])['ParentschoolSatisfaction'].value_counts(normalize=True)
# It seems that parents who aren't involved in answering the school's surveys are likely to become unsatisfied with the school. This may mean that well-informed parents better understand the student's enrollment and reality and are therefore more satisfied.
# ### Question: What is the relation between active parents and student's classification?
# In[115]:
df.groupby(['ParentAnsweringSurvey'])['Class'].value_counts(normalize=True)
# So, the parents' active behavior definitely plays an important role in the student's growth.
# ## Understanding student's behavior
# Next, it is important to know which characteristics are linked to students' success. So, we're going to test the related features.
# In[116]:
df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']]
df2.head()
# ### Question: What's the relation between raising hands and classification?
# In[117]:
df2['raisedhands'] = pd.cut(df2.raisedhands, bins=3, labels=np.arange(3), right=False)
df2.groupby(['raisedhands'])['Class'].value_counts(normalize=True)
# So, it seems that students who rarely raise their hands are most likely to have a Low classification. On the other side, a high frequency of raising hands is linked to a higher classification.
# Next, we're going to check the act of visiting the course resources.
# In[118]:
df2['VisITedResources'] = pd.cut(df2.VisITedResources, bins=3, labels=np.arange(3), right=False)
df2.groupby(['VisITedResources'])['Class'].value_counts(normalize=True)
# Low levels of resource exploration mean lower classification levels. High levels of visiting resources are linked to higher classification.
# In[119]:
df2['AnnouncementsView'] = pd.cut(df2.AnnouncementsView, bins=3, labels=np.arange(3), right=False)
df2.groupby(['AnnouncementsView'])['Class'].value_counts(normalize=True)
# Viewing the announcements makes the students more prepared for the tasks, and they are more likely to plan the week's assessments. A high viewing frequency is indeed linked to better classifications.
# In[120]:
df2['Discussion'] = pd.cut(df2.Discussion, bins=3, labels=np.arange(3), right=False)
df2.groupby(['Discussion'])['Class'].value_counts(normalize=True)
# Surprisingly, discussion frequency is only weakly linked to higher results, at least directly. Of course, higher interaction levels occur among higher-graded students, but the data shows that discussion is a secondary factor.
# Concluding this step of the analysis, we're going to relate the absence rate to the grade level
# In[121]:
df2.groupby(['StudentAbsenceDays'])['Class'].value_counts(normalize=True)
# As expected, the lower the student's absence, the higher their classification tends to be. Let's keep this feature.
# ### 4.1.1 Clustering DataSet
# Now that we know what are the important features to understand the student's behavior and classification, we're going to build a dataset for a K-Means algorithm, which will show the student's cluster.
# To make the construction process easiest to understand, we're going to reimplement the dataset building phases.
# In[122]:
df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']]
df2.tail()
# Let's identify the correlations between the student's actions
# In[123]:
correlation = df2[['raisedhands','VisITedResources','AnnouncementsView','Discussion']].corr(method='pearson')
correlation
# This makes clear that our best-correlated features are raisedhands and VisITedResources, which will compose our model dataset further on.
# So, we need a <b>one-hot encoding</b> of the columns gender, absence and class
# In[124]:
df2 = pd.concat([df2,pd.get_dummies(df2['gender'], prefix='gender_')], axis=1)
df2 = pd.concat([df2,pd.get_dummies(df2['StudentAbsenceDays'], prefix='absence_')], axis=1)
df2 = pd.concat([df2,pd.get_dummies(df2['Class'], prefix='class_')], axis=1)
df2.drop(['gender'], axis = 1,inplace=True)
df2.drop(['StudentAbsenceDays'], axis = 1,inplace=True)
df2.drop(['Class'], axis = 1,inplace=True)
df2.head()
# In[125]:
from sklearn.cluster import KMeans
from sklearn import preprocessing
# Based on the previous exploratory analysis, we identified that raised hands and visited resources are the features most strongly associated with a high classification, so both features will compose our feature matrix X
# In[126]:
X = df2[['raisedhands', 'VisITedResources']].values
#NORMALIZE OUR ARRAY
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(X)
#GET X AXIS
X = | pd.DataFrame(x_scaled) | pandas.DataFrame |
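# A minimal sketch of the next step, assuming the scaled feature matrix X built above:
# fit K-Means and attach one cluster label per student. n_clusters=3 is an assumption
# (roughly one cluster per broad performance level), not taken from the original notebook.
kmeans = KMeans(n_clusters=3, random_state=0)
X['cluster'] = kmeans.fit_predict(x_scaled)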
import mcradar as mcr
import xarray as xr
import numpy as np
import pandas as pd
import os
from IPython.core.debugger import Tracer ; debug=Tracer() #insert this line somewhere to debug
def getApectRatio(radii):
# input radii [mm]
# Auer et al. 1970 (The Dimension of Ice Crystals in Natural Clouds)
diameter = 2 * radii *1e3 # calculating the diameter in [mu m]
h = 2.020 * (diameter)**0.449
as_ratio = h / diameter
return as_ratio
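# Quick sanity check of the Auer et al. relation above, assuming a 0.5 mm radius:
# diameter = 1000 mu m, h = 2.020 * 1000**0.449 ~ 45 mu m, so as_ratio ~ 0.045.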
#reading the data file
dataPath = "data"
fileName = "mass2fr_0300-0600min_avtstep_5.ncdf"
filePath = os.path.join(dataPath, fileName)
data = xr.open_dataset(filePath)
#fake time
time = np.ones_like(data.dim_SP_all_av150)
#calculating the aspect ratio
sPhi = np.ones_like(data.dim_SP_all_av150)*np.nan
sPhi = getApectRatio(data.diam * 1e3)
sPhi[data.mm.values > 1]=0.6
#converting to pandas dataframe
dataTable = data.to_dataframe()
dataTable = dataTable.rename(columns={'m_tot':'mTot', 'height':'sHeight',
'vt':'vel', 'diam':'dia','xi':'sMult'})
#settings
dicSettings = mcr.loadSettings(dataPath='_', freq=np.array([9.6e9]),
maxHeight=3000, minHeight=2500,
heightRes=5)
#adding required variables
dataTable['radii'] = dataTable['dia'] / 2.# particle radius in m
dataTable['time']=time
PSD_method="bin" #"bin": count SP and their multiplicity in height and size bins; "1D_KDE": #DOES NOT WORK YET!! 1-dimensional kernel density estimate, "discrete_SP": calculate scattering properties of each SP individually
if PSD_method in ["bin","1D_KDE"]:
#some definitions
nbins = 100 #number of used bins
n_heights = 50
model_top = 3850 #[m] #TODO: read this from output
minR =-4 #minimum R considered (log10-space)
maxR = 0 #maximum R considered (log10-space)
area_box = 5 #[m2] #TODO: read this from output
Rgrid=np.logspace(minR,maxR,nbins)
Rgrid_log=np.linspace(minR,maxR,nbins)
Rgrid_logdiff=Rgrid_log[1]-Rgrid_log[0]
heightvec_bound = np.linspace(0,model_top,n_heights)
#heightvec_bound = np.linspace(2900,3000,5) #TODO: remove (only for debugging)
Vbox = area_box*(heightvec_bound[1]-heightvec_bound[0]) #[m3]
reducedDataTable = pd.DataFrame()
for i_height in range(len(heightvec_bound)-1):
print("calculate h=",heightvec_bound[i_height])
#initialize as many dataFrames as there are categories
#one category must have the same particle properties (mass, velocity) at the same size
dataBINmono = pd.DataFrame(data={"Rgrid": Rgrid}) #initialize dataFrame
dataBINagg = | pd.DataFrame(data={"Rgrid": Rgrid}) | pandas.DataFrame |
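# A hedged sketch (assumed, not from the original script) of the "bin" counting step
# described above: select the SPs in one height layer and histogram their radii,
# weighted by the multiplicity sMult, onto Rgrid to get counts per size bin and box.
# in_layer = dataTable[(dataTable.sHeight >= heightvec_bound[i_height]) &
#                      (dataTable.sHeight < heightvec_bound[i_height + 1])]
# counts, _ = np.histogram(in_layer['radii'], bins=Rgrid, weights=in_layer['sMult'])
# number_conc = counts / Vbox  # [1/m3] per size bin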
"""
test_subcomp_b.py
Contains tests for subcomp_b_process_emissions_factors,
which averages emissions factors for DR days and all days.
"""
import unittest
import pandas as pd
import pandas.testing as pdt
from subcomp_b_process_emissions_factors import seasonal_ave, annual_ave, \
get_hour_ave, alldays_oneyear_seasonal_ave, get_oneyear_hour_ave, subcomp_b_runall
from emissions_parameters import DIR_TESTDATA_IN
df_emissions_data = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/emissions_data.xlsx')
df_dr_hours_winter = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_winter.xlsx')
df_dr_hours_spring = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_spring.xlsx')
df_dr_hours_summer = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_summer.xlsx')
df_dr_hours_fall = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_fall.xlsx')
df_dr_hours_data = | pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_data.xlsx') | pandas.read_excel |
"""Utility function relevant to the graphnet.data package."""
from ast import Is
from glob import glob
import os
import numpy as np
import pandas as pd
from pathlib import Path
import re
from typing import List, Tuple, Union
import sqlite3
import sqlalchemy
def run_sql_code(database: str, code: str):
"""executes SQLite coded
Args:
database (str): path to databases
code (str): SQLite code
"""
conn = sqlite3.connect(database)
c = conn.cursor()
c.executescript(code)
c.close()
return
def save_to_sql(df, table_name, database):
"""saves a dataframe df to a table table_name in SQLite database database. Table must exist already.
Args:
df (pandas.DataFrame): dataframe with data to be stored in sqlite table
table_name (str): name of table. Must exist already
database (SQLite database): path to SQLite database
"""
engine = sqlalchemy.create_engine("sqlite:///" + database)
df.to_sql(table_name, con=engine, index=False, if_exists="append")
engine.dispose()
return
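# A minimal usage sketch (table name, columns and database path are assumptions):
# the table is created first with run_sql_code, then filled with save_to_sql.
# run_sql_code("events.db", "CREATE TABLE IF NOT EXISTS truth (event_no INTEGER, pid INTEGER);")
# save_to_sql(pd.DataFrame({"event_no": [1, 2], "pid": [12, 14]}), "truth", "events.db")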
def get_desired_event_numbers(
db_path,
desired_size,
fraction_noise=0,
fraction_nu_e=0,
fraction_muon=0,
fraction_nu_mu=0,
fraction_nu_tau=0,
seed=0,
):
assert (
fraction_nu_e
+ fraction_muon
+ fraction_nu_mu
+ fraction_nu_tau
+ fraction_noise
== 1.0
), "Sum of fractions not equal to one."
rng = np.random.RandomState(seed=seed)
fracs = [
fraction_noise,
fraction_muon,
fraction_nu_e,
fraction_nu_mu,
fraction_nu_tau,
]
numbers_desired = [int(x * desired_size) for x in fracs]
pids = [1, 13, 12, 14, 16]
with sqlite3.connect(db_path) as con:
total_query = "SELECT event_no FROM truth WHERE abs(pid) IN {}".format(
tuple(pids)
)
tot_event_nos = | pd.read_sql(total_query, con) | pandas.read_sql |
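# A hedged usage sketch of get_desired_event_numbers (database path is an assumption):
# request 1000 events split evenly between nu_e and nu_mu; the fractions must sum to 1.
# event_nos = get_desired_event_numbers("events.db", desired_size=1000,
#                                       fraction_nu_e=0.5, fraction_nu_mu=0.5)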
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from copy import deepcopy
import torch
from torch.nn import Linear, MSELoss, CrossEntropyLoss, LogSoftmax, NLLLoss, functional as F
from torch.optim import SGD, Adam, RMSprop
from sklearn.model_selection import KFold
df = pd.read_excel("Supplementary file 2-2. Clinical data.xlsx", 'Sheet1')
df_train = df.iloc[:, 2:7]
df_train = pd.get_dummies(df_train)
label_columns = ['Histology_' + i for i in ['Normal', 'Inflammation', 'LGIN', 'HGIN', 'SM1', 'MM', 'SM2 or deeper']]
labels = df_train.loc[:, label_columns]
labels_gt = np.argmax(np.array(labels), 1)
data = df_train.drop(label_columns, axis=1)
data = np.array(data)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.20, random_state=10000, shuffle=True)
df_IPCLsNet = | pd.read_excel(p + "\\Clinical_results.xlsx", 'Sheet1') | pandas.read_excel |
#%%
import numpy as np
import pandas as pd
import skimage.io
import skimage.filters
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '../../')
import act.viz
import act.image
import glob
colors = act.viz.pub_style()
# %%
files = glob.glob('../../../data/images/20200226_xap_distal_testing/*/pos*/')
dfs = []
for i, f in enumerate(files):
# Parse the identifiers.
strain, atc, xan = f.split('/')[-3].split('_')
atc = float(atc.split('ngml')[0])
xan = float(xan.split('mgml')[0])
if strain == 'xd':
strain='distal'
# Load the images.
yfp = skimage.io.imread(glob.glob(f'{f}/*c2.tif')[0])
mch = skimage.io.imread(glob.glob(f'{f}/*c3.tif')[0])
seg = act.image.log_segmentation(yfp, thresh=0.0001)
# Remove small objects
seg = skimage.morphology.remove_small_objects(seg)
# label
label = skimage.measure.label(seg)
# Compute the fluorescence region properties.
yfp_props = skimage.measure.regionprops(label, yfp)
mch_props = skimage.measure.regionprops(label, mch)
mean_yfp = []
mean_mch = []
area = []
for i, _ in enumerate(yfp_props):
mean_yfp.append(yfp_props[i]['mean_intensity'])
mean_mch.append(mch_props[i]['mean_intensity'])
area.append(yfp_props[i]['area'])
# Assemble the data frame.
df = pd.DataFrame(np.array([mean_yfp, mean_mch, area]).T,
columns=['mean_yfp', 'mean_mch', 'area_pix'])
df['strain'] = strain
df['atc_ngml'] = atc
df['xan_mgml'] = xan
dfs.append(df)
df = | pd.concat(dfs) | pandas.concat |
import itertools
import logging
import math
from datetime import datetime, timedelta, timezone
import boto3
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import awswrangler as wr
from ._utils import ensure_data_types, get_df_list
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["c1", "c2"]])
def test_parquet_metadata_partitions_dataset(path, partition_cols):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5], "c2": [6, 7, 8]})
wr.s3.to_parquet(df=df, path=path, dataset=True, partition_cols=partition_cols)
columns_types, partitions_types = wr.s3.read_parquet_metadata(path=path, dataset=True)
partitions_types = partitions_types if partitions_types is not None else {}
assert len(columns_types) + len(partitions_types) == len(df.columns)
assert columns_types.get("c0") == "bigint"
assert (columns_types.get("c1") == "bigint") or (partitions_types.get("c1") == "string")
assert (columns_types.get("c1") == "bigint") or (partitions_types.get("c1") == "string")
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["value", "c2"]])
def test_parquet_cast_string_dataset(path, partition_cols):
df = pd.DataFrame({"id": [1, 2, 3], "value": ["foo", "boo", "bar"], "c2": [4, 5, 6], "c3": [7.0, 8.0, 9.0]})
wr.s3.to_parquet(df, path, dataset=True, partition_cols=partition_cols, dtype={"id": "string", "c3": "string"})
df2 = wr.s3.read_parquet(path, dataset=True).sort_values("id", ignore_index=True)
assert str(df2.id.dtypes) == "string"
assert str(df2.c3.dtypes) == "string"
assert df.shape == df2.shape
for col, row in tuple(itertools.product(df.columns, range(3))):
assert str(df[col].iloc[row]) == str(df2[col].iloc[row])
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_read_parquet_filter_partitions(path, use_threads):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [0, 1, 2], "c2": [0, 0, 1]})
wr.s3.to_parquet(df, path, dataset=True, partition_cols=["c1", "c2"], use_threads=use_threads)
df2 = wr.s3.read_parquet(
path, dataset=True, partition_filter=lambda x: True if x["c1"] == "0" else False, use_threads=use_threads
)
assert df2.shape == (1, 3)
assert df2.c0.iloc[0] == 0
assert df2.c1.astype(int).iloc[0] == 0
assert df2.c2.astype(int).iloc[0] == 0
df2 = wr.s3.read_parquet(
path,
dataset=True,
partition_filter=lambda x: True if x["c1"] == "1" and x["c2"] == "0" else False,
use_threads=use_threads,
)
assert df2.shape == (1, 3)
assert df2.c0.iloc[0] == 1
assert df2.c1.astype(int).iloc[0] == 1
assert df2.c2.astype(int).iloc[0] == 0
df2 = wr.s3.read_parquet(
path, dataset=True, partition_filter=lambda x: True if x["c2"] == "0" else False, use_threads=use_threads
)
assert df2.shape == (2, 3)
assert df2.c0.astype(int).sum() == 1
assert df2.c1.astype(int).sum() == 1
assert df2.c2.astype(int).sum() == 0
def test_parquet(path):
df_file = pd.DataFrame({"id": [1, 2, 3]})
path_file = f"{path}test_parquet_file.parquet"
df_dataset = pd.DataFrame({"id": [1, 2, 3], "partition": ["A", "A", "B"]})
df_dataset["partition"] = df_dataset["partition"].astype("category")
path_dataset = f"{path}test_parquet_dataset"
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_file, path=path_file, mode="append")
with pytest.raises(wr.exceptions.InvalidCompression):
wr.s3.to_parquet(df=df_file, path=path_file, compression="WRONG")
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, partition_cols=["col2"])
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, description="foo")
with pytest.raises(wr.exceptions.InvalidArgumentValue):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, partition_cols=["col2"], dataset=True, mode="WRONG")
wr.s3.to_parquet(df=df_file, path=path_file)
assert len(wr.s3.read_parquet(path=path_file, use_threads=True, boto3_session=None).index) == 3
assert len(wr.s3.read_parquet(path=[path_file], use_threads=False, boto3_session=boto3.DEFAULT_SESSION).index) == 3
paths = wr.s3.to_parquet(df=df_dataset, path=path_dataset, dataset=True)["paths"]
with pytest.raises(wr.exceptions.InvalidArgument):
assert wr.s3.read_parquet(path=paths, dataset=True)
assert len(wr.s3.read_parquet(path=path_dataset, use_threads=True, boto3_session=boto3.DEFAULT_SESSION).index) == 3
dataset_paths = wr.s3.to_parquet(
df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite"
)["paths"]
assert len(wr.s3.read_parquet(path=path_dataset, use_threads=True, boto3_session=None).index) == 3
assert len(wr.s3.read_parquet(path=dataset_paths, use_threads=True).index) == 3
assert len(wr.s3.read_parquet(path=path_dataset, dataset=True, use_threads=True).index) == 3
wr.s3.to_parquet(df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite")
wr.s3.to_parquet(
df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite_partitions"
)
def test_parquet_validate_schema(path):
df = pd.DataFrame({"id": [1, 2, 3]})
path_file = f"{path}0.parquet"
wr.s3.to_parquet(df=df, path=path_file)
df2 = pd.DataFrame({"id2": [1, 2, 3], "val": ["foo", "boo", "bar"]})
path_file2 = f"{path}1.parquet"
wr.s3.to_parquet(df=df2, path=path_file2)
df3 = wr.s3.read_parquet(path=path, validate_schema=False)
assert len(df3.index) == 6
assert len(df3.columns) == 3
with pytest.raises(wr.exceptions.InvalidSchemaConvergence):
wr.s3.read_parquet(path=path, validate_schema=True)
def test_parquet_uint64(path):
wr.s3.delete_objects(path=path)
df = pd.DataFrame(
{
"c0": [0, 0, (2 ** 8) - 1],
"c1": [0, 0, (2 ** 16) - 1],
"c2": [0, 0, (2 ** 32) - 1],
"c3": [0, 0, (2 ** 64) - 1],
"c4": [0, 1, 2],
}
)
df["c0"] = df.c0.astype("uint8")
df["c1"] = df.c1.astype("uint16")
df["c2"] = df.c2.astype("uint32")
df["c3"] = df.c3.astype("uint64")
wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", partition_cols=["c4"])
df = wr.s3.read_parquet(path=path, dataset=True)
assert len(df.index) == 3
assert len(df.columns) == 5
assert df.c0.max() == (2 ** 8) - 1
assert df.c1.max() == (2 ** 16) - 1
assert df.c2.max() == (2 ** 32) - 1
assert df.c3.max() == (2 ** 64) - 1
assert df.c4.astype("uint8").sum() == 3
wr.s3.delete_objects(path=path)
def test_parquet_metadata_partitions(path):
path = f"{path}0.parquet"
df = pd.DataFrame({"c0": [0, 1, 2], "c1": ["3", "4", "5"], "c2": [6.0, 7.0, 8.0]})
wr.s3.to_parquet(df=df, path=path, dataset=False)
columns_types, _ = wr.s3.read_parquet_metadata(path=path, dataset=False)
assert len(columns_types) == len(df.columns)
assert columns_types.get("c0") == "bigint"
assert columns_types.get("c1") == "string"
assert columns_types.get("c2") == "double"
def test_parquet_cast_string(path):
df = pd.DataFrame({"id": [1, 2, 3], "value": ["foo", "boo", "bar"]})
path_file = f"{path}0.parquet"
wr.s3.to_parquet(df, path_file, dtype={"id": "string"}, sanitize_columns=False)
df2 = wr.s3.read_parquet(path_file)
assert str(df2.id.dtypes) == "string"
assert df.shape == df2.shape
for col, row in tuple(itertools.product(df.columns, range(3))):
assert df[col].iloc[row] == df2[col].iloc[row]
def test_to_parquet_file_sanitize(path):
df = pd.DataFrame({"C0": [0, 1], "camelCase": [2, 3], "c**--2": [4, 5]})
path_file = f"{path}0.parquet"
wr.s3.to_parquet(df, path_file, sanitize_columns=True)
df2 = wr.s3.read_parquet(path_file)
assert df.shape == df2.shape
assert list(df2.columns) == ["c0", "camel_case", "c_2"]
assert df2.c0.sum() == 1
assert df2.camel_case.sum() == 5
assert df2.c_2.sum() == 9
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_to_parquet_file_dtype(path, use_threads):
df = pd.DataFrame({"c0": [1.0, None, 2.0], "c1": [pd.NA, pd.NA, pd.NA]})
file_path = f"{path}0.parquet"
wr.s3.to_parquet(df, file_path, dtype={"c0": "bigint", "c1": "string"}, use_threads=use_threads)
df2 = wr.s3.read_parquet(file_path, use_threads=use_threads)
assert df2.shape == df.shape
assert df2.c0.sum() == 3
assert str(df2.c0.dtype) == "Int64"
assert str(df2.c1.dtype) == "string"
@pytest.mark.parametrize("filename_prefix", [None, "my_prefix"])
@pytest.mark.parametrize("use_threads", [True, False])
def test_to_parquet_filename_prefix(compare_filename_prefix, path, filename_prefix, use_threads):
test_prefix = "my_prefix"
df = pd.DataFrame({"col": [1, 2, 3], "col2": ["A", "A", "B"]})
file_path = f"{path}0.parquet"
# If Dataset is False, parquet file should never start with prefix
filename = wr.s3.to_parquet(
df=df, path=file_path, dataset=False, filename_prefix=filename_prefix, use_threads=use_threads
)["paths"][0].split("/")[-1]
assert not filename.startswith(test_prefix)
# If Dataset is True, parquet file starts with prefix if one is supplied
filename = wr.s3.to_parquet(
df=df, path=path, dataset=True, filename_prefix=filename_prefix, use_threads=use_threads
)["paths"][0].split("/")[-1]
compare_filename_prefix(filename, filename_prefix, test_prefix)
# Partitioned
filename = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
filename_prefix=filename_prefix,
partition_cols=["col2"],
use_threads=use_threads,
)["paths"][0].split("/")[-1]
compare_filename_prefix(filename, filename_prefix, test_prefix)
# Bucketing
filename = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
filename_prefix=filename_prefix,
bucketing_info=(["col2"], 2),
use_threads=use_threads,
)["paths"][0].split("/")[-1]
compare_filename_prefix(filename, filename_prefix, test_prefix)
assert filename.endswith("bucket-00000.snappy.parquet")
def test_read_parquet_map_types(path):
df = pd.DataFrame({"c0": [0, 1, 1, 2]}, dtype=np.int8)
file_path = f"{path}0.parquet"
wr.s3.to_parquet(df, file_path)
df2 = wr.s3.read_parquet(file_path)
assert str(df2.c0.dtype) == "Int8"
df3 = wr.s3.read_parquet(file_path, map_types=False)
assert str(df3.c0.dtype) == "int8"
@pytest.mark.parametrize("use_threads", [True, False, 2])
@pytest.mark.parametrize("max_rows_by_file", [None, 0, 40, 250, 1000])
def test_parquet_with_size(path, use_threads, max_rows_by_file):
df = get_df_list()
df = pd.concat([df for _ in range(100)])
paths = wr.s3.to_parquet(
df=df,
path=path + "x.parquet",
index=False,
dataset=False,
max_rows_by_file=max_rows_by_file,
use_threads=use_threads,
)["paths"]
if max_rows_by_file is not None and max_rows_by_file > 0:
assert len(paths) >= math.floor(300 / max_rows_by_file)
assert len(paths) <= math.ceil(300 / max_rows_by_file)
df2 = wr.s3.read_parquet(path=path, dataset=False, use_threads=use_threads)
ensure_data_types(df2, has_list=True)
assert df2.shape == (300, 19)
assert df.iint8.sum() == df2.iint8.sum()
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_index_and_timezone(path, use_threads):
df = pd.DataFrame({"c0": [datetime.utcnow(), datetime.utcnow()], "par": ["a", "b"]}, index=["foo", "boo"])
df["c1"] = pd.DatetimeIndex(df.c0).tz_localize(tz="US/Eastern")
wr.s3.to_parquet(df, path, index=True, use_threads=use_threads, dataset=True, partition_cols=["par"])
df2 = wr.s3.read_parquet(path, use_threads=use_threads, dataset=True)
assert df[["c0", "c1"]].equals(df2[["c0", "c1"]])
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_index_recovery_simple_int(path, use_threads):
df = pd.DataFrame({"c0": np.arange(10, 1_010, 1)}, dtype="Int64")
paths = wr.s3.to_parquet(df, path, index=True, use_threads=use_threads, dataset=True, max_rows_by_file=300)["paths"]
assert len(paths) == 4
df2 = wr.s3.read_parquet(f"{path}*.parquet", use_threads=use_threads)
assert df.equals(df2)
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_index_recovery_simple_str(path, use_threads):
df = pd.DataFrame({"c0": [0, 1, 2, 3, 4]}, index=["a", "b", "c", "d", "e"], dtype="Int64")
paths = wr.s3.to_parquet(df, path, index=True, use_threads=use_threads, dataset=True, max_rows_by_file=1)["paths"]
assert len(paths) == 5
df2 = wr.s3.read_parquet(f"{path}*.parquet", use_threads=use_threads)
assert df.equals(df2)
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_index_recovery_partitioned_str(path, use_threads):
df = pd.DataFrame(
{"c0": [0, 1, 2, 3, 4], "par": ["foo", "boo", "bar", "foo", "boo"]}, index=["a", "b", "c", "d", "e"]
)
df["c0"] = df["c0"].astype("Int64")
df["par"] = df["c0"].astype("category")
paths = wr.s3.to_parquet(
df, path, index=True, use_threads=use_threads, dataset=True, partition_cols=["par"], max_rows_by_file=1
)["paths"]
assert len(paths) == 5
df2 = wr.s3.read_parquet(f"{path}*.parquet", use_threads=use_threads, dataset=True)
assert df.shape == df2.shape
assert df.c0.equals(df2.c0)
assert df.dtypes.equals(df2.dtypes)
assert df.index.equals(df2.index)
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_range_index_recovery_simple(path, use_threads):
df = pd.DataFrame({"c0": np.arange(10, 15, 1)}, dtype="Int64", index=pd.RangeIndex(start=5, stop=30, step=5))
paths = wr.s3.to_parquet(df, path, index=True, use_threads=use_threads, dataset=True, max_rows_by_file=3)["paths"]
assert len(paths) == 2
df2 = wr.s3.read_parquet(f"{path}*.parquet", use_threads=use_threads)
assert df.reset_index(level=0).equals(df2.reset_index(level=0))
@pytest.mark.parametrize("use_threads", [True, False, 2])
@pytest.mark.parametrize("name", [None, "foo"])
def test_range_index_recovery_pandas(path, use_threads, name):
df = pd.DataFrame({"c0": np.arange(10, 15, 1)}, dtype="Int64", index=pd.RangeIndex(start=5, stop=30, step=5))
df.index.name = name
path_file = f"{path}0.parquet"
df.to_parquet(path_file)
df2 = wr.s3.read_parquet([path_file], use_threads=use_threads)
assert df.reset_index(level=0).equals(df2.reset_index(level=0))
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_multi_index_recovery_simple(path, use_threads):
df = | pd.DataFrame({"c0": [0, 1, 2], "c1": ["a", "b", "c"], "c2": [True, False, True], "c3": [0, 1, 2]}) | pandas.DataFrame |
"""
Functions to connect to and process data from SolarForecastArbiter API
"""
import datetime as dt
import json
import logging
import requests
from urllib3 import Retry
import numpy as np
import pandas as pd
from solarforecastarbiter import datamodel
from solarforecastarbiter.utils import merge_ranges
from solarforecastarbiter.io.utils import (
json_payload_to_observation_df,
json_payload_to_forecast_series,
observation_df_to_json_payload,
forecast_object_to_json,
adjust_timeseries_for_interval_label,
serialize_timeseries,
HiddenToken, ensure_timestamps,
load_report_values)
BASE_URL = 'https://api.solarforecastarbiter.org'
logger = logging.getLogger(__name__)
# Limit used to limit the amount of retrieved with a single request. Used
# to break up large requests into smaller requests to avoid timeout.
GET_VALUES_LIMIT = '365D'
def request_cli_access_token(user, password, **kwargs):
"""Request an API access token from Auth0.
Parameters
----------
user : str
Username
password : str
Password
kwargs
Passed to requests.post. Useful for handling SSL certificates,
navigating proxies, or other network complications. See requests
documentation for details.
Returns
-------
access_token : str
"""
req = requests.post(
'https://solarforecastarbiter.auth0.com/oauth/token',
data={'grant_type': 'password', 'username': user,
'audience': BASE_URL,
'password': password,
'client_id': 'c16EJo48lbTCQEhqSztGGlmxxxmZ4zX7'},
**kwargs)
req.raise_for_status()
return req.json()['access_token']
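# A minimal usage sketch (credentials are placeholders): obtain a token and use it
# to build the APISession defined below.
# token = request_cli_access_token("user@example.com", "password")
# session = APISession(token)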
class APISession(requests.Session):
"""
Subclass of requests.Session to handle requets to the SolarForecastArbiter
API. The Session provides connection pooling, automatic retries for certain
types of requets, default timeouts, and a default base url. Responses are
converted into the appropriate class from datamodel.py or a pandas object.
Parameters
----------
access_token : string or HiddenToken
The base64 encoded Bearer token to authenticate with the API
default_timeout : float or tuple, optional
A default timeout to add to all requests. If a tuple, the first element
is the connection timeout and the second is the read timeout.
Default is 10 seconds for connection and 60 seconds to read from the
server.
base_url : string
URL to use as the base for endpoints to APISession
Notes
-----
To pass the API calls through a proxy server, set either the HTTP_PROXY or
HTTPS_PROXY environment variable. If necessary, you can also specify a SSL
certificate using the REQUESTS_CA_BUNDLE environment variable. For example,
on a Linux machine:
>>> export HTTPS_PROXY=https://some_corporate_proxy.com:8080
>>> export REQUESTS_CA_BUNDLE=/path/to/certificates/cert.crt
>>> python script_that_calls_api.py
For more information, see the "Advanced Usage" documentation for the
requests package: https://requests.readthedocs.io/en/master/user/advanced/
"""
def __init__(self, access_token, default_timeout=(10, 60),
base_url=None):
super().__init__()
if isinstance(access_token, HiddenToken):
access_token = access_token.token
self.headers = {'Authorization': f'Bearer {access_token}',
'Accept': 'application/json',
'Accept-Encoding': 'gzip,deflate'}
self.default_timeout = default_timeout
self.base_url = base_url or BASE_URL
# set requests to automatically retry
retries = Retry(total=10, connect=3, read=3, status=3,
status_forcelist=[408, 423, 444, 500, 501, 502, 503,
504, 507, 508, 511, 599],
backoff_factor=0.5,
raise_on_status=False,
remove_headers_on_redirect=[])
adapter = requests.adapters.HTTPAdapter(max_retries=retries)
self.mount(self.base_url, adapter)
def request(self, method, url, *args, **kwargs):
"""
Modify the default Session.request to add in the default timeout
and make requests relative to the base_url. Users will likely
use the standard get and post methods instead of calling this directly.
Raises
------
requests.exceptions.HTTPError
When an error is encountered in when making the request to the API
"""
if url.startswith('/'):
url = f'{self.base_url}{url}'
else:
url = f'{self.base_url}/{url}'
# set a default timeout so we never hang indefinitely
if 'timeout' not in kwargs:
kwargs['timeout'] = self.default_timeout
result = super().request(method, url, *args, **kwargs)
if result.status_code >= 400:
raise requests.exceptions.HTTPError(
f'{result.status_code} API Request Error: {result.reason} for '
f'url: {result.url} and text: {result.text}',
response=result)
return result
def _process_site_dict(self, site_dict):
if (
site_dict.get('modeling_parameters', {}).get(
'tracking_type', '') in ('fixed', 'single_axis')
):
return datamodel.SolarPowerPlant.from_dict(site_dict)
else:
return datamodel.Site.from_dict(site_dict)
def get_site(self, site_id):
"""
Retrieve site metadata for site_id from the API and process
into the proper model.
Parameters
----------
site_id : string
UUID of the site to retrieve metadata for
Returns
-------
datamodel.Site or datamodel.SolarPowerPlant
Dataclass with all the metadata for the site depending on if
the Site is a power plant with modeling parameters or not.
"""
req = self.get(f'/sites/{site_id}')
site_dict = req.json()
return self._process_site_dict(site_dict)
def list_sites(self):
"""
List all the sites available to a user.
Returns
-------
list of datamodel.Sites and datamodel.SolarPowerPlants
"""
req = self.get('/sites/')
return [self._process_site_dict(site_dict)
for site_dict in req.json()]
def list_sites_in_zone(self, zone):
"""
List all the sites available to a user in the given climate zone.
Parameters
----------
zone : str
Returns
-------
list of datamodel.Sites and datamodel.SolarPowerPlants
"""
req = self.get(f'/sites/in/{zone}')
return [self._process_site_dict(site_dict)
for site_dict in req.json()]
def search_climatezones(self, latitude, longitude):
"""
Find all climate zones that the location is in.
Parameters
----------
latitude : float, degrees North
longitude : float, degrees East of the Prime Meridian
Returns
-------
list
A list of the climate zones the location is in
"""
req = self.get('/climatezones/search',
params={'latitude': latitude,
'longitude': longitude})
return [r['name'] for r in req.json()]
def create_site(self, site):
"""
Create a new site in the API with the given Site model
Parameters
----------
site : datamodel.Site or datamodel.SolarPowerPlant
Site to create in the API
Returns
-------
datamodel.Site or datamodel.SolarPowerPlant
With the appropriate parameters such as site_id set by the API
"""
site_dict = site.to_dict()
for k in ('site_id', 'provider', 'climate_zones'):
site_dict.pop(k, None)
site_json = json.dumps(site_dict)
req = self.post('/sites/', data=site_json,
headers={'Content-Type': 'application/json'})
new_id = req.text
return self.get_site(new_id)
def get_observation(self, observation_id):
"""
Get the metadata from the API for a given observation_id
in an Observation object.
Parameters
----------
observation_id : string
UUID of the observation to retrieve
Returns
-------
datamodel.Observation
"""
req = self.get(f'/observations/{observation_id}/metadata')
obs_dict = req.json()
site = self.get_site(obs_dict['site_id'])
obs_dict['site'] = site
return datamodel.Observation.from_dict(obs_dict)
def list_observations(self):
"""
List the observations a user has access to.
Returns
-------
list of datamodel.Observation
"""
req = self.get('/observations/')
obs_dicts = req.json()
if isinstance(obs_dicts, dict):
obs_dicts = [obs_dicts]
if len(obs_dicts) == 0:
return []
sites = {site.site_id: site for site in self.list_sites()}
out = []
for obs_dict in obs_dicts:
obs_dict['site'] = sites.get(obs_dict['site_id'])
out.append(datamodel.Observation.from_dict(obs_dict))
return out
def create_observation(self, observation):
"""
Create a new observation in the API with the given Observation model
Parameters
----------
observation : datamodel.Observation
Observation to create in the API
Returns
-------
datamodel.Observation
With the appropriate parameters such as observation_id set by the
API
"""
obs_dict = observation.to_dict()
obs_dict.pop('observation_id')
obs_dict.pop('provider')
site = obs_dict.pop('site')
obs_dict['site_id'] = site['site_id']
obs_json = json.dumps(obs_dict)
req = self.post('/observations/', data=obs_json,
headers={'Content-Type': 'application/json'})
new_id = req.text
return self.get_observation(new_id)
def _process_fx(self, fx_dict, sites={}):
if fx_dict['site_id'] is not None:
if fx_dict['site_id'] in sites:
fx_dict['site'] = sites[fx_dict['site_id']]
else:
fx_dict['site'] = self.get_site(fx_dict['site_id'])
elif fx_dict['aggregate_id'] is not None:
fx_dict['aggregate'] = self.get_aggregate(fx_dict['aggregate_id'])
if fx_dict['variable'] == "event":
return datamodel.EventForecast.from_dict(fx_dict)
else:
return datamodel.Forecast.from_dict(fx_dict)
def get_forecast(self, forecast_id):
"""
Get Forecast metadata from the API for the given forecast_id
Parameters
----------
forecast_id : string
UUID of the forecast to get metadata for
Returns
-------
datamodel.Forecast
"""
req = self.get(f'/forecasts/single/{forecast_id}/metadata')
fx_dict = req.json()
return self._process_fx(fx_dict)
def list_forecasts(self):
"""
List all Forecasts a user has access to.
Returns
-------
list of datamodel.Forecast
"""
req = self.get('/forecasts/single/')
fx_dicts = req.json()
if isinstance(fx_dicts, dict):
fx_dicts = [fx_dicts]
if len(fx_dicts) == 0:
return []
sites = {site.site_id: site for site in self.list_sites()}
out = []
for fx_dict in fx_dicts:
out.append(self._process_fx(fx_dict, sites=sites))
return out
def create_forecast(self, forecast):
"""
Create a new forecast in the API with the given Forecast model
Parameters
----------
forecast : datamodel.Forecast
Forecast to create in the API
Returns
-------
datamodel.Forecast
With the appropriate parameters such as forecast_id set by the API
"""
fx_dict = forecast.to_dict()
fx_dict.pop('forecast_id')
fx_dict.pop('provider')
site = fx_dict.pop('site')
agg = fx_dict.pop('aggregate')
if site is None and agg is not None:
fx_dict['aggregate_id'] = agg['aggregate_id']
else:
fx_dict['site_id'] = site['site_id']
fx_json = json.dumps(fx_dict)
req = self.post('/forecasts/single/', data=fx_json,
headers={'Content-Type': 'application/json'})
new_id = req.text
return self.get_forecast(new_id)
def _process_prob_forecast(self, fx_dict, sites={}):
if fx_dict['site_id'] is not None:
if fx_dict['site_id'] in sites:
fx_dict['site'] = sites[fx_dict['site_id']]
else:
fx_dict['site'] = self.get_site(fx_dict['site_id'])
elif fx_dict['aggregate_id'] is not None:
fx_dict['aggregate'] = self.get_aggregate(fx_dict['aggregate_id'])
cvs = []
for constant_value_dict in fx_dict['constant_values']:
# the API just gets the group's attributes for the
# single constant value forecasts, so avoid
# those excess calls
cv_dict = fx_dict.copy()
cv_dict.update(constant_value_dict)
cvs.append(
datamodel.ProbabilisticForecastConstantValue.from_dict(
cv_dict))
fx_dict['constant_values'] = cvs
return datamodel.ProbabilisticForecast.from_dict(fx_dict)
def list_probabilistic_forecasts(self):
"""
List all ProbabilisticForecasts a user has access to.
Returns
-------
list of datamodel.ProbabilisticForecast
"""
req = self.get('/forecasts/cdf/')
fx_dicts = req.json()
if isinstance(fx_dicts, dict):
fx_dicts = [fx_dicts]
if len(fx_dicts) == 0:
return []
sites = {site.site_id: site for site in self.list_sites()}
out = []
for fx_dict in fx_dicts:
out.append(self._process_prob_forecast(fx_dict, sites))
return out
def get_probabilistic_forecast(self, forecast_id):
"""
Get ProbabilisticForecast metadata from the API for the given
forecast_id.
Parameters
----------
forecast_id : string
UUID of the forecast to get metadata for
Returns
-------
datamodel.ProbabilisticForecast
"""
# add /metadata after
# https://github.com/SolarArbiter/solarforecastarbiter-api/issues/158
req = self.get(f'/forecasts/cdf/{forecast_id}')
fx_dict = req.json()
return self._process_prob_forecast(fx_dict)
def get_probabilistic_forecast_constant_value(self, forecast_id,
site=None, aggregate=None):
"""
Get ProbabilisticForecastConstantValue metadata from the API for
the given forecast_id.
Parameters
----------
forecast_id : string
UUID of the forecast to get metadata for
site : datamodel.Site or None
If provided, the object will be attached to the returned
value (faster). If None, object will be created from site
metadata obtained from the database (slower).
aggregate : datamodel.Aggregate or None
If provided and the forecast is of an aggregate, the object
will be attached to the return value.
Returns
-------
datamodel.ProbabilisticForecastConstantValue
Raises
------
ValueError
If provided site.site_id does not match database record of
forecast object's linked site_id.
"""
# add /metadata after
# https://github.com/SolarArbiter/solarforecastarbiter-api/issues/158
req = self.get(f'/forecasts/cdf/single/{forecast_id}')
fx_dict = req.json()
agg_id = fx_dict['aggregate_id']
site_id = fx_dict['site_id']
if site_id is not None:
if site is None:
site = self.get_site(site_id)
elif site.site_id != site_id:
raise ValueError('Supplied site.site_id does not match site_id '
f'from database. site.site_id: {site.site_id}'
f' database site_id: {site_id}')
fx_dict['site'] = site
elif agg_id is not None:
if aggregate is None:
aggregate = self.get_aggregate(agg_id)
elif aggregate.aggregate_id != agg_id:
raise ValueError(
'Supplied aggregate.aggregate_id does not match '
'aggregate from database. aggregate.aggregate_id: '
f'{aggregate.aggregate_id}'
f' database aggregate_id: {agg_id}')
fx_dict['aggregate'] = aggregate
return datamodel.ProbabilisticForecastConstantValue.from_dict(fx_dict)
def create_probabilistic_forecast(self, forecast):
"""
Create a new forecast in the API with the given
ProbabilisticForecast model
Parameters
----------
forecast : datamodel.ProbabilisticForecast
Probabilistic forecast to create in the API
Returns
-------
datamodel.ProbabilisticForecast
With the appropriate parameters such as forecast_id set by the API
"""
fx_dict = forecast.to_dict()
fx_dict.pop('forecast_id')
fx_dict.pop('provider')
site = fx_dict.pop('site')
agg = fx_dict.pop('aggregate')
if site is None and agg is not None:
fx_dict['aggregate_id'] = agg['aggregate_id']
else:
fx_dict['site_id'] = site['site_id']
# fx_dict['constant_values'] is tuple of dict representations of
# all ProbabilisticForecastConstantValue objects in the
# ProbabilisticForecast. We need to extract just the numeric
# values from these dicts and put them into a list for the API.
constant_values_fxs = fx_dict.pop('constant_values')
constant_values = [fx['constant_value'] for fx in constant_values_fxs]
fx_dict['constant_values'] = constant_values
fx_json = json.dumps(fx_dict)
req = self.post('/forecasts/cdf/', data=fx_json,
headers={'Content-Type': 'application/json'})
new_id = req.text
return self.get_probabilistic_forecast(new_id)
def get_observation_time_range(self, observation_id):
"""
Get the minimum and maximum timestamps for observation values.
Parameters
----------
observation_id : string
UUID of the observation object.
Returns
-------
tuple of (pandas.Timestamp, pandas.Timestamp)
The minimum and maximum timestamps for values of the observation.
Values without an explicit timezone from the API are assumed to be
UTC.
"""
req = self.get(f'/observations/{observation_id}/values/timerange')
data = req.json()
mint = pd.Timestamp(data['min_timestamp'])
if mint.tzinfo is None and pd.notna(mint):
mint = mint.tz_localize('UTC')
maxt = pd.Timestamp(data['max_timestamp'])
if maxt.tzinfo is None and pd.notna(maxt):
maxt = maxt.tz_localize('UTC')
return mint, maxt
def _process_gaps(self, url, start, end):
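"""Query a .../values/gaps endpoint between start and end and return the gaps
as a list of timezone-aware (start, end) Timestamp pairs; timestamps without
an explicit timezone are assumed to be UTC."""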
req = self.get(url,
params={'start': start,
'end': end})
gaps = req.json()
out = []
for g in gaps['gaps']:
tstamp = pd.Timestamp(g['timestamp'])
nextstamp = pd.Timestamp(g['next_timestamp'])
# results should never be null, but skip anyway
if pd.isna(tstamp) or pd.isna(nextstamp):
continue # pragma: no cover
if tstamp.tzinfo is None:
tstamp = tstamp.tz_localize('UTC')
if nextstamp.tzinfo is None:
nextstamp = nextstamp.tz_localize('UTC')
out.append((tstamp, nextstamp))
return out
def _fixup_gaps(self, timerange, gaps, start, end):
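"""Combine the API-reported gaps with the stored value time range so that any
period between start/end and the first/last stored value is also reported as
a gap, clipping everything to the requested start/end window."""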
out = []
if pd.isna(timerange[0]) or pd.isna(timerange[1]):
return [(start, end)]
else:
if timerange[0] > start:
if end < timerange[0]:
return [(start, end)]
else:
out.append((start, timerange[0]))
if timerange[1] < end:
if start > timerange[1]:
return [(start, end)]
else:
out.append((timerange[1], end))
if len(gaps) != 0:
if gaps[0][0] < start:
gaps[0] = (start, gaps[0][1])
if gaps[-1][1] > end:
gaps[-1] = (gaps[-1][0], end)
out.extend(gaps)
return list(merge_ranges(out))
@ensure_timestamps('start', 'end')
def get_observation_value_gaps(self, observation_id, start, end):
"""Get any gaps in observation data from start to end.
In addition to querying the /observations/{observation_id}/values/gaps
endpoint, this function also queries the observation timerange to
return all gaps from start to end.
Parameters
----------
observation_id : string
UUID of the observation object.
start : timelike object
Start time in interval to retrieve values for
end : timelike object
End time of the interval
Returns
-------
list of (pd.Timestamp, pd.Timestamp)
Of (start, end) gaps in the observations from the last timestamp
of a valid observation to the next valid observation timestamp.
Interval label is not accounted for.
"""
gaps = self._process_gaps(
f'/observations/{observation_id}/values/gaps', start, end)
trange = self.get_observation_time_range(observation_id)
return self._fixup_gaps(trange, gaps, start, end)
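# For example, if values are stored only from 2023-01-05 to 2023-01-20 and the API
# reports a single gap (2023-01-10, 2023-01-11), requesting 2023-01-01 to 2023-01-31
# would yield roughly:
# [(2023-01-01, 2023-01-05), (2023-01-10, 2023-01-11), (2023-01-20, 2023-01-31)]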
@ensure_timestamps('start', 'end')
def get_observation_values_not_flagged(
self, observation_id, start, end, flag, timezone='UTC'):
"""
Get the dates where the observation series is NOT flagged with
the given flag/bitmask.
Parameters
----------
observation_id : string
UUID of the observation object.
start : timelike object
Start time in interval to retrieve values for
end : timelike object
End time of the interval
flag : int
Days that are not flagged with this flag are returned. This can
be a compound flag/bitmask of the flags found in
:py:mod:`solarforecastarbiter.validation.quality_mapping`,
in which case days that do not have all flags present
are returned.
timezone : str, default "UTC"
The timezone to localize the data before computing the date
Returns
-------
dates : numpy.array of type datetime64[D]
"""
req = self.get(f'/observations/{observation_id}/values/unflagged',
params={'start': start,
'end': end,
'timezone': timezone,
'flag': flag})
data = req.json()
dates = data['dates']
return np.array([dt.date.fromisoformat(d) for d in dates],
dtype='datetime64[D]')
@ensure_timestamps('start', 'end')
def get_observation_values(
self, observation_id, start, end, interval_label=None,
request_limit=GET_VALUES_LIMIT):
"""
Get observation values from start to end for observation_id from the
API
Parameters
----------
observation_id : string
UUID of the observation object.
start : timelike object
Start time in interval to retrieve values for
end : timelike object
End time of the interval
interval_label : str or None
If beginning, ending, adjust the data to return only data that is
valid between start and end. If None or instant, return any data
between start and end inclusive of the endpoints.
request_limit : string
Timedelta string describing maximum request length. Defaults to 365
days.
Returns
-------
pandas.DataFrame
With a datetime index and (value, quality_flag) columns
Raises
------
ValueError
If start or end cannot be converted into a Pandas Timestamp
"""
out = self.chunk_value_requests(
f'/observations/{observation_id}/values',
start,
end,
parse_fn=json_payload_to_observation_df,
request_limit=request_limit,
)
return adjust_timeseries_for_interval_label(
out, interval_label, start, end)
def get_forecast_time_range(self, forecast_id):
"""
Get the miniumum and maximum timestamps for forecast values.
Parameters
----------
forecast_id : string
UUID of the forecast object.
Returns
-------
tuple of (pandas.Timestamp, pandas.Timestamp)
The minimum and maximum timestamps for values of the forecast.
Values without an explicit timezone from the API are assumed to be
UTC.
"""
req = self.get(f'/forecasts/single/{forecast_id}/values/timerange')
data = req.json()
mint = | pd.Timestamp(data['min_timestamp']) | pandas.Timestamp |
"""
The functions in this file are used to extract data and download images of the human brain ISH data set from
the Allen Brain Atlas.
Link: http://human.brain-map.org/ish/search
"""
import urllib.request
import xml.etree.ElementTree as et
import os
import glob
import time
import sys
import pandas as pd
from human_ISH_config import *
if (not os.path.exists(os.path.join(DATA_DIR , STUDY))):
os.mkdir(os.path.join(DATA_DIR , STUDY))
XML_DIR = os.path.join(DATA_DIR , STUDY , "xml_files")
if (os.path.exists(XML_DIR)):
print ("xml_files folder already exists.")
else:
os.mkdir(XML_DIR)
IMAGES_DIR = os.path.join(DATA_DIR, STUDY, "images")
if (os.path.exists(IMAGES_DIR)):
print ("images folder already exists.")
else:
os.mkdir(IMAGES_DIR)
def progressbar(it, prefix="", size=60, file=sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
def get_study_xml_file():
"""
:return: the path to the xml file of the chosen study if the xml file exists. If not, it will return None.
"""
study_xml_file = os.path.join(DATA_DIR, STUDY, STUDY.lower()+".xml")
if (not os.path.exists(study_xml_file)):
return None
return study_xml_file
def get_specimen_id_list(study_xml_file):
"""
The study xml file contains the list of specimen IDs corresponding to that study.
This function returns a list of the specimen IDs.
:param study_xml_file: the xml file corresponding to the study
:return: list of strings. Each string is a specimen ID.
"""
print ("getting the list of specimen IDs within this study ...")
list_of_specimen_ids = []
tree = et.parse(study_xml_file)
root = tree.getroot()
specimens = root.find('specimens')
all_specimen = specimens.findall('specimen')
for item in all_specimen:
list_of_specimen_ids.append(item.find('id').text)
return list_of_specimen_ids
def construct_xml_file_per_specimen_id(specimen_id):
"""
This function constructs the Allen Brain website's url of the xml pages of the given specimen ID.
It opens that url and returns the HTTP response containing the xml data (writing the data to a local file is currently commented out).
The xml data contains info on the experiments that have been performed on this specimen.
:param specimen_id: the specimen ID for which we want to fetch the xml data
:return: the urllib response holding the xml data for this specimen ID
"""
print ("processing specimen " + specimen_id + " ...")
url_to_xml = "http://human.brain-map.org/api/v2/data/query.xml?criteria=model::Specimen%5Bid$in" + specimen_id + "%5D," \
"rma::include,donor(age),structure,data_sets(genes),rma::options%5Bnum_rows$eq%27all%27%5D"
if (os.path.exists(os.path.join(XML_DIR , specimen_id ))):
print ("(the folder for specimen "+ specimen_id +" already exists)")
else:
os.mkdir(os.path.join(XML_DIR , specimen_id ))
specimen_id_xml_file = os.path.join(XML_DIR , specimen_id , specimen_id + ".xml")
response = urllib.request.urlopen(url_to_xml)
"""
with open(specimen_id_xml_file, 'w') as f:
f.write(response.read().decode('utf-8'))
"""
return response #specimen_id_xml_file
def get_experiment_id_list(specimen_xml_file, specimen_id):
"""
For the given specimen xml file, this function goes through the file and retrieves the list of experiments on this specimen ID.
Each experiment corresponds to a certain gene. In each experiment more than one slice may have been evaluated.
:param specimen_xml_file: the path to the specimen xml file.
:return: a list of strings. Each string is an experiment ID.
"""
print("getting the list of experiment IDs within " + specimen_id +" ...")
list_of_experiment_ids = []
tree = et.parse(specimen_xml_file)
root = tree.getroot()
specimens = root.find('specimens')
specimen = specimens.find('specimen')
datasets = specimen.find('data-sets')
all_datasets = datasets.findall('data-set')
for item in all_datasets:
list_of_experiment_ids.append(item.find('id').text)
return list_of_experiment_ids
def construct_xml_file_per_experiment_id(experiment_id, specimen_id):
"""
This function constructs the Allen Brain website's url of the xml pages of the given experiment ID.
It opens that url and returns the HTTP response containing the xml data (writing the data to a local file is currently commented out).
This experiment corresponds to a certain gene. More than one slice may have been evaluated in this experiment.
:param experiment_id: the experiment ID for which we want to make a xml file
:param specimen_id: the specimen ID of the specimen that this experiment belongs to
:return: the urllib response holding the xml data for this experiment ID
"""
url_to_xml = "http://human.brain-map.org/api/v2/data/query.xml?criteria=model::SectionDataSet%5Bid$in" + experiment_id \
+ "%5D,rma::include,genes,plane_of_section,section_images(associates,alternate_images),treatments," \
"specimen(donor(age,organism,conditions),structure),probes(orientation,predicted_sequence," \
"forward_primer_sequence,reverse_primer_sequence,products),rma::options%5Bnum_rows$eq%27all%27%5D"
experiment_id_xml_file = os.path.join(XML_DIR, specimen_id , experiment_id + ".xml")
response = urllib.request.urlopen(url_to_xml)
"""
with open(experiment_id_xml_file, 'w') as f:
f.write(response.read().decode('utf-8'))
"""
return response #experiment_id_xml_file
def get_image_id_list(experiment_xml_file, experiment_id):
"""
Each experiment corresponds to a certain gene. In each experiment more than one slice may have been evaluated.
This function returns a list of image IDs that correspond to a certain experiment.
:param experiment_xml_file: the xml file of the experiment
:return: list of strings. Each string is an image ID.
"""
#print("getting the list of image IDs within experiment " + experiment_id + " ...")
list_of_image_ids = []
tree = et.parse(experiment_xml_file)
root = tree.getroot()
section_data_sets = root.find('section-data-sets')
section_data_set = section_data_sets.find('section-data-set')
section_images = section_data_set.find('section-images')
all_section_images = section_images.findall('section-image')
for item in all_section_images:
list_of_image_ids.append(item.find('id').text)
return list_of_image_ids
def redownload_small_images(threshold = 6, downsample_rate=2.5):
"""
this function checks the size of the downloaded images and re-downloads images that are smaller than some threshold.
This is to make sure there are no corrupted images in the dataset.
Corrupted images could be a result of connection error while downloading the images.
:param downsample_rate: default is 2.5
:param threshold: a threshold to define small images
:return: None
"""
images_list = os.listdir(IMAGES_DIR)
print ("there are " + str(len(images_list)) + " existing images")
threshold = threshold * 1000000
for image_item in images_list:
image_path = os.path.join(IMAGES_DIR, image_item)
if os.path.getsize(image_path) < threshold:
#print (image_item + " is smaller than the threshold. Redownloading...")
print ("Redownloading...")
image_id = image_item.split(".")[0]
default_url = "http://api.brain-map.org/api/v2/image_download/" + image_id + "?downsample=" + str(downsample_rate)
urllib.request.urlretrieve(default_url, os.path.join(IMAGES_DIR, image_id + ".jpg"))
def download_images(image_list_to_download, downsample_rate=2.5, skip=True):
"""
Gets a list of image IDs to download. If skip==True, skips downloading image IDs that already exist in the directory.
:param image_list_to_download: list of image IDs to download
:param downsample_rate: downsampling rate to determine the final size of downloaded images. Default is 2.5
:return: None
"""
total_num_of_images = len(image_list_to_download)
print(str(total_num_of_images) + " images to download.")
existing_images_list = [f for f in glob.glob(os.path.join(IMAGES_DIR, "*.jpg"))]
num_of_existing_images = len(existing_images_list)
for i in range(num_of_existing_images):
existing_images_list[i] = existing_images_list[i].split("/")[-1].split(".")[0]
print(str(num_of_existing_images) + " images already exist.")
if skip:
remaining_images_list = list(set(image_list_to_download) - set(existing_images_list))
else:
remaining_images_list = image_list_to_download
num_of_remaining_images = len(remaining_images_list)
print("downloading " + str(num_of_remaining_images) + " images...")
# draw progress bar
for i in progressbar(range(num_of_remaining_images), "Downloading: ", 100):
time.sleep(0.1)
image_id = remaining_images_list[i]
default_url = "http://api.brain-map.org/api/v2/image_download/" + image_id + "?downsample=" + str(
downsample_rate)
urllib.request.urlretrieve(default_url, os.path.join(IMAGES_DIR, image_id + ".jpg"))
def add_experiment_images_to_image_info_csv(image_info_df, experiment_xml_file):
"""
Goes through the xml file of the experiment and adds the info of its images to the image info dataframe.
If the gene name is missing in the experiment, then this experiment is considered invalid.
:param image_info_df: the image info dataframe to which the new image rows are appended
:param experiment_xml_file: the xml file of the experiment whose images we want to add
:return: the image info dataframe and also a boolean which determines whether this experiment is invalid.
"""
invalid = False
tree = et.parse(experiment_xml_file)
root = tree.getroot()
section_data_sets = root.find('section-data-sets')
section_data_set = section_data_sets.find('section-data-set')
experiment_id = section_data_set.find('id').text
specimen_id = section_data_set.find('specimen-id').text
section_images = section_data_set.find('section-images')
genes = section_data_set.find('genes')
specimen = section_data_set.find('specimen')
donor = specimen.find('donor')
structure = specimen.find('structure')
donor_id = donor.find('name').text
donor_sex = donor.find('sex').text
donor_age = donor.find('age-id').text
pmi = donor.find('pmi').text
donor_race = donor.find('race-only').text
smoker = donor.find('smoker').text
chemotherapy = donor.find('chemotherapy').text
radiation_therapy = donor.find('radiation-therapy').text
tumor_status = donor.find('tumor-status').text
conditions = donor.find('conditions')
condition = conditions.find('condition')
description = condition.find('description').text
region_name = structure.find('name').text
region_acronym = structure.find('acronym').text
tissue_ph = specimen.find('tissue-ph').text
gene = genes.find('gene')
if gene == None:
print ("experiment " + experiment_id + " is invalid")
invalid = True
else:
gene_symbol = gene.find('acronym').text
gene_alias_tags = gene.find('alias-tags').text
entrez_id = gene.find('entrez-id').text
gene_original_name = gene.find('original-name').text
gene_original_symbol = gene.find('original-symbol').text
all_section_images = section_images.findall('section-image')
image_id_list = []
for item in all_section_images:
image_id_list.append(item.find('id').text)
for image_id in image_id_list:
new_row = pd.Series({'image_id': image_id, 'gene_symbol': gene_symbol, 'entrez_id': entrez_id,
'alias_tags': gene_alias_tags, 'original_name': gene_original_name,
'original_symbol': gene_original_symbol, 'experiment_id':experiment_id,'specimen_id': specimen_id,
'description': description, 'donor_id': donor_id, 'donor_sex': donor_sex,
'donor_age':donor_age, 'donor_race':donor_race,
'smoker' : smoker, 'chemotherapy': chemotherapy, 'radiation_therapy': radiation_therapy,
'tumor_status' : tumor_status,
'region':region_name, 'region_acronym': region_acronym,
'tissue_ph': tissue_ph, 'pmi': pmi })
image_info_df = image_info_df.append(new_row, ignore_index=True)
return image_info_df, invalid
def run():
print("STUDY: ", STUDY)
image_list_to_download = []
study_xml_file = get_study_xml_file()
if study_xml_file == None:
print("--- The study xml file does not exist. Make sure you download it from the Allen Brain website.")
else:
columns = ["image_id", "gene_symbol", "entrez_id", "experiment_id", "specimen_id"]
image_info_df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import pickle
import os
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import joblib
import datetime
import shap
import math
import xgboost as xgb
import matplotlib.pyplot as plt
from azureml.core.run import Run
from azureml.core import Dataset, Datastore, Model
from azureml.data.datapath import DataPath
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn import datasets, linear_model
from scipy import stats
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import levy_train_sql_functions as levy_train_functions
import generic_sql_functions as generic_train_functions
# Set up config of workspace and datastore
aml_workspace = Run.get_context().experiment.workspace
datastore = Datastore.get(aml_workspace, datastore_name='datamgmtdb')
run = Run.get_context()
run.log('levy_model_train','levy_model_train')
#prevent SettingWithCopyWarning message from appearing
pd.options.mode.chained_assignment = None
try:
# Create model build data into dataframe
# Create df with all accounts and early adopter flag
levy_model_accounts=levy_train_functions.levy_train_01_accounts(2)
run.log('Success 01','Accounts Success')
#except Exception:
run.log('EXCEPTION 01','Accounts Exception')
#try:
# select account_ids into list
account_list = levy_model_accounts['A3'].tolist()
#Remove brackets from list
sql_account_list=str(account_list)[1:-1]
#print(sql_account_list)
# Select all accounts data for three time periods in model build
levy_model_accounts_2020 = levy_model_accounts[(levy_model_accounts.A2 <'2020-04-01')]
levy_model_accounts_2020['cohort']='2020'
# months since apprenticeship account sign-up
levy_model_accounts_2020["months_since_sign_up"] = (pd.Timestamp(2020,4,1) - pd.to_datetime(levy_model_accounts_2020["A2"]))/ np.timedelta64(1, "M")
levy_model_accounts_2019 = levy_model_accounts[(levy_model_accounts.A2 <'2019-04-01')]
levy_model_accounts_2019['cohort']='2019'
levy_model_accounts_2019["months_since_sign_up"] = (pd.Timestamp(2019,4,1) - pd.to_datetime(levy_model_accounts_2019["A2"]))/ np.timedelta64(1, "M")
levy_model_accounts_2022 = levy_model_accounts[(levy_model_accounts.A2 <'2022-01-01')]
levy_model_accounts_2022['cohort']='2022'
levy_model_accounts_2022["months_since_sign_up"] = (pd.Timestamp(2022,1,1) - pd.to_datetime(levy_model_accounts_2022["A2"]))/ np.timedelta64(1, "M")
# Add all sets of accounts data into one
levy_model_set=pd.concat([levy_model_accounts_2022,levy_model_accounts_2020,levy_model_accounts_2019])
# make the months since sign-up discrete for analysis purposes
levy_model_set["months_since_sign_up2"] =levy_model_set["months_since_sign_up"].apply(np.floor)
run.log('Success 01a','Accounts Success')
#except Exception:
run.log('EXCEPTION 01a','Accounts Exception')
#try:
# 2018/2019 cohort Part 1
levy_model_set_2018_2019_part1=levy_train_functions.levy_train_02_levy_model_set_2018_2019_part1(sql_account_list)
run.log('Success 02','Commitments 2018/19 Part 1 Success')
#except Exception:
run.log('EXCEPTION 02','Commitments 2018/19 Part 1 Exception')
#try:
# 2018/2019 cohort Part 2
levy_model_set_2018_2019_part2=levy_train_functions.levy_train_03_levy_model_set_2018_2019_part2(sql_account_list)
run.log('Success 03','Commitments 2018/19 Part 2 Success')
#except Exception:
run.log('EXCEPTION 03','Commitments 2018/19 Part 2 Exception')
#try:
# 2019/2020 cohort Part 1
levy_model_set_2019_2020_part1=levy_train_functions.levy_train_04_levy_model_set_2019_2020_part1(sql_account_list)
run.log('Success 04','Commitments 2019/20 Part 1 Success')
#except Exception:
run.log('EXCEPTION 04','Commitments 2019/20 Part 1 Exception')
#try:
# 2019/2020 cohort Part 2
levy_model_set_2019_2020_part2=levy_train_functions.levy_train_05_levy_model_set_2019_2020_part2(sql_account_list)
run.log('Success 05','Commitments 2019/20 Part 2 Success')
#except Exception:
run.log('EXCEPTION 05','Commitments 2019/20 Part 2 Exception')
#try:
# 2022 cohort Part 1
levy_model_set_2022_part1=levy_train_functions.levy_train_06_levy_model_set_2022_part1(sql_account_list)
run.log('Success 06','Commitments 2022 Part 1 Success')
#except Exception:
run.log('EXCEPTION 06','Commitments 2022 Part 1 Exception')
# 2022 cohort Part 2
levy_model_set_2022_part2=levy_train_functions.levy_train_07_levy_model_set_2022_part2(sql_account_list)
#run.log('Success 07','Commitments 2022 Part 2 Success')
#except Exception:
#run.log('EXCEPTION 07','Commitments 2022 Part 2 Exception')
#try:
# join both parts together for all time periods
universe_commitments_2019 = pd.merge(levy_model_set_2018_2019_part1,
                                     levy_model_set_2018_2019_part2,
                                     left_on=['A3'], right_on=['A3'], how='left')
universe_commitments_2020 = pd.merge(levy_model_set_2019_2020_part1,
                                     levy_model_set_2019_2020_part2,
                                     left_on=['A3'], right_on=['A3'], how='left')
universe_commitments_2022 = pd.merge(levy_model_set_2022_part1,
                                     levy_model_set_2022_part2,
                                     left_on=['A3'], right_on=['A3'], how='left')
# Add all sets of accounts data into one
universe_commitments_all= | pd.concat([universe_commitments_2022,universe_commitments_2020,universe_commitments_2019]) | pandas.concat |
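# Editor's note (illustrative sketch, not part of the levy pipeline above): the cohort
# frames compute tenure with (reference Timestamp - pd.to_datetime(column)) / np.timedelta64(1, "M"),
# which yields elapsed time in fractional months. The toy sign-up dates below are
# assumptions used only to show what that expression returns.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"A2": ["2019-10-15", "2021-06-01", "2021-12-31"]})
toy["months_since_sign_up"] = (pd.Timestamp(2022, 1, 1) - pd.to_datetime(toy["A2"])) / np.timedelta64(1, "M")
toy["months_since_sign_up2"] = toy["months_since_sign_up"].apply(np.floor)
print(toy)  # the 2019-10-15 sign-up comes out at roughly 26.6 months before the 2022-01-01 cut-off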
import csv
import gymenvironment
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
CLEAN_ACTION = gymenvironment.Actions.CodeClean
SLOPPY_ACTION = gymenvironment.Actions.CodeSloppy
IN_PROGRESS_ACTION = 2
FIX_READY_REWARD = +10
IN_PROGRESS_REWARD = -0.1
PENDING_TIME_INDEX = 0
PENDING_ITEMS_INDEX = 1
def last_minute_patcher(agent, system_state):
action = CLEAN_ACTION
pending_time = system_state[PENDING_TIME_INDEX]
if pending_time <= agent.panic_threshold:
action = SLOPPY_ACTION
return action
def stressed_patcher(agent, system_state):
action = CLEAN_ACTION
pending_items = system_state[PENDING_ITEMS_INDEX]
if pending_items > agent.panic_threshold:
action = SLOPPY_ACTION
return action
class BaseDeveloper(object):
def __init__(self, logger, name, panic_threshold, action_selector):
self.name = name
self.logger = logger
self.panic_threshold = panic_threshold
self.metric_catalogue = []
self.actions = [CLEAN_ACTION, SLOPPY_ACTION]
self.action_selector = action_selector
def record_metric(self, metric_value):
self.metric_catalogue.append(metric_value)
def clear_metrics(self):
self.metric_catalogue = []
def select_action(self, system_state, global_counter=None, session=None):
return self.action_selector(self, system_state)
def new_episode(self):
pass
class PerformanceMetrics:
def __init__(self, developer):
self.sloppy_counter = developer.sloppy_counter
self.action_counter = developer.action_counter
self.attempted_deliveries = developer.attempted_deliveries
self.issues_delivered = developer.issues_delivered
def get_sloppy_ratio(self):
return float(self.sloppy_counter) / self.action_counter if self.action_counter > 0 else 0.0
class DevelopmentIssue(object):
def __init__(self, avg_resolution_time, prob_rework, code_impact):
self.avg_resolution_time = avg_resolution_time
self.prob_rework = prob_rework
self.code_impact = code_impact
class CodingApproach(object):
def __init__(self, resolution_factor, rework_factor, code_impact):
self.resolution_factor = resolution_factor
self.rework_factor = rework_factor
self.code_impact = code_impact
def get_development_issue(self, simulation_environment):
avg_resolution_time = min(1.0, simulation_environment.avg_resolution_time * self.resolution_factor)
prob_rework = min(1.0, simulation_environment.prob_rework * self.rework_factor)
return DevelopmentIssue(
avg_resolution_time=avg_resolution_time,
prob_rework=prob_rework,
code_impact=self.code_impact)
class Developer(object):
def __init__(self, agent, approach_map):
self.current_issue = None
self.approach_map = approach_map
self.agent = agent
self.name = agent.name
self.issues_delivered = None
self.sloppy_counter = None
self.action_counter = None
self.attempted_deliveries = None
self.agent.clear_metrics()
self.reset()
def get_reward(self):
return self.issues_delivered
def reset(self):
self.current_issue = None
self.issues_delivered = 0
self.sloppy_counter = 0
self.action_counter = 0
self.attempted_deliveries = 0
self.agent.new_episode()
def start_coding(self, simulation_environment, global_counter, session):
system_state = simulation_environment.get_system_state()
action = self.agent.select_action(system_state=system_state,
global_counter=global_counter,
session=session)
self.carry_out_action(action, simulation_environment)
self.action_counter += 1
return action
def carry_out_action(self, action, simulation_environment):
coding_approach = self.approach_map[action]
if coding_approach is not None:
# TODO Hacky but temporary solution
if action == SLOPPY_ACTION:
self.sloppy_counter += 1
self.current_issue = coding_approach.get_development_issue(simulation_environment)
else:
raise Exception("The action " + str(action) + " is not supported.")
def log_progress(self, training_step=None, global_counter=None):
performance_metrics = PerformanceMetrics(developer=self)
current_epsilon = None
if hasattr(self.agent, 'get_current_epsilon'):
current_epsilon = self.agent.get_current_epsilon(global_counter)
csv_filename = "training_log_" + self.name + ".csv"
if not os.path.isfile(csv_filename):
with open(csv_filename, 'w', newline="") as file:
csv_writer = csv.writer(file)
csv_writer.writerow(
['training_step', 'sloppy_counter', 'action_counter', 'attempted_deliveries', 'issues_delivered',
'sloppy_ratio',
'current_epsilon'])
with open(csv_filename, 'a', newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow([training_step, self.sloppy_counter, self.action_counter, self.attempted_deliveries,
self.issues_delivered, performance_metrics.get_sloppy_ratio(), current_epsilon])
self.agent.record_metric(performance_metrics)
class SimulationEnvironment(object):
def __init__(self, time_units, avg_resolution_time, prob_new_issue, prob_rework, logger):
self.time_units = time_units
self.logger = logger
self.init_avg_resolution_time = avg_resolution_time
self.init_prob_new_issue = prob_new_issue
self.init_prob_rework = prob_rework
self.avg_resolution_time = None
self.prob_new_issue = None
self.prob_rework = None
self.to_do_issues = None
self.doing_issues = None
self.done_issues = None
self.current_time = None
self.reset()
def reset(self, agent_wrappers=None):
self.avg_resolution_time = self.init_avg_resolution_time
self.prob_new_issue = self.init_prob_new_issue
self.prob_rework = self.init_prob_rework
self.to_do_issues = 0
self.doing_issues = 0
self.done_issues = 0
self.current_time = 0
if agent_wrappers is not None:
for wrapper in agent_wrappers:
wrapper.reset()
def add_to_backlog(self):
self.to_do_issues += 1
def move_to_in_progress(self, developer, global_counter, session):
action_performed = developer.start_coding(self, global_counter, session)
self.to_do_issues -= 1
self.doing_issues += 1
return action_performed
def move_to_done(self, developer):
self.doing_issues -= 1
self.done_issues += 1
developer.issues_delivered += 1
developer.current_issue = None
def code_submitted(self, developer):
self.avg_resolution_time = min(1.0, self.avg_resolution_time * developer.current_issue.code_impact)
developer.attempted_deliveries += 1
def get_system_state(self):
return self.time_units - self.current_time, self.to_do_issues, self.doing_issues, self.done_issues
def step(self, developers, session, global_counter=None):
self.current_time += 1
actions_performed = {}
rewards = {}
for developer in developers:
actions_performed[developer.name] = IN_PROGRESS_ACTION
rewards[developer.name] = IN_PROGRESS_REWARD
if developer.current_issue is None:
action_performed = self.move_to_in_progress(developer, global_counter, session)
actions_performed[developer.name] = action_performed
else:
random_output = np.random.random()
if random_output < developer.current_issue.avg_resolution_time:
# Deliver issue, but verify rework first
self.code_submitted(developer)
random_output = np.random.random()
if random_output >= developer.current_issue.prob_rework:
# No rework needed
self.move_to_done(developer)
rewards[developer.name] = FIX_READY_REWARD
if np.random.random() < self.prob_new_issue:
self.add_to_backlog()
episode_finished = self.current_time == self.time_units
return actions_performed, self.get_system_state(), episode_finished, rewards
def run_simulation():
    # Rewritten to run against the classes above (logger/approach_map/step signature);
    # the StubbornAgent it referenced is not defined in this module, so a BaseDeveloper
    # with the stressed_patcher policy and illustrative CodingApproach factors is used instead.
    pending_issues = []
    simulation_env = SimulationEnvironment(time_units=60, avg_resolution_time=1 / 5.0,
                                           prob_new_issue=0.1, prob_rework=0.05, logger=None)
    approach_map = {CLEAN_ACTION: CodingApproach(resolution_factor=1.0, rework_factor=1.0, code_impact=1.0),
                    SLOPPY_ACTION: CodingApproach(resolution_factor=1.5, rework_factor=2.0, code_impact=1.05)}
    agent = BaseDeveloper(logger=None, name="stressed_dev", panic_threshold=3,
                          action_selector=stressed_patcher)
    developer = Developer(agent=agent, approach_map=approach_map)
    for time_step in range(simulation_env.time_units):
        simulation_env.step([developer], session=None, global_counter=time_step)
        pending_issues.append(simulation_env.to_do_issues)
return | pd.Series(pending_issues) | pandas.Series |
import os
import pandas as pd
from tqdm import tqdm
import get_fitbit
sleep_logs = []
data_type = 'sleep'
datetimes = pd.date_range('2016-11-09', '2017-08-10')
sleep_list = []
for datetime in tqdm(datetimes):
fitbit_data = get_fitbit.FitbitData(datetime, data_type)
sleep_log = fitbit_data.load_from_disk()
for sleep_event in sleep_log['sleep']:
trim_dict = {}
key_list = sleep_event.keys()
for this_key in key_list:
if this_key != 'minuteData':
trim_dict[this_key] = sleep_event[this_key]
sleep_list.append(trim_dict)
df_sleep = pd.DataFrame(sleep_list)
start_sleep = | pd.to_datetime(df_sleep['startTime']) | pandas.to_datetime |
import os
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as mcolors
import folium
import difflib
import geopandas as gpd
import unicodedata
#function to remove accents from states and municipios
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
#PROCESS SUI INPUT DATA, SAVE AS CSVs
def load_sui_data():
folder = 'sui_database_in'
files = os.listdir(folder)
u_codes = | pd.read_excel('Listado de Agentes.xls') | pandas.read_excel |
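# Editor's note (illustrative sketch): remove_accents() above normalises names to NFKD and
# drops the combining marks, which is what lets state/municipio strings match across sources.
# The sample strings below are assumptions used only to demonstrate the behaviour.
import unicodedata

def _remove_accents_demo(input_str):
    nfkd_form = unicodedata.normalize('NFKD', input_str)
    return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])

print(_remove_accents_demo("Bogotá D.C."))  # -> "Bogota D.C."
print(_remove_accents_demo("Nariño"))       # -> "Narino"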
import time
import json
import numpy as np
import pandas as pd
data = np.random.rand(5000, 50)
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
Base Model
Base structure for creation of new models
Methods:
    calc_error: Estimates error according to scikit-learn's regression metrics
filter_ts: Returns model's residuals
"""
import sys
sys.path.append('../')
from skfore.skfore import series_viewer
from skfore.datasets import *
import pandas
import numpy
import scipy
import sklearn
import matplotlib
import random
import math
from skfore.extras import add_next_date
from sklearn import preprocessing
class BaseModel():
def __init__(self):
self.residuals = None
self.scaler = None
self.test()
def test(self):
""" Raises error if there are not any of the necessary methods defined """
if (not "fit" in dir(self)):
raise ValueError('Method "fit" has not been defined')
if (not "forecast" in dir(self)):
raise ValueError('Method "forecast" has not been defined')
def calc_error(self, ts, error_function = None, ignore_first = None):
""" Estimates error according to SciKit's regression metrics
Args:
ts: Time series to estimate the model
error_function (None or error function): Error function whose
parameters are real time series and estimated time series. If
            None, error_function defaults to scikit-learn's mean squared error
"""
if ignore_first != None:
ignore = ignore_first
else:
try:
ignore = self.q
except:
try:
ignore = self.p
except:
ignore = 0
y_estimated = self.simulate(ts)[ignore:]
y_real = ts[ignore:]
if (error_function == None):
error = sklearn.metrics.mean_squared_error(y_real, y_estimated)
else:
error = error_function(y_real, y_estimated)
return error
def filter_ts(self, ts, ignore_first = None):
""" Returns model's residuals
Args:
ts: Time series to estimate residuals
"""
if ignore_first != None:
ignore = ignore_first
else:
try:
ignore = self.q
except:
try:
ignore = self.p
except:
ignore = 0
prediction = self.simulate(ts)[ignore:]
residuals = ts[ignore:].subtract(prediction)
return residuals
def set_residuals(self, residuals):
self.residuals = series_viewer(residuals)
""" Residuals analysis """
def time_plot(self):
self.residuals.time_plot()
def ACF_plot(self):
self.residuals.ACF_plot()
def PACF_plot(self):
self.residuals.PACF_plot()
def qq_plot(self):
self.residuals.qq_plot()
def density_plot(self):
self.residuals.density_plot()
def histogram(self):
self.residuals.histogram()
def normality(self):
self.residuals.normality()
def update(self, kwargs):
for key in kwargs.keys():
setattr(self, key, kwargs[key])
return self
def predict(self, ts, periods, tsp = None, blind = True, confidence_interval = None, iterations = 300, error_sample = 'bootstrap', ignore_first = None, random_state = 100):
""" Predicts future values in a given period
Args:
ts (pandas.Series): Time series to predict
periods (int): Number of periods ahead to predict
tsp (pandas.Series): Predicted time series to compare future values
blind (boolean): True to forecast without using predicted time
series or False to use it in forecasting
confidence_interval (double): Confidence interval level
iterations (int): Number of iterations
error_sample (str): Use 'bootstrap' to forecast using sample errors
of filtered time series or 'normal' to forecast using errors
from a gaussian distribution with known variance
random_state (int): Determines random number generation for seed
Returns:
Dataframe of confidence intervals and time series of predicted
values: (ci_inf, ci_sup, series)
"""
random.seed(random_state)
if blind == False:
if tsp is None:
raise ValueError('Predicted time series not defined for no blind forecast')
else:
if error_sample == 'bootstrap':
if confidence_interval is None:
c_i = 0.95
else:
c_i = confidence_interval
for i in range(len(tsp)):
if i == 0:
tse = ts
simul_step = self.bootstrap(tse, 1, confidence_interval = c_i, iterations = iterations)
simul_result = simul_step.transpose()
y = ts
else:
tse = ts.append(tsp[0:i])
simul_step = self.bootstrap(tse, 1, confidence_interval = c_i, iterations = iterations)
simul_result = simul_result.append(simul_step.transpose())
value = self.forecast(y)
y = add_next_date(y, value)
prediction = y[-len(tsp):]
prediction.name = 'series'
ci = pandas.DataFrame([prediction], index = ['series'])
result = ci.append(simul_result.transpose())
elif error_sample == 'normal':
if confidence_interval is None:
for i in range(len(tsp)):
if i == 0:
tse = ts
simul_step = self.normal_error(1, tse, ignore_first)
simul_result = simul_step
y = ts
else:
tse = ts.append(tsp[0:i])
simul_step = self.normal_error(1, tse, ignore_first)
simul_result = simul_result.append(simul_step)
value = self.forecast(y)
y = add_next_date(y, value)
prediction = y[-len(tsp):]
prediction.name = 'series'
ci = pandas.DataFrame([prediction], index = ['series'])
result = ci.append(simul_result.transpose())
else:
for i in range(len(tsp)):
if i == 0:
tse = ts
simul_step = self.normal_error(1, tse, ignore_first)
simul_step_b = self.bootstrap(tse, 1, confidence_interval = confidence_interval, iterations = iterations)
simul_result = simul_step
simul_result_b = simul_step_b.transpose()
y = ts
else:
tse = ts.append(tsp[0:i])
simul_step = self.normal_error(1, tse, ignore_first)
simul_step_b = self.bootstrap(tse, 1, confidence_interval = confidence_interval, iterations = iterations)
simul_result = simul_result.append(simul_step)
simul_result_b = simul_result_b.append(simul_step_b.transpose())
value = self.forecast(y)
y = add_next_date(y, value)
prediction = y[-len(tsp):]
prediction.name = 'series'
ci = pandas.DataFrame([prediction], index = ['series'])
result = ci.append(simul_result.transpose())
result = result.append(simul_result_b.transpose())
else:
raise ValueError('Error sample has not been defined correctly')
else:
for i in range(periods):
if i == 0:
y = ts
value = self.forecast(y)
y = add_next_date(y, value)
if error_sample == 'bootstrap':
if confidence_interval is None:
c_i = 0.95
else:
c_i = confidence_interval
ci = self.bootstrap(ts, periods, c_i, iterations)
prediction = y[-periods:]
prediction.name = 'series'
result = ci.append(prediction)
elif error_sample == 'normal':
prediction = y[-periods:]
prediction.name = 'series'
ci = pandas.DataFrame([prediction], index = ['series'])
if confidence_interval is None:
simulation = self.normal_error(periods, ts, ignore_first)
result = ci.append(simulation.transpose())
else:
simulation = self.normal_error(periods, ts, ignore_first)
simulation_b = self.bootstrap(ts, periods, confidence_interval, iterations)
result = ci.append(simulation.transpose())
result = result.append(simulation_b)
else:
raise ValueError('Error sample has not been defined correctly')
result = result.transpose()
if error_sample == 'bootstrap':
result['forecast'] = result.bootstrap
elif error_sample == 'normal':
result['forecast'] = result.normal
result['real'] = tsp
return result
def bootstrap(self, ts, periods = 5,confidence_interval = 0.95, iterations = 500):
try:
ignore = self.q
except:
try:
ignore = self.p
except:
ignore = 0
values = self.filter_ts(ts, ignore).values
results = list()
for i in range(iterations):
for j in range(periods):
train = sklearn.utils.resample(values, n_samples = 1)
if j == 0:
y = ts
else:
y = add_next_date(y, next_value_bootstrap)
next_value = self.forecast(y)
next_value_bootstrap = next_value + train[0]
result_complete = add_next_date(y, next_value_bootstrap)
result = result_complete[-periods:]
results.append(result)
results = pandas.DataFrame(results)
ci_inf = results.quantile(1-confidence_interval)
ci_sup = results.quantile(confidence_interval)
mean = results.mean()
ci = | pandas.DataFrame([ci_inf, ci_sup, mean], index = ['ci_inf', 'ci_sup', 'bootstrap']) | pandas.DataFrame |
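# Editor's note (illustrative sketch): bootstrap() above builds prediction intervals by
# resampling in-sample residuals, adding a sampled residual to each one-step forecast and
# taking quantiles across iterations. The stand-alone version below uses a naive mean
# forecaster and synthetic data (both assumptions) purely to make that recipe concrete.
import numpy
import pandas
import sklearn.utils

rng = numpy.random.RandomState(0)
history = pandas.Series(10 + rng.normal(0, 1, 50))
residuals = history - history.mean()            # stand-in for model residuals
periods, iterations, ci_level = 3, 200, 0.95

paths = []
for _ in range(iterations):
    y = history.copy()
    for _ in range(periods):
        err = sklearn.utils.resample(residuals.values, n_samples=1)[0]
        y = pandas.concat([y, pandas.Series([y.mean() + err])], ignore_index=True)
    paths.append(y.iloc[-periods:].reset_index(drop=True))

paths = pandas.DataFrame(paths)
ci_demo = pandas.DataFrame([paths.quantile(1 - ci_level), paths.quantile(ci_level), paths.mean()],
                           index=['ci_inf', 'ci_sup', 'bootstrap'])
print(ci_demo)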
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from datetime import date, datetime
from functools import lru_cache
import pandas as pd
import panel as pn
from jinja2 import Template
@lru_cache()
def load_data(data_filepath):
df = pd.read_hdf(data_filepath, "df")
return df
@lru_cache()
def get_data(
start_date, end_date, topic_name, data_filepath="data/dashboard_data.h5"
):
df_new = load_data(data_filepath)
# print(df_new[["date", "topic"]])
start_date_str = datetime.strptime(str(start_date), "%Y-%m-%d")
end_date_str = datetime.strptime(str(end_date), "%Y-%m-%d")
gt_mask = df_new["date"] >= start_date_str
lt_mask = df_new["date"] <= end_date_str
df_new_filtered = df_new.loc[(gt_mask) & (lt_mask)]
# print(df_new[["date", "topic"]])
df_topic_filtered = df_new[df_new["topic"].isin([topic_name])]
# print(df_topic_filtered.shape[0])
# print(df_topic_filtered)
return [df_new_filtered, df_topic_filtered]
def generate_tooltip(
data_list=[["term", "Term", 14], ["term_weight", "Term Weight", 14]],
imgs=[],
tool_title_fontsize=16,
tool_title="My Title",
jinja2_templates_dir="templates",
hover_template="hover.j2",
):
jinja2_templates_filepath = os.path.join(
jinja2_templates_dir, hover_template
)
with open(jinja2_templates_filepath) as f:
hover_tool_str = Template(f.read())
if imgs:
return hover_tool_str.render(
data_list=data_list,
tool_title_fontsize=tool_title_fontsize,
tool_title=tool_title,
)
else:
return hover_tool_str.render(
data_list=data_list,
tool_title_fontsize=tool_title_fontsize,
tool_title=tool_title,
imgs=imgs,
)
def create_dashboard_sidebar_text():
nlp_wiki_link = "https://en.wikipedia.org/wiki/Natural_language_processing"
guardian_wiki_link = "https://en.wikipedia.org/wiki/The_Guardian"
usl_url = (
"https://en.wikipedia.org/wiki/Machine_learning#Unsupervised_learning"
)
nmfurl = "https://en.wikipedia.org/wiki/Non-negative_matrix_factorization"
tm_link = "https://en.wikipedia.org/wiki/Topic_model"
dash_title = f"""
<h1><a href='{nlp_wiki_link}'>NLP</a>
<a href='{tm_link}'>Topic Predictor</a></h1>"""
text = f"""{dash_title}
This dashboard visualizes the learned topic of news articles from the
    Science section of the
    <a href='{guardian_wiki_link}'>Guardian Online publication</a>
    using the search term <b>Space</b>. An
    <a href='{usl_url}'>unsupervised Machine Learning</a>
    <a href='{nmfurl}'>non-negative matrix factorization (NMF)</a>
    model was trained on the article text of approximately 4,000 news
    articles from the result of such a search on the
    <a href='https://www.theguardian.com/science'>guardian.com</a>
website covering the years 1957-late 2019. The trained ML model was
used to generate predictions on 48 news articles from November 2019 to
the end of February 2020 that were not seen during training. This
dashboard summarizes select characteristics of the trained model and
the learned topics for all of these unseen news articles.
<h3>Notes</h3>
<ul style='padding-left:1.2em;'>
<li>Of the 35 learned topics, news articles in topics 0 and 33
were not manually read.</li>
<li>These two topics were not assigned a name and so are
listed as <i>Topic 0</i> and <i>Topic 33</i> respectively.</li>
<li>If the vertical axis text is cut off on the plots, then click
the menu icon (three horizontal bars at the top left) twice.</li>
<li>This dashboard must be viewed in landscape mode.</li>
<li>Term Weights are taken from ML model training.</li>
<li>Topic-wise organization (frequency) and residual spreads come
from news article text not seen during ML model training.</li>
</ul>
"""
return text
def update_progress(
start_date, end_date, min_date, max_date, date_range_progress_bar
):
selected_days = (end_date - start_date).days
max_days = (max_date - min_date).days
date_range_progress_bar.value = int((selected_days / max_days) * 100)
def generate_sidebar_html():
return pn.pane.HTML(
create_dashboard_sidebar_text(),
width=450,
style={
"background-color": "#F6F6F6", # text background color
"border": "2px solid lightgrey",
"border-radius": "3px", # >0px produces curved corners
"padding": "5px", # text-to-border whitespace
},
)
def perform_updates(
start_date,
end_date,
daterange_progress_bar,
topic_selected,
data_filepath,
term_weights_bar_chart,
entity_counts_bar_chart,
topic_freq_bar_chart,
source,
source_weights,
source_ner,
source_month,
source_topics,
beginning_date,
ending_date,
topic_selector,
):
df_new_filtered, df_topic_filtered = get_data(
start_date, end_date, topic_selected, data_filepath
)
t_dates = | pd.to_datetime(df_topic_filtered["date"]) | pandas.to_datetime |
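# Editor's note (illustrative sketch): get_data() above narrows the article frame with two
# boolean date masks from the slider before filtering to one topic. The tiny frame and
# dates here are assumptions, shown only to make that masking step explicit.
from datetime import datetime
import pandas as pd

articles = pd.DataFrame({
    "date": pd.to_datetime(["2019-11-05", "2019-12-20", "2020-02-10"]),
    "topic": ["Mars", "Comets", "Mars"],
})
start = datetime.strptime("2019-12-01", "%Y-%m-%d")
end = datetime.strptime("2020-03-01", "%Y-%m-%d")
in_range = articles.loc[(articles["date"] >= start) & (articles["date"] <= end)]
mars_only = in_range[in_range["topic"].isin(["Mars"])]
print(mars_only)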
import argparse
import os
from itertools import combinations, product
from multiprocessing import Pool
import networkx as nx
import pandas as pd
from custom_utils import get_jaccard, get_structures_added, load_json
from statics import STRUCTURE_TYPES
def get_product_graph(g1, g2, graphicspath, resultspath):
o1 = f"{graphicspath}/structure_overlap_matrix-{g1}.csv"
o2 = f"{graphicspath}/structure_overlap_matrix-{g2}.csv"
j1 = f"{resultspath}/{g1}.json"
j2 = f"{resultspath}/{g2}.json"
json1 = load_json(j1)
json2 = load_json(j2)
s1 = {
idx: (s["n_nodes_total"], s["structure_type"], s)
for idx, s in enumerate(get_structures_added(json1))
}
s2 = {
idx: (s["n_nodes_total"], s["structure_type"], s)
for idx, s in enumerate(get_structures_added(json2))
}
df1 = pd.read_csv(o1, index_col=0).fillna(0)
df1.columns = df1.columns.astype(int)
df2 = pd.read_csv(o2, index_col=0).fillna(0)
df2.columns = df2.columns.astype(int)
G1 = nx.Graph(df1)
G2 = nx.Graph(df2)
nodes = [(x, y) for x, y in product(s1.keys(), s2.keys()) if s1[x][1] == s2[y][1]]
edges = [
(
tup[0],
tup[1],
G1.edges[tup[0][0], tup[1][0]]["weight"]
* G2.edges[tup[0][-1], tup[1][-1]]["weight"],
)
for tup in filter(
lambda tup: tup[0][0] != tup[1][0]
and tup[0][-1] != tup[1][-1]
and G1.edges.get([tup[0][0], tup[1][0]])
and G2.edges.get([tup[0][-1], tup[1][-1]]),
combinations(nodes, 2),
)
]
G12 = nx.Graph()
G12.add_nodes_from(nodes)
G12.add_weighted_edges_from(edges)
return s1, s2, G1, G2, G12
def get_overlap_alignment_edges(G12):
selected_edges = []
while True:
if nx.number_of_edges(G12) == 0:
break
new_edge = max(
G12.edges(data="weight"),
key=lambda tup: (
tup[-1],
-abs(tup[0][0] - tup[0][1]),
-abs(tup[1][0] - tup[1][1]),
),
)
selected_edges.append(new_edge)
nodes_to_remove = list(
filter(
lambda x: (x[0] == new_edge[0][0] and x[1] != new_edge[0][1])
or (x[1] == new_edge[0][1] and x[0] != new_edge[0][0])
or (x[0] == new_edge[1][0] and x[1] != new_edge[1][1])
or (x[1] == new_edge[1][1] and x[0] != new_edge[1][0]),
G12.nodes(),
)
)
G12.remove_edge(*new_edge[:-1])
G12.remove_nodes_from(nodes_to_remove)
print(
"Constructed G12 with",
"n =",
nx.number_of_nodes(G12),
"and m =",
nx.number_of_edges(G12),
end="\r",
)
return selected_edges
def get_common_model_graph(G12):
selected_edges = get_overlap_alignment_edges(G12)
common_graph = nx.Graph()
common_graph.add_weighted_edges_from(selected_edges)
return common_graph
def complete_with_greedy(common_graph, G12):
leftover_candidates = set(G12.nodes()) - set(common_graph.nodes())
while leftover_candidates:
x, y = min(leftover_candidates)
common_graph.add_node((x, y))
leftover_candidates -= {
(u, v) for (u, v) in leftover_candidates if u == x or v == y
}
return common_graph
def get_overlap(g1, g2, structures1, structures2, structure_type):
s1 = list(filter(lambda x: x["structure_type"] == structure_type, structures1))
s2 = list(filter(lambda x: x["structure_type"] == structure_type, structures2))
overlap = pd.DataFrame(
index=list(map(lambda x: x["position"], s1)),
columns=list(map(lambda x: x["position"], s2)),
)
for idx1, structure1 in enumerate(s1):
for idx2, structure2 in enumerate(s2):
overlap.iat[idx1, idx2] = get_jaccard(
structure1, structure2, structure_type
)
return overlap
def match_structures_by_overlap(overlap):
matched = set()
pairs = []
found_match = True
while found_match:
found_match = False
remaining = (
overlap.query("index not in @matched").T.query("index not in @matched").T
)
if min(remaining.shape) > 0:
max_u = remaining.max().idxmax()
other = (
overlap.query("index not in @matched")
.T.query("index not in @matched")
.T[[max_u]]
.T
)
if min(other.shape) > 0:
max_v = other.max().idxmax()
pairs.append((max_v, max_u, overlap.at[max_v, max_u]))
found_match = True
matched = matched | {max_u, max_v}
return pairs
def get_matches_with_node_alignment(g1, g2, structures1, structures2):
clique_overlap = get_overlap(g1, g2, structures1, structures2, "clique")
clique_pairs = match_structures_by_overlap(clique_overlap)
assert len(clique_pairs) == min(clique_overlap.shape), "Clique alignment problem"
star_overlap = get_overlap(g1, g2, structures1, structures2, "star")
star_pairs = match_structures_by_overlap(star_overlap)
assert len(star_pairs) == min(star_overlap.shape), "Star alignment problem"
biclique_overlap = get_overlap(g1, g2, structures1, structures2, "biclique")
biclique_pairs = match_structures_by_overlap(biclique_overlap)
assert len(biclique_pairs) == min(
biclique_overlap.shape
), "Biclique alignment problem"
starclique_overlap = get_overlap(g1, g2, structures1, structures2, "starclique")
starclique_pairs = match_structures_by_overlap(starclique_overlap)
assert len(starclique_pairs) == min(
starclique_overlap.shape
), "Starclique alignment problem"
matched = sorted(
clique_pairs + star_pairs + biclique_pairs + starclique_pairs,
key=lambda x: x[-1],
reverse=True,
)
unmatched1 = (
sorted(set(clique_overlap.index) - set(map(lambda x: x[0], matched)))
+ sorted(set(star_overlap.index) - set(map(lambda x: x[0], matched)))
+ sorted(set(biclique_overlap.index) - set(map(lambda x: x[0], matched)))
+ sorted(set(starclique_overlap.index) - set(map(lambda x: x[0], matched)))
)
unmatched2 = (
sorted(set(clique_overlap.columns) - set(map(lambda x: x[1], matched)))
+ sorted(set(star_overlap.columns) - set(map(lambda x: x[1], matched)))
+ sorted(set(biclique_overlap.columns) - set(map(lambda x: x[1], matched)))
+ sorted(set(starclique_overlap.columns) - set(map(lambda x: x[1], matched)))
)
return matched, unmatched1, unmatched2
def get_structures_for_type(structures, structure_type):
selected = list(filter(lambda x: x["structure_type"] == structure_type, structures))
return list(map(lambda x: x["position"], selected))
def get_matches_greedy(structures1, structures2):
matched = []
unmatched1 = []
unmatched2 = []
for structure_type in STRUCTURE_TYPES:
s1 = get_structures_for_type(structures1, structure_type)
s2 = get_structures_for_type(structures2, structure_type)
matched.extend(list(zip(s1, s2)))
if len(s1) > len(s2):
unmatched1.extend(s1[len(s2) :])
elif len(s1) < len(s2):
unmatched2.extend(s2[len(s1) :])
return matched, unmatched1, unmatched2
def get_graph_names(path):
return sorted(
[
x.split(".json")[0]
for x in [f for f in os.listdir(path) if f.endswith("json")]
]
)
def save_jaccard_alignments(g1, idx, columns, graph_names, mapped_path, alignmentpath):
print(
f"Starting alignments for graph {idx + 1}/{len(graph_names)} ({g1})...",
end="\r",
)
large_df = pd.DataFrame(columns=columns)
for g2 in graph_names[idx:]:
alignment = f"{g1}_-_{g2}"
json1 = load_json(f"{mapped_path}/{g1}.json")
json2 = load_json(f"{mapped_path}/{g2}.json")
structures1 = get_structures_added(json1)
structures2 = get_structures_added(json2)
matched, unmatched1, unmatched2 = get_matches_with_node_alignment(
g1, g2, structures1, structures2
)
df = pd.concat(
[
pd.DataFrame(
matched, columns=["g1_structure", "g2_structure", "jaccard"]
),
pd.DataFrame([unmatched1], index=["g1_structure"]).T,
| pd.DataFrame([unmatched2], index=["g2_structure"]) | pandas.DataFrame |
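# Editor's note (illustrative sketch): match_structures_by_overlap() above greedily pairs
# structures by repeatedly taking the largest remaining Jaccard score and retiring both
# members of the pair. The 3x2 overlap matrix below is an assumption that just shows the
# pairing order the greedy rule produces.
import pandas as pd

overlap_demo = pd.DataFrame([[0.8, 0.1],
                             [0.2, 0.6],
                             [0.4, 0.3]],
                            index=["g1_s0", "g1_s1", "g1_s2"],
                            columns=["g2_s0", "g2_s1"])
matched_demo, pairs_demo = set(), []
while True:
    remaining = overlap_demo.drop(index=[i for i in overlap_demo.index if i in matched_demo],
                                  columns=[c for c in overlap_demo.columns if c in matched_demo])
    if min(remaining.shape) == 0:
        break
    col = remaining.max().idxmax()       # column holding the largest remaining score
    row = remaining[col].idxmax()        # best row within that column
    pairs_demo.append((row, col, overlap_demo.at[row, col]))
    matched_demo |= {row, col}
print(pairs_demo)  # [('g1_s0', 'g2_s0', 0.8), ('g1_s1', 'g2_s1', 0.6)]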
from __future__ import division
import os
# import cv2
import fnmatch
import sys
import glob
import numpy as np
import pandas as pd
import scipy.stats as sps
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as u
import sunpy.map as smap
from PIL import Image, ImageDraw
from datetime import datetime, timedelta
import hi_processing as hip
import misc
import data2df
from data_cme_complexity import CMEComplexity
from plotting_stereo import STEREOPlot
from data_stereo_hi import STEREOHI
from data_helcats import HELCATS
data_loc = r'C:\\Users\\shann\\OneDrive\\Documents\\Research\\Workspace\\Data'
fig_loc = r"C:\\Users\\shann\\OneDrive\\Documents\\Research\\Workspace\\Plots"
hi_data = STEREOHI(data_loc)
def brightness_equalise_images(tag, img_type):
root = "C:\\Users\\shann\\OneDrive\\Documents\\Research\\Workspace\\Data\\STEREO_HI\\Images"
for craft in ['sta', 'stb']:
img_path = os.path.join(root, tag, img_type, craft)
be_path = os.path.join(root, tag, img_type + '_be', craft)
if not os.path.exists(be_path):
os.mkdir(be_path)
for filename in os.listdir(img_path):
if filename.endswith(".jpg"):
img = cv2.imread(os.path.join(img_path, filename),0)
be = cv2.equalizeHist(img)
cv2.imwrite(os.path.join(be_path, filename), be)
###############################################################################
# add a crude CME mask
def find_cme_mask_bounds(helcats_name):
"""finds coords of box of CME area.
"""
# get the upper and lower PAs, and elongation of the CME in the image
helcats = HELCATS(data_loc)
craft, time = helcats.get_cme_details(helcats_name)
pa_n = helcats.get_col_data('PA-N [deg]', [helcats_name])[0]
pa_s = helcats.get_col_data('PA-S [deg]', [helcats_name])[0]
start, mid, end, mid_el = helcats.get_te_track_times(helcats_name)
hi_data = STEREOHI(data_loc)
hi_map = hi_data.get_hi_map(craft, mid)
# convert these points into pixel coordinates
coord1 = hip.convert_hpr_to_pix(4 * u.deg, pa_n * u.deg, hi_map)
coord2 = hip.convert_hpr_to_pix(mid_el * u.deg, pa_n * u.deg, hi_map)
coord4 = hip.convert_hpr_to_pix(mid_el * u.deg, pa_s * u.deg, hi_map)
coord5 = hip.convert_hpr_to_pix(4 * u.deg, pa_s * u.deg, hi_map)
coord3_list = []
if pa_n < pa_s: # CME goes right to left (STA pre 2016)
# Loop over each position angle between PA_N and PA_S
pa = int(pa_n) + 1
while pa < pa_s:
coord = hip.convert_hpr_to_pix(mid_el * u.deg, pa * u.deg, hi_map)
coord3_list.append(coord)
pa = pa + 1
else: # CME goes left to right (STB & STA post 2016)
pa = pa_n - 1
while pa > pa_s:
coord = hip.convert_hpr_to_pix(mid_el * u.deg, pa * u.deg, hi_map)
coord3_list.append(coord)
pa = pa - 1
# append coords in order
polygon = [(coord1[0].value, coord1[1].value),
(coord2[0].value, coord2[1].value)]
for j in range(len(coord3_list)):
polygon.append((coord3_list[j][0].value, coord3_list[j][1].value))
polygon.append((coord4[0].value, coord4[1].value))
polygon.append((coord5[0].value, coord5[1].value))
return polygon
def add_cme_mask(helcats_name, img):
polygon = find_cme_mask_bounds(helcats_name)
# Crop image to required area
# Code adapted from https://stackoverflow.com/questions/22588074/polygon-crop-clip-using-python-pil
img_array = np.asarray(img)
if len(np.shape(img)) > 2:
img_array = img_array[:,:,0] # only needed if 3 dimensions
# create mask
mask_img = Image.new('L', (img_array.shape[1], img_array.shape[0]), 0)
ImageDraw.Draw(mask_img).polygon(polygon, outline=1, fill=1)
mask = np.array(mask_img)
# assemble new image (uint8: 0-255)
new_img_array = np.empty(img_array.shape, dtype='uint8')
new_img_array[:,:] = mask * img_array[:,:]
new_img = Image.fromarray(new_img_array)
return new_img
###############################################################################
# plots to demonstrate
def plot_img_with_mask(helcats_name, tag, img_type):
"""Makes a plot showing the CME image and CME masked image side-by-side.
"""
hi_data = STEREOHI(data_loc)
helcats = HELCATS(data_loc)
craft, time = helcats.get_cme_details(helcats_name)
img = hi_data.load_img(helcats_name, craft, tag, img_type)
masked_img = add_cme_mask(helcats_name, img)
# Plot to compare original and cropped images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=[9, 6])
ax1.imshow(img)
ax2.imshow(masked_img)
ax1.set_title('Differenced Image')
ax2.set_title('CME Area')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.set_xticks([])
ax2.set_yticks([])
f.savefig(os.path.join(fig_loc, 'CME Area ' + helcats_name + '.png'))
def plot_img_hist(helcats_name, tag, img_type):
"""Makes plot showing image on left and histogram of pixel values on the
right.
"""
hi_data = STEREOHI(data_loc)
helcats = HELCATS(data_loc)
craft, time = helcats.get_cme_details(helcats_name)
img = hi_data.load_img_from_file(tag, img_type, craft, name=helcats_name)
# Plot
f, (ax1, ax2) = plt.subplots(1, 2, figsize=[9, 6])
ax1.imshow(img, cmap='gray')
a = list(img.getdata(band=0))
ax2.hist(a, bins=np.arange(0, 255, 1), color='orange')
# Make pretty
ax1.set_xticks([])
ax1.set_yticks([])
ax2.set_ylim((0, 50000))
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
ax2.set_aspect(asp)
ax2.set_xlabel('Pixel Value')
ax2.set_ylabel('Count')
plt.tight_layout()
f.savefig(os.path.join(fig_loc, helcats_name + ' hist ' + img_type + '.png'))
def image_summary(df, helcats_name, tag, img_type, mask=False):
"""Does all the image processing stuff for one image, and prints the
values.
:param: df: pandas df
:param: helcats_name: str, HELCATS id of CME
:param: mask: bool, should the rest of the image be masked out, leaving
only the CME?
:param: tag: suffix for images folder
"""
# get data
hi_data = STEREOHI(data_loc)
helcats = HELCATS(data_loc)
craft, time = helcats.get_cme_details(helcats_name)
img = hi_data.load_img_from_file(tag, img_type, craft, name=helcats_name)
if mask == True:
        masked_img = add_cme_mask(helcats_name, img)
data = list(masked_img.getdata(0))
# Remove masked values
data[:] = [value for value in data if value != 0]
elif mask == False:
data = list(img.getdata(0))
# plot the CME image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=[9, 6])
ax1.imshow(img)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_xticks([])
ax1.set_yticks([])
ax2.hist(data, bins=np.arange(0, 255, 1))
ax2.set_ylim((0, 50000))
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
ax2.set_aspect(asp)
ax2.set_xlabel('Pixel Value')
ax2.set_ylabel('Count')
plt.tight_layout()
# Print the summary
print("Summary of CME: %s" %(helcats_name))
print("Image type: %s" %(img_type))
print("CME mask: %s" %(str(mask)))
# first use the hi_map
# print("NaNs count: %s" %(np.isnan(hi_map.data).sum()))
# then look at the image
print("Fraction of saturated pixels: %s" %(data.count(255)/len(data)))
print("Mean pixel value: %s" %(np.mean(data)))
print("Standard deviation: %s" %(np.std(data)))
print("Absolute mean pixel value: %s" %(np.mean(abs(np.array(data)))))
print("Standard dev of abs mean pixel value: %s" %(np.std(abs(np.array(data)))))
###############################################################################
# Code to apply image processing above to all CMEs in a df
def save_img_stats(df, tag, img_type):
"""Loops over all the images, does stuff, and saves it as a .csv file.
:param: df: pandas df
:param: tag: str
"""
df = data2df.add_width_to_df(df)
df = data2df.add_te_track_times_to_df(df)
hi_data = STEREOHI(data_loc)
nan_count = []
bright_pix = []
for i in df.index:
print('processing image %s of %s'%(i, len(df)))
img = hi_data.load_img_from_file(tag, img_type, df['craft'][i],
name=df['helcats_name'][i])
hi_map = hi_data.get_hi_map(df['craft'][i], df['time'][i])
if hi_map != False:
# Stats from complete image
nan_count.append(np.isnan(hi_map.data).sum())
# sat_pix_image.append(list(img.getdata(0)).count(255))
# stats from cropped image
new_img = add_cme_mask(df['helcats_name'][i], img)
cme = list(new_img.getdata(0))
# cme.count(0) is number of masked out pixels
# len(cme) is total pixels in the image
# cme_pix = len(cme) - cme.count(0) # so this is pixels in cme
bright = [value for value in cme if value > 222]
bright_pix.append(len(bright))
else:
print('error: hi_map not found for cme', df['helcats_name'][i])
nan_count.append(np.NaN)
bright_pix.append(np.NaN)
df['nan_count'] = pd.Series(nan_count, index=df.index)
df['bright_pix'] = | pd.Series(bright_pix, index=df.index) | pandas.Series |
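# Editor's note (illustrative sketch): add_cme_mask() above keeps only the CME wedge by
# drawing the elongation/position-angle polygon into a PIL 'L' image and multiplying the
# resulting 0/1 mask with the pixel array. The 8x8 synthetic image and triangle polygon
# here are assumptions, included only to show the masking mechanics without HI data.
import numpy as np
from PIL import Image, ImageDraw

img_array = np.full((8, 8), 200, dtype='uint8')    # stand-in differenced image
polygon = [(1, 1), (6, 1), (3, 6)]                 # stand-in CME outline, (x, y) pixels

mask_img = Image.new('L', (img_array.shape[1], img_array.shape[0]), 0)
ImageDraw.Draw(mask_img).polygon(polygon, outline=1, fill=1)
mask = np.array(mask_img)

masked = Image.fromarray((mask * img_array).astype('uint8'))
print(np.array(masked))  # pixels outside the polygon are zeroed out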
'''
Created on Jan 15, 2020
@author: bsana
'''
from os.path import join
import sys,datetime
import pandas as pd
OUT_SEP = ' '
COUNTY_FIPS = [37,59]
if __name__ == '__main__':
if len(sys.argv)<2:
print('Please provide a control file which contains all the required input parameters as an argument!')
else:
print('Reformat survey program started: {}'.format(datetime.datetime.now()))
#Initiate log file
logfilename = 'reformat_survey.log'
logfile = open(logfilename,'w')
logfile.write('Reformat survey program started: ' + str(datetime.datetime.now()) + '\n')
inputctlfile = sys.argv[1]
ctlfile = open(inputctlfile)
for ctlfileline in ctlfile:
logfile.write(ctlfileline)
if len(str.split(ctlfileline))>1:
param = (str.split(ctlfileline)[0]).upper()
value = str.split(ctlfileline)[1]
if param == 'INDIR':
inputdir = value
elif param == 'INHHFILE':
inhhfilename = value
elif param == 'INPERFILE':
inperfilename = value
elif param == 'INTRIPFILE':
intripfilename = value
elif param == 'OUTDIR':
outputdir = value
elif param == 'OUTHHFILE':
outhhfilename = value
elif param == 'OUTPERFILE':
outperfilename = value
elif param == 'OUTTRIPFILE':
outtripfilename = value
inhhfilename = join(inputdir, inhhfilename)
inperfilename = join(inputdir, inperfilename)
intripfilename = join(inputdir, intripfilename)
outhhfilename = join(outputdir, outhhfilename)
outperfilename = join(outputdir, outperfilename)
outtripfilename = join(outputdir, outtripfilename)
###### Household file processing
print('Household file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Household file processing started: ' + str(datetime.datetime.now()) + '\n')
hh = pd.read_csv(inhhfilename, sep='\t')
hh['hhno'] = hh['hh_id']
hh['hhsize'] = hh['num_people']
hh.loc[hh['hhsize']>900, 'hhsize'] = -1
hh['hhvehs'] = hh['num_vehicles']
hh.loc[hh['hhvehs']>900, 'hhvehs'] = -1
INC1_DICT = {999:-1, 1:7500, 2:20000, 3:30000, 4:42500, 5:62500, 6:87500, 7:125000, 8:175000, 9:225000, 10:350000}
hh['hhincome'] = hh['income_detailed'].map(INC1_DICT)
INC2_DICT = {999:-1, 1:12500, 2:37500, 3:62500, 4:87500, 5:175000, 6:350000}
hh['hhinc2'] = hh['income_followup'].map(INC2_DICT)
hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhincome'] = hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhinc2']
hh['hownrent'] = hh['rent_own']
hh.loc[hh['hownrent']==997, 'hownrent'] = 3 #Other
hh.loc[hh['hownrent']==999, 'hownrent'] = 9 #Prefer not to answer -> Missing
hh.loc[hh['hownrent']<0, 'hownrent'] = -1
RESTYPE_DICT = {1:1, 2:2, 3:3, 4:3, 5:3, 6:5, 7:4, 997:6}
hh['hrestype'] = hh['res_type'].map(RESTYPE_DICT)
hh.loc[pd.isnull(hh['hrestype']), 'hrestype'] = -1
hh['hxcord'] = hh['reported_home_lon']
hh['hycord'] = hh['reported_home_lat']
hh['hhtaz'] = hh['home_taz']
hh['hhparcel'] = hh['home_bg_geoid']
int_cols = ['hhparcel','hhtaz','hhincome','hrestype']
hh[int_cols] = hh[int_cols].astype('int64')
out_colnames = ['hhno','hhsize','hhvehs','hhincome','hownrent','hrestype','hhparcel','hhtaz','hxcord','hycord','wt_alladult_wkday','wt_alladult_7day']
hh = hh[out_colnames]
hh = hh.sort_values('hhno')
hh.to_csv(outhhfilename, sep=OUT_SEP, index=False)
print('Household file processing finished: {}'.format(datetime.datetime.now()))
logfile.write('Household file processing finished: ' + str(datetime.datetime.now()) + '\n')
###### Person file processing
print('Person file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Person file processing started: ' + str(datetime.datetime.now()) + '\n')
per = pd.read_csv(inperfilename, sep='\t')
per['person_id'] = per['person_id'].round()
per['hhno'] = per['hh_id']
per['pno'] = per['person_num']
AGE_DICT = {1:3, 2:10, 3:16, 4:21, 5:30, 6:40, 7:50, 8:60, 9:70, 10:80}
per['pagey'] = per['age'].map(AGE_DICT)
GEND_DICT = {1:2, 2:1, 3:3, 4:3, 997:3, 995:9, 999:9}
per['pgend'] = per['gender'].map(GEND_DICT)
per.loc[per['pgend']<0, 'pgend'] = -1
per.loc[pd.isna(per['pgend']), 'pgend'] = -1
per['pptyp'] = 0
per['pwtyp'] = 0
per['pstyp'] = 0
per.loc[(per['pagey']>=0) & (per['pagey']<5), 'pptyp'] = 8
per.loc[(per['pagey']>=0) & (per['pagey']<16) & (per['pptyp']==0), 'pptyp'] = 7
per.loc[(per['employment']==1) & (per['hours_work'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 1
per.loc[(per['pagey']>=16) & (per['pagey']<18) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['pagey']>=16) & (per['pagey']<25) & (per['school_type'].isin([4,7])) & (per['student']==1) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['student'].isin([1,2])) & (per['pptyp']==0), 'pptyp'] = 5
per.loc[(per['employment'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 2 # Remaining workers are part-time
per.loc[(per['pagey']>65) & (per['pptyp']==0), 'pptyp'] = 3
per.loc[per['pptyp']==0, 'pptyp'] = 4
per.loc[per['pptyp']==1, 'pwtyp'] = 1
per.loc[per['pptyp']==2, 'pwtyp'] = 2
# student workers are also part-time workers
per.loc[(per['pptyp']==5) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[(per['pptyp']==6) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[per['student']==1, 'pstyp'] = 1
per.loc[per['student']==2, 'pstyp'] = 2
per['pwxcord'] = per['work_lon']
per['pwycord'] = per['work_lat']
per['psxcord'] = per['school_lon']
per['psycord'] = per['school_lat']
per['ppaidprk'] = 1
per.loc[per['work_park']==1, 'ppaidprk'] = 0
per = per.rename(columns={'work_taz':'pwtaz_tmp', 'school_taz':'pstaz_tmp',
'work_bg_geo_id':'pwpcl_tmp', 'school_bg_geo_id':'pspcl_tmp'})
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz_tmp']
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl_tmp']
per.loc[pd.isnull(per['pwtaz']), 'pwtaz'] = -1
per.loc[pd.isnull(per['pstaz']), 'pstaz'] = -1
per.loc[pd.isnull(per['pwpcl']), 'pwpcl'] = -1
per.loc[pd.isnull(per['pspcl']), 'pspcl'] = -1
per.loc[pd.isnull(per['pwxcord']), 'pwxcord'] = -1.0
per.loc[pd.isnull(per['pwycord']), 'pwycord'] = -1.0
per.loc[pd.isnull(per['psxcord']), 'psxcord'] = -1.0
per.loc[pd.isnull(per['psycord']), 'psycord'] = -1.0
# there appear to be some person records who are not students but have school loc Ex. hhid 181005890
# account for that by setting school loc to null/missing
per.loc[per['pstyp']==0, 'pstaz'] = -1
per.loc[per['pstyp']==0, 'pspcl'] = -1
per.loc[per['pstyp']==0, 'psxcord'] = -1.0
per.loc[per['pstyp']==0, 'psycord'] = -1.0
# there appear to be some person records who are not workers but have work loc Ex. hhid 181007697
# account for that by setting work loc to null/missing
per.loc[per['pwtyp']==0, 'pwtaz'] = -1
per.loc[per['pwtyp']==0, 'pwpcl'] = -1
per.loc[per['pwtyp']==0, 'pwxcord'] = -1.0
per.loc[per['pwtyp']==0, 'pwycord'] = -1.0
int_cols = ['pwtaz','pstaz','pwpcl','pspcl','pgend']
per[int_cols] = per[int_cols].astype('int64')
out_colnames = ['hhno','pno','pptyp','pagey','pgend','pwtyp','pwpcl','pwtaz','pstyp','pspcl','pstaz','ppaidprk','pwxcord','pwycord','psxcord','psycord']
out_colnames = out_colnames + ['wt_alladult_wkday','wt_alladult_7day']
out_colnames = out_colnames + ['mon_complete','tue_complete','wed_complete','thu_complete','fri_complete',
'sat_complete','sun_complete','nwkdaywts_complete','n7daywts_complete']
per = per[out_colnames]
per = per.sort_values(['hhno','pno'])
per.to_csv(outperfilename, sep=OUT_SEP, index=False)
print('Person file processing finished: {}'.format(datetime.datetime.now()))
logfile.write('Person file processing finished: ' + str(datetime.datetime.now()) + '\n')
###### Trip processing
print('Trip file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Trip file processing started: ' + str(datetime.datetime.now()) + '\n')
trip = | pd.read_csv(intripfilename, sep='\t') | pandas.read_csv |
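# Editor's note (illustrative sketch): the household and person blocks above recode raw
# survey categories through plain dictionaries with Series.map and then patch special or
# unmapped codes to -1/9. The miniature frame below is an assumption that just illustrates
# that map-then-patch idiom.
import pandas as pd

per_demo = pd.DataFrame({"age": [2, 5, 10], "gender": [1, 997, 5]})
AGE_DEMO = {1: 3, 2: 10, 3: 16, 4: 21, 5: 30, 6: 40, 7: 50, 8: 60, 9: 70, 10: 80}
GEND_DEMO = {1: 2, 2: 1, 3: 3, 4: 3, 997: 3, 995: 9, 999: 9}

per_demo["pagey"] = per_demo["age"].map(AGE_DEMO)
per_demo["pgend"] = per_demo["gender"].map(GEND_DEMO)
per_demo.loc[pd.isna(per_demo["pgend"]), "pgend"] = -1   # codes missing from the dict become -1
print(per_demo)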
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import style
#changing local directory to csv file location
os.chdir('E:\\auto')
#reading prices of BSE Auto companies on 26/nov/2019 interval 1min
df=pd.read_csv('26novprice.csv')
#df
#getting % change in price with respect to initial value of day and adding data
#to new dataframe
df_percentage=pd.DataFrame()
for i in df.columns[1:]:
df_percentage[i]=(df[i]-df[i][0])/df[i][0]*100
#plt.show()
df_percentage.describe()
#removing motherson, bosch as they are outliers in BSE Auto
df1=df_percentage.drop(['Motherson_Sumi','Bosch'],axis=1)
#getting the correlation of %change in price with other companies
sns.heatmap(df1.corr())
df1.describe()
style.use('fivethirtyeight')
plt.figure(figsize=(12,12))
df1.plot(lw=2)
plt.xlabel('time from 9:15am(step=1min)')
plt.ylabel('%change in price')
plt.legend(bbox_to_anchor=(1,1))
plt.savefig('%price change over the day.jpeg')
#df1['Apollo_Tyres'].plot()
#reading volume of BSE Auto companies on 26/nov/2019
df2= | pd.read_csv('26novvolume.csv') | pandas.read_csv |
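# Editor's note (illustrative sketch): df_percentage above expresses each stock's intraday
# move as a percentage of its first 1-minute price, (p_t - p_0) / p_0 * 100. The made-up
# price columns below are assumptions used only to show that normalisation.
import pandas as pd

prices = pd.DataFrame({"Tata_Motors": [160.0, 161.2, 158.8],
                       "Hero_Moto": [2400.0, 2412.0, 2394.0]})
pct = pd.DataFrame()
for col in prices.columns:
    pct[col] = (prices[col] - prices[col][0]) / prices[col][0] * 100
print(pct.round(2))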
# Primary Packages
import pandas as pd
import numpy as np
import streamlit as st
# Package to Load Model
from sklearn.externals import joblib
@st.cache
def load_data():
return pd.read_csv('data/Sell-Out Data.csv')
data = load_data()
data = data[data['COMPANY']!='Competitors']
### Set Title
st.title("Cortex Sales Predictor")
st.write("""From the AC metadata, we built a machine learning-based forecasting model
to predict ac unit sales.""")
# Show data
st.subheader('AC Metadata')
if st.checkbox('Show Raw Data'):
st.write(data.head(20))
st.sidebar.title('Parameters')
# Unit Level
unit_values = pd.Series(data['UNIT'].unique()).str.strip()
unit_dummies = pd.get_dummies(unit_values)
unit_sample = st.sidebar.selectbox("AC Unit", unit_values.values.tolist())
unit_sample_dummies = (unit_dummies.loc[np.where(unit_values.values == unit_sample)[0]]
.values.tolist()[0])
# Dealer Level
dealer_values = pd.Series(data['DEALER'].unique()).str.strip()
dealer_dummies = pd.get_dummies(dealer_values)
dealer_sample = st.sidebar.selectbox("Dealer", dealer_values.values.tolist())
dealer_sample_dummies = (dealer_dummies.loc[np.where(dealer_values.values == dealer_sample)[0]]
.values.tolist()[0])
# Channel Level
channel_values = pd.Series(data['CHANNEL'].unique()).str.strip()
channel_dummies = pd.get_dummies(channel_values)
channel_sample = st.sidebar.selectbox("Channel", channel_values.values.tolist())
channel_sample_dummies = (channel_dummies.loc[np.where(channel_values.values == channel_sample)[0]]
.values.tolist()[0])
# Sales Channel Level
sales_channel_values = pd.Series(data['SALES CHANNEL'].unique()).str.strip()
sales_channel_dummies = pd.get_dummies(sales_channel_values)
sales_channel_sample = st.sidebar.selectbox("Sales Channel", sales_channel_values.values.tolist())
sales_channel_sample_dummies = (sales_channel_dummies.loc[np.where(sales_channel_values.values == sales_channel_sample)[0]]
.values.tolist()[0])
# Region Level
region_values = pd.Series(data['REGION'].unique()).str.strip()
region_dummies = pd.get_dummies(region_values)
region_sample = st.sidebar.selectbox("Region", region_values.values.tolist())
region_sample_dummies = (region_dummies.loc[np.where(region_values.values == region_sample)[0]]
.values.tolist()[0])
# Brand Level
brand_values = pd.Series(data['BRAND'].unique()).str.strip()
brand_dummies = pd.get_dummies(brand_values)
brand_sample = st.sidebar.selectbox("Brand", brand_values.values.tolist())
brand_sample_dummies = (brand_dummies.loc[np.where(brand_values.values == brand_sample)[0]]
.values.tolist()[0])
# Capacity Level
cap_values = pd.Series(data['CAPACITY'].unique()).str.strip()
cap_dummies = | pd.get_dummies(cap_values) | pandas.get_dummies |
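# Editor's note (illustrative sketch): each sidebar widget above turns the chosen category
# into the one-hot row the model expects by indexing the pd.get_dummies output at
# np.where(values == selection). The three category levels below are assumptions that show
# the shape of that lookup.
import numpy as np
import pandas as pd

demo_values = pd.Series(["Retail", "Online", "Dealer"]).str.strip()
demo_dummies = pd.get_dummies(demo_values)
selection = "Online"
one_hot_row = (demo_dummies.loc[np.where(demo_values.values == selection)[0]]
               .values.tolist()[0])
print(one_hot_row)  # e.g. [0, 1, 0] in the dummy-column order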
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_context('notebook')
from collections import OrderedDict
import pickle
from pystan import StanModel
"""Multilevel Modeling with Poststratification (MRP)"""
# Use multilevel regression to model individual survey responses as a function of demographic and geographic
# predictors, partially pooling respondents across states/regions to an extent determined by the data.
# The final step is post-stratification.
# Read the data & define variables
# Data are from http://www.stat.columbia.edu/~gelman/arm/examples/election88
"""Step 1: gather national opinion polls (they need to include respondent information down to the level of disaggregation
the analysis is targetting) """
# Load in data from the CBS polls with the following covariates (individual level):
# - org: organisation which collected the poll
# - year: year id
# - survey: survey id
# - bush: indicator (=1) for support of bush
# - state: state id
# - edu: categorical variable indicating level of education
# - age: categorical variable indicating age
# - female: indicator (=1) for female
# - black: indicator (=1) for black
# - weight: sample weight
polls = pd.read_csv('./data/polls.csv')
polls = polls.drop(polls.columns[[0]], axis=1)
"""Step 2: create a separate dataset of state-level predictors """
# Load in data for region indicators (state level). The variables are:
# - state_abbr: abbreviations of state names
# - regions: 1=northeast, 2=south, 3=north central, 4=west, 5=d.c.
# - not_dc: indicator variable which is 1 for non_dc states
state_info = pd.read_csv('./data/state.csv')
state_info = state_info.rename(columns={'Unnamed: 0': 'state'})
# Include a measure of previous vote as a state-level predictor. The variables are:
# - g76_84pr: state average in previous election
# - stnum2: state id
presvote = pd.read_csv("./data/presvote.csv")
presvote = presvote.drop(presvote.columns[[0]], axis=1)
presvote = presvote.rename(columns={'g76_84pr': 'v_prev', 'stnum2': 'state'})
# Include a measure of candidate effects as a state-level predictor and add empty row for DC.
candidate_effects = | pd.read_csv("./data/candidate_effects.csv") | pandas.read_csv |
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
result = df.groupby("a").sample(n=2)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_n_and_frac_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=1, frac=1.0)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=1, frac=1.0)
def test_groupby_sample_frac_gt_one_without_replacement_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(frac=1.5, replace=False)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(frac=1.5, replace=False)
@pytest.mark.parametrize("n", [-1, 1.5])
def test_groupby_sample_invalid_n_raises(n):
df = DataFrame({"a": [1, 2], "b": [1, 2]})
if n < 0:
msg = "Please provide positive value"
else:
msg = "Only integers accepted as `n` values"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=n)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=n)
def test_groupby_sample_oversample():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(frac=2.0, replace=True)
values = [1] * 20 + [2] * 20
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_without_n_or_frac():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=None, frac=None)
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=None, frac=None)
expected = Series([1, 2], name="b", index=result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, expected_index",
[(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])],
)
def test_groupby_sample_with_weights(index, expected_index):
# GH 39927 - tests for integer index needed
values = [1] * 2 + [2] * 2
df = DataFrame({"a": values, "b": values}, index=Index(index))
result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0])
expected = DataFrame({"a": values, "b": values}, index=Index(expected_index))
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0])
expected = Series(values, name="b", index= | Index(expected_index) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 18:16:34 2019
"""
import requests
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#league year and ID for data download
url = "https://fantasy.espn.com/apis/v3/games/ffl/seasons/2019/segments/0/leagues/298982"
#for historical
#url = "https://fantasy.espn.com/apis/v3/games/ffl/leagueHistory/" + str(league_id) + "?seasonId=" + str(year)
#if using historical year and not current year, change to r.json()[0]
#weekly matchups
r = requests.get(url, params = {"view": "mMatchup"})
d = r.json()
#figure out how many weeks of data to pull so we don't request empty JSON
maximum = max(pd.DataFrame(d["schedule"]).index.tolist()) #how many obs?
length_df = pd.DataFrame([[d["schedule"][i]["winner"]] for i in range(maximum)])
#remove "undecided" game winners - weeks that have yet to be played
length_df = length_df[length_df[0] != "UNDECIDED"]
length = range(len(length_df)) #range for future loops for all games
#Selecting weeks, points, data
source = pd.DataFrame([[d["schedule"][i]["matchupPeriodId"],
d["schedule"][i]["home"]["teamId"],
d["schedule"][i]["home"]["totalPoints"],
d["schedule"][i]["away"]["teamId"],
d["schedule"][i]["away"]["totalPoints"]] for i in length],
columns = ["Week", "Team1", "Score1", "Team2", "Score2"])
#Create a list of each team's margin of victory or defeat for each game
margins = source.assign(Margin1 = source["Score1"] - source["Score2"],
Margin2 = source["Score2"] - source["Score1"])
#team number to name Dictionary
mapping = {1: "Mount",
2: "Alec",
3: "Sirpi",
4: "Oatman",
5: "Babcock",
9: "Jordan",
11: "Casey",
12: "Badillo",
13: "Naki",
14: "Kooper"}
#reshape from wide to long
margins_long = (margins[["Week", "Team1", "Margin1", "Score1"]]
.rename(columns = {"Team1": "Team",
"Margin1": "Margin",
"Score1": "Score"})
.append(margins[["Week", "Team2", "Margin2", "Score2"]]
.rename(columns = {"Team2": "Team",
"Margin2": "Margin",
"Score2": "Score"})))
#add team name to the margins_long data frame
margins_long = (margins_long.assign(teamname = margins_long.Team)
.replace({"teamname": mapping}))
#creating record from values
team_ids = margins_long.Team.unique()
def team_win_loss(dataset, i):
"""
Adding the win-loss record
dataset: which dataset to use?
i: iterator
"""
team = dataset[dataset["Team"] == team_ids[i]]
team_wins = sum(n > 0 for n in team["Margin"])
team_ties = sum(n == 0 for n in team["Margin"])
team_loss = sum(n < 0 for n in team["Margin"])
points = np.sum(team.Score)
wl_info = pd.DataFrame([[team_ids[i], team_wins, team_loss,
team_ties, points]],
columns = ["Team", "Wins", "Losses", "Ties",
"Points"])
return wl_info
#initialize an empty list to collect one row per team
win_loss = []
#loop through all the teams and append each team's record row
for j in range(len(team_ids)):
row = team_win_loss(margins_long, j)
win_loss.append(row)
win_loss = (pd.concat(win_loss)
.sort_values(by = ["Wins", "Ties", "Points"], ascending = False)
.assign(Standing = np.arange(1, 11))
.reset_index(drop = True))
win_loss = (win_loss.assign(teamname = win_loss.Team,
Record = win_loss.Wins.map(str)
+ "-"
+ win_loss.Losses.map(str)
+ "-"
+ win_loss.Ties.map(str))
.replace({"teamname": mapping}))
########################## QED: Record and W/L ################################
##### Plot the Win/Loss Margins
fig, ax = plt.subplots(1, 1, figsize = (16, 6))
order = win_loss.teamname
sns.boxplot(x = "teamname", y = "Margin", data = margins_long, order = order)
ax.axhline(0, ls = "--")
ax.set_xlabel("")
ax.set_title("Win/Loss Margins")
plt.show()
##################################### Luck ####################################
#get the average of each week
averages = (margins.filter(["Week", "Score1", "Score2"])
.melt(id_vars = ["Week"],
value_name = "Score")
.groupby("Week")
.mean()
.reset_index())
#initialize empty list
margin_average = []
for i in range(len(team_ids)):
#select the team and corresponding owner name
team = team_ids[i]
team_owner = mapping[team]
#create a dataframe for the score margin against team average per week
df2 = (margins.query("Team1 == @team | Team2 == @team")
.reset_index(drop = True))
    #reorder df2 so the team of interest always sits in the Team1/Score1 columns
team_loc = list(df2["Team2"] == team)
df2.loc[team_loc,
["Team1", "Score1",
"Team2", "Score2"]] = df2.loc[team_loc, ["Team2", "Score2",
"Team1", "Score1"]].values
#Add new score and win columns
df2 = (df2.assign(change1 = df2["Score1"] - averages["Score"],
change2 = df2["Score2"] - averages["Score"],
Win = df2["Score1"] > df2["Score2"]))
#Append it to the end
margin_average.append(df2)
#combine the per-team results into a single dataframe
margin_average = | pd.concat(margin_average) | pandas.concat |
import sys
import os
from PyQt5 import QtCore, QtWidgets
from qtmodern.styles import dark
from qtmodern.windows import ModernWindow
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import pyqtgraph as pg
import numpy as np
import pandas
import random
import time
import pickle
from pyqtgraph.Qt import QtCore, QtGui
from decimal import Decimal, ROUND_DOWN
import cProfile
import pstats
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QLineEdit, QTabWidget, \
QGridLayout, QVBoxLayout, QHBoxLayout, QGroupBox, QDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
# import pyqtgraph.console
from collections import namedtuple
from itertools import chain
import glob
from pathlib import Path
from ms2analyte.file_handling import data_import
from ms2analyte.file_handling import file_load
from ms2analyte.visualizations import file_open_model
from pyteomics import mgf, auxiliary
import numpy
from matchms import Spectrum
from matchms.exporting import save_as_mgf
from zipfile import ZipFile
import ms2analyte.file_open_dialogue
from threading import *
from PyQt5.QtWidgets import *
from ms2analyte.file_open_dialogue import MainWindowUIClass
from PyQt5 import uic
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import *
from PIL import Image
global open_state
open_state = 1
# Handle high resolution displays:
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
########### Data Import #################
lastClicked = []
clickedPen = pg.mkPen('s', width=5)
clickedPen_legend = pg.mkPen('s', width=3)
lastClicked_legend = []
def fetch_sample_list(input_structure, input_type):
if open_state ==2:
sample_list = data_import.name_extract(input_structure, input_type)
else:
sample_list = []
return sample_list
#######################################################################################
def import_dataframe(input_structure, input_type, sample_name):
print('TEST importdataframe 1')
if open_state == 2:
with open((os.path.join(output_path, experiment_name + "_experiment_import_parameters.pickle")), 'rb') as f:
experiment_info = pickle.load(f)
print(experiment_info.blanks_exist)
print('TEST importdataframe 2')
print(input_structure)
print(sample_name)
print(input_type)
if experiment_info.blanks_exist == True:
with open((os.path.join(input_structure, input_type, sample_name + "_all_replicates_blanked_dataframe.pickle")), 'rb') as f:
print('TEST importdataframe 3')
df = pickle.load(f)
# df = pandas.read_pickle(f)
else:
with open((os.path.join(input_structure, input_type, sample_name + "_all_replicates_dataframe.pickle")), 'rb') as f:
print('TEST importdataframe 3')
df = pickle.load(f)
print('TEST importdataframe 4')
print('TEST importdataframe 5')
print(df)
else:
df = []
print('TEST importdataframe 6')
return df
#######################################################################################
def import_experiment_dataframe(input_structure):
if open_state == 2:
with open((os.path.join(input_structure, experiment_name + "_experiment_analyte_overview_tableau_output.csv"))) as f:
df = pandas.read_csv(f)
else:
df = []
return df
#######################################################################################
def import_ms1_dataframe(input_structure, input_type, sample_name):
if open_state == 2:
print('test1')
with open((os.path.join(input_structure, input_type, sample_name + '_replicate_analyte_mass_spectra.pickle')), 'rb') as f:
inputdata = pickle.load(f)
print('test2')
print('ms1 inputdata',inputdata)
b = 0
replicate_analyte_id = []
average_mass = []
relative_intensity = []
while b < len(inputdata):
i = 0
while i <len(inputdata[b].replicate_analyte_ms1_spectrum):
replicate_analyte_id.append(inputdata[b].replicate_analyte_id)
average_mass.append(inputdata[b].replicate_analyte_ms1_spectrum[i].average_mass)
relative_intensity.append(inputdata[b].replicate_analyte_ms1_spectrum[i].relative_intensity)
i+=1
b+=1
np.round(average_mass,1)
relative_intensity_zeros = [0]*len(relative_intensity)
average_mass_lower = [0]*len(average_mass)
i=0
while i < len(average_mass):
average_mass_lower[i] = average_mass[i] - 0.0001
i+=1
average_mass_upper = [0]*len(average_mass)
i=0
while i < len(average_mass):
average_mass_upper[i] = average_mass[i] + 0.0001
i+=1
data1 = {'replicate_analyte_id': replicate_analyte_id,
'average_mass': average_mass,
'relative_intensity': relative_intensity
}
data2 = {'replicate_analyte_id': replicate_analyte_id,
'average_mass': average_mass_upper,
'relative_intensity': relative_intensity_zeros
}
data3 = {'replicate_analyte_id': replicate_analyte_id,
'average_mass': average_mass_lower,
'relative_intensity': relative_intensity_zeros
}
df1 = pandas.DataFrame (data1, columns = ['replicate_analyte_id', 'average_mass','relative_intensity'])
df2 = pandas.DataFrame (data2, columns = ['replicate_analyte_id', 'average_mass','relative_intensity'])
df3 = pandas.DataFrame (data3, columns = ['replicate_analyte_id', 'average_mass','relative_intensity'])
df_combine = [df1,df2,df3]
df_combine = pandas.concat(df_combine)
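        # The zero-intensity rows at average_mass +/- 0.0001 appear intended to
        # bracket each peak so that a line plot of df_combine draws vertical
        # sticks (a mass spectrum) instead of a connected trace.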
print('test3')
with open((os.path.join(input_structure, input_type, sample_name + '_R1_analytes.pickle')), 'rb') as f:
inputdata2 = pickle.load(f)
print('test4')
b = 0
analyte_id = []
max_peak_intensity_mass = []
max_peak_intensity = []
while b < len(inputdata2):
analyte_id.append(inputdata2[b].analyte_id)
max_peak_intensity_mass.append(inputdata2[b].max_peak_intensity_mass)
max_peak_intensity.append(inputdata2[b].max_peak_intensity)
b+=1
max_peak_intensity_zeros = [0]*len(max_peak_intensity)
max_peak_intensity_mass_lower = [0]*len(max_peak_intensity_mass)
i=0
while i < len(max_peak_intensity_mass):
max_peak_intensity_mass_lower[i] = max_peak_intensity_mass[i] - 0.0001
i+=1
max_peak_intensity_zeros = [0]*len(max_peak_intensity)
max_peak_intensity_mass_upper = [0]*len(max_peak_intensity_mass)
i=0
while i < len(max_peak_intensity_mass):
max_peak_intensity_mass_upper[i] = float(max_peak_intensity_mass[i] + 0.0001)
i+=1
data4 = {'analyte_id': analyte_id,
'max_peak_intensity_mass': max_peak_intensity_mass,
'max_peak_intensity': max_peak_intensity
}
data5 = {'analyte_id': analyte_id,
'max_peak_intensity_mass': max_peak_intensity_mass_upper,
                 'max_peak_intensity': max_peak_intensity_zeros
}
data6 = {'analyte_id': analyte_id,
'max_peak_intensity_mass': max_peak_intensity_mass_lower,
'max_peak_intensity': max_peak_intensity_zeros
}
df4 = pandas.DataFrame (data4, columns = ['analyte_id', 'max_peak_intensity_mass','max_peak_intensity'])
df5 = pandas.DataFrame (data5, columns = ['analyte_id', 'max_peak_intensity_mass','max_peak_intensity'])
df6 = | pandas.DataFrame (data6, columns = ['analyte_id', 'max_peak_intensity_mass','max_peak_intensity']) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True
#
file_path = "/Users/szabolcs/dev/git/DAT210x/Module4/Datasets/"
file_name = "kidney_disease.csv"
exclude_columns = ['id', 'classification'] #, 'rbc', 'pc', 'pcc', 'ba', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane']
df = pd.read_csv(file_path + file_name)
labels = ['red' if i=='ckd' else 'green' for i in df.classification]
df.drop(exclude_columns, axis=1, inplace=True)
print(df.head())
df = pd.get_dummies(df, columns=["rbc"])
df = pd.get_dummies(df, columns=["pc"])
df = pd.get_dummies(df, columns=["pcc"])
df = pd.get_dummies(df, columns=["ba"])
df = pd.get_dummies(df, columns=["htn"])
df = pd.get_dummies(df, columns=["dm"])
df = pd.get_dummies(df, columns=["cad"])
df = pd.get_dummies(df, columns=["appet"])
df = pd.get_dummies(df, columns=["pe"])
df = | pd.get_dummies(df, columns=["ane"]) | pandas.get_dummies |
from itertools import combinations_with_replacement as cwr
from itertools import product
import numpy as np
import numpy.ma as ma
import pandas as pd
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from helpers import timing
class LPA:
def __init__(self, dvr: pd.DataFrame, epsilon_frac: int = 2):
self.dvr = dvr.sort_values("frequency_in_category", ascending=False)
self.epsilon = 1 / (len(dvr) * epsilon_frac)
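        # With the default epsilon_frac of 2, epsilon is half of 1/len(dvr),
        # i.e. half the weight each element would carry under a uniform
        # distribution over the DVR.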
@staticmethod
def create_dvr(frequency: pd.DataFrame) -> pd.DataFrame:
"""Creates the DVR table of the domain"""
dvr = (
frequency.groupby("element", as_index=False)
.sum()
.sort_values(by="frequency_in_category", ascending=False)
)
dvr["global_weight"] = dvr["frequency_in_category"] / sum(
dvr["frequency_in_category"]
)
return dvr.reset_index(drop=True)
def create_pvr(self, frequency: pd.DataFrame) -> pd.DataFrame:
"""Creates a vector for every category in the domain"""
frequency["local_weight"] = frequency[
"frequency_in_category"
] / frequency.groupby("category")["frequency_in_category"].transform("sum")
return frequency
@staticmethod
def KLD(P: np.ndarray, Q: np.ndarray) -> np.ndarray:
"""
Kullback-Leibler distance.
P represents the data, the observations, or a measured probability distribution.
Q represents instead a theory, a model, a description or an approximation of P.
"""
return (P - Q) * (ma.log(P) - ma.log(Q))
def normalize_pvr(
self, pvr: pd.DataFrame, pvr_lengths: np.array, missing: np.array
) -> pd.DataFrame:
"""
The extended pvr (with ɛ) is no longer a probability vector - the sum of all
coordinates is now larger than 1. We correct this by multiplying all non-ɛ
frequencies by a normalization coefficient β. This normalization coefficient is
given by the formula β=1-N*ɛ, where N is the number of words missing in one vector compared to the other (variable named `missing`).
"""
betas = [
item
for sublist in [
times * [(1 - missing * self.epsilon)[i]]
for i, times in enumerate(pvr_lengths)
]
for item in sublist
]
pvr["local_weight"] = pvr["local_weight"] * pd.Series(betas)
return pvr
def betas(self, pvr: pd.DataFrame) -> pd.DataFrame:
pvr_lengths = (
pvr["category"].drop_duplicates(keep="last").index
- pvr["category"].drop_duplicates(keep="first").index
+ 1
).to_numpy()
missing = len(self.dvr) - pvr_lengths
return self.normalize_pvr(pvr, pvr_lengths, missing)
def create_arrays(self, frequency: pd.DataFrame) -> pd.DataFrame:
"""Prepares the raw data and creates signatures for every category in the domain.
`epsilon_frac` defines the size of epsilon, default is 1/(corpus size * 2)
`sig_length` defines the length of the signature, default is 500"""
pvr = self.create_pvr(frequency)
vecs = self.betas(pvr)
vecs = vecs.pivot_table(
values="local_weight", index="element", columns="category"
)
self.dvr_array = (
self.dvr[self.dvr["element"].isin(vecs.index)]
.sort_values("element")["global_weight"]
.to_numpy()
)
self.vecs_array = (
vecs.fillna(self.epsilon).replace(0, self.epsilon).to_numpy().T
)
def create_distances(self, frequency: pd.DataFrame) -> pd.DataFrame:
frequency = frequency.sort_values("category").reset_index(drop=True)
self.create_arrays(frequency)
categories = frequency["category"].drop_duplicates()
elements = frequency["element"].drop_duplicates().dropna().sort_values()
return (
pd.DataFrame(
self.KLD(self.dvr_array, self.vecs_array),
index=categories,
columns=elements,
)
.stack()
.reset_index()
.rename(columns={0: "KL"})
)
def add_underused(self, distances: pd.DataFrame) -> pd.DataFrame:
underused = np.greater(self.dvr_array, self.vecs_array)
underused = underused.reshape(np.multiply(*underused.shape))
distances["underused"] = underused
return distances
def cut(self, sigs: pd.DataFrame, sig_length: int = 500) -> pd.DataFrame:
# TODO: diminishing return
return (
sigs.sort_values(["category", "KL"], ascending=[True, False])
.groupby("category")
.head(sig_length)
.reset_index(drop=True)
)
def create_and_cut(
self, frequency: pd.DataFrame, sig_length: int = 500
) -> pd.DataFrame:
distances = self.create_distances(frequency)
sigs = self.add_underused(distances)
cut = self.cut(sigs, sig_length)
return cut
def distance_summary(self, frequency: pd.DataFrame) -> pd.DataFrame:
sigs = self.create_distances(frequency)
return sigs.groupby("category").sum()
@timing
def sockpuppet_distance(
self, signatures1: pd.DataFrame, signatures2: pd.DataFrame
) -> pd.DataFrame:
"""
Returns size*size df
"""
# TODO: triu
categories1 = signatures1["category"].drop_duplicates()
categories2 = signatures2["category"].drop_duplicates()
pivot = | pd.concat([signatures1, signatures2]) | pandas.concat |
import pandas as pd
import numpy as np
import sklearn.feature_selection
import sklearn.preprocessing
import sklearn.model_selection
import mlr
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import statistics
# sort features (columns) by the length of their names
def sort_by_feature_name(df):
df =df.T
a = []
for i in df.T.columns:
a.append(len(i))
df["len"] = a
df_sorted = df.sort_values(["len"])
df_sorted = df_sorted.drop(["len"],axis=1)
return df_sorted.T
# Remove feature correlations, using Pearson correlation, based on the variable threshold
def remove_correlation(dataset, threshold):
col_corr = set() # Set of all the names of deleted columns
corr_matrix = dataset.corr().abs()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i] # getting the name of column
col_corr.add(colname)
if colname in dataset.columns:
del dataset[colname] # deleting the column from the dataset
return dataset
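# Illustrative usage (hypothetical frame): remove_correlation(x.copy(), 0.9)
# keeps the first column of each pair with absolute Pearson correlation >= 0.9
# and drops the later one; note that the frame passed in is modified in place.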
# SEP is the standard error of prediction (test set). SEE is the error for training
def sep(yt,yp):
return np.sqrt(mean_squared_error(yt, yp))
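# sep() is simply the root-mean-squared error; e.g. sep([1, 2], [1, 4]) is
# sqrt((0 + 4) / 2) ~= 1.414. Lower values mean better predictions.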
def run_MLREM(df2, name, dependent_variable, up_to_beta=200, screen_variance=False):
df=df2.copy()
# Separating independent and dependent variables x and y
y = df[dependent_variable].to_numpy().reshape(-1,1)
x = df.drop(dependent_variable,axis=1)
x_sorted=sort_by_feature_name(x)
x_pastvar = x_sorted.copy()
if screen_variance:
selector = sklearn.feature_selection.VarianceThreshold(threshold=0.01)
selector.fit(x_sorted)
x_pastvar=x_sorted.T[selector.get_support()].T
x_remcorr = remove_correlation(x_pastvar,0.9)
y_scaller = sklearn.preprocessing.StandardScaler()
x_scaller = sklearn.preprocessing.StandardScaler()
ys_scalled = y_scaller.fit_transform(y)
xs_scalled = x_scaller.fit_transform(x_remcorr)
ind = x_remcorr.columns
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(xs_scalled, ys_scalled, test_size=0.3)
df_X_test = pd.DataFrame(X_test, columns=ind) # this is to be able to calculate SEP for each iteration of beta
df_X_train = pd.DataFrame(X_train, columns=ind)
sepai = []
betai = []
indexai = []
weights = []
pvalues = []
# Beta optimisation
for i in range(1,up_to_beta):
beta = 0.1 * i
betai.append(beta)
w, indice, pv = mlr.train(X_train, y_train, ind, beta=beta)
indexai.append(indice)
weights.append(w)
pvalues.append(pv)
X_test2 = df_X_test[indice[1:]]
X_train2 = df_X_train[indice[1:]]
# RMSE calculation - test set
yp = np.dot(X_test2,w[1:])
yp = y_scaller.inverse_transform(yp)
yt = y_scaller.inverse_transform(y_test)
sepai.append(sep(yp,yt))
# RMSE calculation - training set
yp = np.dot(X_train2,w[1:])
yp = y_scaller.inverse_transform(yp)
yt = y_scaller.inverse_transform(y_train)
#print(beta, ';', sep(yp,yt),';', sepai[-1])
# Extracting best results obtained in the previous loop based on the minimum error of prediction
best_beta_indx = sepai.index(np.array(sepai).min())
print('Best beta =', betai[best_beta_indx])
# weights for each remaining feature after correlation has been performed
df_features = pd.DataFrame(weights[best_beta_indx],index=indexai[best_beta_indx])
df_features.columns = ["weights"]
# p value calculation for the regression
df_pvalues = | pd.DataFrame(pvalues[best_beta_indx],index=indexai[best_beta_indx]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import statsmodels.api as sm
from statsmodels.tsa.seasonal import STL
import pandas_datareader.data as DataReader
stock0='ZARJPY=X' #'USDJPY=X' #'EURJPY=X' #AUDJPY=X JPYAUD=X ZARJPY=X GBPJPY=X JPYUSD=X
bunseki = "trend"
start = dt.date(2019,4,1)
end = dt.date(2020,8,14)
df=DataReader.get_data_yahoo("{}".format(stock0),start,end)
print(df)
series=df['Close']
cycle, trend = sm.tsa.filters.hpfilter(series, 144)
df['trend']= trend
f_ratio = 0.6180339887498949
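# f_ratio is the reciprocal of the golden ratio (1/phi ~= 0.618); repeated
# multiplication in the helpers below produces the usual Fibonacci retracement
# factors 0.618, 0.382, 0.236, ...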
def MAX_(x):
return pd.Series(x).max()
def MIN_(x):
return pd.Series(x).min()
def M618_(x, n):
    # compute a retracement level between the lower and upper bounds
f = 1
for i in range(0,n,1):
f *= f_ratio
m618 = pd.Series(x).min() + f*(pd.Series(x).max()-pd.Series(x).min())
return m618
def M618_2(x, n):
    # compute a retracement level below the lower bound
f = 1
for i in range(0,n,1):
f *= f_ratio
m618 = pd.Series(x).min() - f*(pd.Series(x).max()-pd.Series(x).min())
return m618
def M618_1(x, n):
    # compute a retracement level measured down from the upper bound
f = 1
for i in range(0,n,1):
f *= f_ratio
m618 = pd.Series(x).max() - f*(pd.Series(x).max()-pd.Series(x).min())
return m618
def M50_(x, n):
    # compute the 50% retracement level
f = 1
for i in range(0,n,1):
f *= 0.5
m618 = pd.Series(x).max() - f*(pd.Series(x).max()-pd.Series(x).min())
return m618
def M50_1(x, n):
    # compute a further 50% retracement below the lower bound
f = 1
for i in range(0,n,1):
f *= 0.5
m618 = pd.Series(x).min() - f*( | pd.Series(x) | pandas.Series |
import datetime
from flask import Flask,redirect, url_for, request
import matplotlib.pyplot as plt
import pandas as pd
import anomalies.config as config
def plot_results():
year = request.form["year"]
from_month = request.form["from_month"]
to_month = request.form["to_month"]
currency = request.form["currency"]
    print('detecting anomalies...')
print('year: ' + str(year))
print('from_month: ' + str(from_month))
print('to_month: ' + str(to_month))
start_date = request.form["year"] + "-" + request.form["from_month"] + "-01"
#end_date = request.form["year"] + "-" + str(int(request.form["to_month"]) + 1) + "-01"
if (int(request.form["to_month"]) == 12):
end_date = str(int(request.form["year"]) + 1) + "-" + str(1) + "-01"
else:
end_date = request.form["year"] + "-" + str(int(request.form["to_month"]) + 1) + "-01"
quotes = pd.read_csv("static/data/"+currency+"/DAT_MT_"+currency+"_M1_" + str(year) + ".csv")
quotes['Time'] = quotes[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
quotes['Time'] = quotes['Time'].apply(lambda x: pd.to_datetime(x) - datetime.timedelta(hours=2))
quotes.index = quotes.Time
print(quotes)
# select desired range of dates
mask = (quotes.index > start_date) & (quotes.index <= end_date)
quotes = quotes.loc[mask]
print(quotes)
fig, ax = plt.subplots()
ax.plot(quotes['Close'])
ax.set_title('Black Regions')
anormalies = pd.read_csv('static/anomalies/detected_black_regions/'+str(config.ANOMALY_PERCENTAGE) + '_' + str(config.NEAREST_NEIGHBOURS) + '_' + request.form["currency"] + '_' + request.form["year"]+'_all_anomalies.csv')
anormalies['Time'] = anormalies['DateHour'].apply(lambda x: pd.to_datetime(x))
anormalies.index = anormalies.Time
mask = (anormalies.index > start_date) & (anormalies.index <= end_date)
anormalies = anormalies.loc[mask]
#xdate = [datetime.datetime.fromtimestamp(i) for i in quotes['Time']]
for index, row in anormalies.iterrows():
a_index = row["Time"]
b_index = row["Time"]+datetime.timedelta(hours=1)
ax.axvspan(a_index, b_index, color='red', alpha=0.5)
plt.show()
#https://stackoverflow.com/questions/42373104/candlestick-ochl-graph
def plot_results_offline(year,month,currency):
from_month = month
to_month = month
    print('detecting anomalies...')
print('year: ' + str(year))
print('from_month: ' + str(from_month))
print('to_month: ' + str(to_month))
start_date = year + "-" + from_month + "-01"
#end_date = request.form["year"] + "-" + str(int(request.form["to_month"]) + 1) + "-01"
if (int(to_month) == 12):
end_date = str(int(year) + 1) + "-" + str(1) + "-01"
else:
end_date = year + "-" + str(int(to_month) + 1) + "-01"
quotes = pd.read_csv("D:/coursework/L4S2/GroupProject/repo/TeamFxPortal/static/data/"+currency+"/DAT_MT_"+currency+"_M1_" + str(year) + ".csv")
quotes['Time'] = quotes[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
quotes['Time'] = quotes['Time'].apply(lambda x: pd.to_datetime(x) - datetime.timedelta(hours=2))
quotes.index = quotes.Time
print(quotes)
# select desired range of dates
mask = (quotes.index > start_date) & (quotes.index <= end_date)
quotes = quotes.loc[mask]
print(quotes)
fig, ax = plt.subplots()
ax.plot(quotes['Close'])
ax.set_title('Black Regions')
    anormalies = pd.read_csv('D:/coursework/L4S2/GroupProject/repo/TeamFxPortal/static/anomalies/detected_black_regions/'+str(config.ANOMALY_PERCENTAGE) + '_' + str(config.NEAREST_NEIGHBOURS) + '_' + currency+ '_' + year+'_all_anomalies.csv')
anormalies['Time'] = anormalies['DateHour'].apply(lambda x: | pd.to_datetime(x) | pandas.to_datetime |
"""
Created on Wed Feb 27 15:12:14 2019
@author: cwhanse
"""
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from datetime import datetime
import pytz
import pytest
from solarforecastarbiter.validation import validator
import pvlib
from pvlib.location import Location
@pytest.fixture
def irradiance_QCRad():
output = pd.DataFrame(
columns=['ghi', 'dhi', 'dni', 'solar_zenith', 'dni_extra',
'ghi_limit_flag', 'dhi_limit_flag', 'dni_limit_flag',
'consistent_components', 'diffuse_ratio_limit'],
data=np.array([[-100, 100, 100, 30, 1370, 0, 1, 1, 0, 0],
[100, -100, 100, 30, 1370, 1, 0, 1, 0, 0],
[100, 100, -100, 30, 1370, 1, 1, 0, 0, 1],
[1000, 100, 900, 0, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 15, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 60, 1370, 0, 1, 1, 0, 1],
[1000, 300, 850, 80, 1370, 0, 0, 1, 0, 1],
[1000, 500, 800, 90, 1370, 0, 0, 1, 0, 1],
[500, 100, 1100, 0, 1370, 1, 1, 1, 0, 1],
[1000, 300, 1200, 0, 1370, 1, 1, 1, 0, 1],
[500, 600, 100, 60, 1370, 1, 1, 1, 0, 0],
[500, 600, 400, 80, 1370, 0, 0, 1, 0, 0],
[500, 500, 300, 80, 1370, 0, 0, 1, 1, 1],
[0, 0, 0, 93, 1370, 1, 1, 1, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'float64', 'float64',
'bool', 'bool', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_ghi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out = validator.check_ghi_limits_QCRad(expected['ghi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(ghi_out, ghi_out_expected)
def test_check_dhi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dhi_out_expected = expected['dhi_limit_flag']
dhi_out = validator.check_dhi_limits_QCRad(expected['dhi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dhi_out, dhi_out_expected)
def test_check_dni_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dni_out_expected = expected['dni_limit_flag']
dni_out = validator.check_dni_limits_QCRad(expected['dni'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'])
assert_series_equal(ghi_out, ghi_out_expected)
assert dhi_out is None
assert dni_out is None
dhi_out_expected = expected['dhi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'],
dhi=expected['dhi'])
assert_series_equal(dhi_out, dhi_out_expected)
dni_out_expected = expected['dni_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'],
dni=expected['dni'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_consistency_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
cons_comp, diffuse = validator.check_irradiance_consistency_QCRad(
expected['ghi'], expected['solar_zenith'], expected['dni_extra'],
expected['dhi'], expected['dni'])
assert_series_equal(cons_comp, expected['consistent_components'])
assert_series_equal(diffuse, expected['diffuse_ratio_limit'])
@pytest.fixture
def weather():
output = pd.DataFrame(columns=['air_temperature', 'wind_speed',
'relative_humidity',
'extreme_temp_flag', 'extreme_wind_flag',
'extreme_rh_flag'],
data=np.array([[-40, -5, -5, 0, 0, 0],
[10, 10, 50, 1, 1, 1],
[140, 55, 105, 0, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_temperature_limits(weather):
expected = weather
result_expected = expected['extreme_temp_flag']
result = validator.check_temperature_limits(expected['air_temperature'])
assert_series_equal(result, result_expected)
def test_check_wind_limits(weather):
expected = weather
result_expected = expected['extreme_wind_flag']
result = validator.check_wind_limits(expected['wind_speed'])
assert_series_equal(result, result_expected)
def test_check_rh_limits(weather):
expected = weather
data = expected['relative_humidity']
result_expected = expected['extreme_rh_flag']
result = validator.check_rh_limits(data)
result.name = 'extreme_rh_flag'
assert_series_equal(result, result_expected)
def test_check_ac_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.1, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_ac_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_dc_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.3, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_dc_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_limits():
# testing with input type Series
expected = pd.Series(data=[True, False])
data = pd.Series(data=[3, 2])
result = validator._check_limits(val=data, lb=2.5)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, lb=3, lb_ge=True)
assert_series_equal(expected, result)
data = pd.Series(data=[3, 4])
result = validator._check_limits(val=data, ub=3.5)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, ub=3, ub_le=True)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, lb=3, ub=4, lb_ge=True,
ub_le=True)
assert all(result)
result = validator._check_limits(val=data, lb=3, ub=4)
assert not any(result)
with pytest.raises(ValueError):
validator._check_limits(val=data)
@pytest.fixture
def location():
return Location(latitude=35.05, longitude=-106.5, altitude=1619,
name="Albuquerque", tz="MST")
@pytest.fixture
def times():
MST = pytz.timezone('MST')
return pd.date_range(start=datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
end=datetime(2018, 6, 15, 13, 0, 0, tzinfo=MST),
freq='10T')
def test_check_ghi_clearsky(mocker, location, times):
clearsky = location.get_clearsky(times)
# modify to create test conditions
ghi = clearsky['ghi'].copy()
ghi.iloc[0] *= 0.5
ghi.iloc[-1] *= 2.0
clear_times = np.tile(True, len(times))
clear_times[-1] = False
expected = pd.Series(index=times, data=clear_times)
result = validator.check_ghi_clearsky(ghi, clearsky['ghi'])
assert_series_equal(result, expected)
def test_check_poa_clearsky(mocker, times):
dt = pd.date_range(start=datetime(2019, 6, 15, 12, 0, 0),
freq='15T', periods=5)
poa_global = pd.Series(index=dt, data=[800, 1000, 1200, -200, np.nan])
poa_clearsky = pd.Series(index=dt, data=1000)
result = validator.check_poa_clearsky(poa_global, poa_clearsky)
expected = pd.Series(index=dt, data=[True, True, False, True, False])
assert_series_equal(result, expected)
result = validator.check_poa_clearsky(poa_global, poa_clearsky, kt_max=1.2)
expected = pd.Series(index=dt, data=[True, True, True, True, False])
assert_series_equal(result, expected)
def test_check_day_night():
MST = pytz.timezone('MST')
times = [datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
datetime(2018, 6, 15, 22, 0, 0, tzinfo=MST)]
expected = pd.Series(data=[True, False], index=times)
solar_zenith = pd.Series(data=[11.8, 114.3], index=times)
result = validator.check_day_night(solar_zenith)
assert_series_equal(result, expected)
@pytest.mark.parametrize('closed,solar_zenith,expected', (
('left', [89]*11 + [80]*13,
pd.Series([False, True], index=pd.DatetimeIndex(
['20200917 0000', '20200917 0100'], freq='1h'))),
('right', [89]*11 + [80]*13,
pd.Series([False, True], index=pd.DatetimeIndex(
['20200917 0100', '20200917 0200'], freq='1h'))),
('left', [89]*10 + [80]*14,
pd.Series([True, True], index=pd.DatetimeIndex(
['20200917 0000', '20200917 0100'], freq='1h'))),
('right', [89]*10 + [80]*14,
pd.Series([True, True], index=pd.DatetimeIndex(
['20200917 0100', '20200917 0200'], freq='1h')))
))
def test_check_day_night_interval(closed, solar_zenith, expected):
interval_length = pd.Timedelta('1h')
index = pd.date_range(
start='20200917 0000', end='20200917 0200', closed=closed,
freq='5min')
solar_zenith = pd.Series(solar_zenith, index=index)
result = validator.check_day_night_interval(
solar_zenith, closed, interval_length
)
assert_series_equal(result, expected)
def test_check_day_night_interval_no_infer():
interval_length = pd.Timedelta('1h')
closed = 'left'
index = pd.DatetimeIndex(
['20200917 0100', '20200917 0200', '20200917 0400'])
solar_zenith = pd.Series([0]*3, index=index)
with pytest.raises(ValueError, match='contains gaps'):
validator.check_day_night_interval(
solar_zenith, closed, interval_length
)
def test_check_day_night_interval_irregular():
interval_length = pd.Timedelta('1h')
solar_zenith_interval_length = pd.Timedelta('5min')
closed = 'left'
index1 = pd.date_range(
start='20200917 0000', end='20200917 0100', closed=closed,
freq=solar_zenith_interval_length)
index2 = pd.date_range(
start='20200917 0200', end='20200917 0300', closed=closed,
freq=solar_zenith_interval_length)
index = index1.union(index2)
solar_zenith = pd.Series([89]*11 + [80]*13, index=index)
result = validator.check_day_night_interval(
solar_zenith, closed, interval_length,
solar_zenith_interval_length=solar_zenith_interval_length
)
expected = pd.Series([False, False, True], index=pd.DatetimeIndex(
['20200917 0000', '20200917 0100', '20200917 0200'], freq='1h'))
assert_series_equal(result, expected)
def test_check_timestamp_spacing(times):
assert_series_equal(
validator.check_timestamp_spacing(times, times.freq),
pd.Series(True, index=times))
assert_series_equal(
validator.check_timestamp_spacing(times[[0]], times.freq),
pd.Series(True, index=[times[0]]))
assert_series_equal(
validator.check_timestamp_spacing(times[[0, 2, 3]], times.freq),
pd.Series([True, False, True], index=times[[0, 2, 3]]))
assert_series_equal(
validator.check_timestamp_spacing(times, '30min'),
pd.Series([True] + [False] * (len(times) - 1), index=times))
def test_detect_stale_values():
data = [1.0, 1.001, 1.001, 1.001, 1.001, 1.001001, 1.001, 1.001, 1.2, 1.3]
x = pd.Series(data=data)
res1 = validator.detect_stale_values(x, window=3)
res2 = validator.detect_stale_values(x, rtol=1e-8, window=2)
res3 = validator.detect_stale_values(x, window=7)
res4 = validator.detect_stale_values(x, window=8)
res5 = validator.detect_stale_values(x, rtol=1e-8, window=4)
res6 = validator.detect_stale_values(x[1:], window=3)
res7 = validator.detect_stale_values(x[1:8], window=3)
assert_series_equal(res1, pd.Series([False, False, False, True, True, True,
True, True, False, False]))
assert_series_equal(res2, pd.Series([False, False, True, True, True, False,
False, True, False, False]))
assert_series_equal(res3, pd.Series([False, False, False, False, False,
False, False, True, False, False]))
assert not all(res4)
assert_series_equal(res5, pd.Series([False, False, False, False, True,
False, False, False, False, False]))
assert_series_equal(res6, pd.Series(index=x[1:].index,
data=[False, False, True, True, True,
True, True, False, False]))
assert_series_equal(res7, pd.Series(index=x[1:8].index,
data=[False, False, True, True, True,
True, True]))
data = [0.0, 0.0, 0.0, -0.0, 0.00001, 0.000010001, -0.00000001]
y = pd.Series(data=data)
res = validator.detect_stale_values(y, window=3)
assert_series_equal(res, pd.Series([False, False, True, True, False, False,
False]))
res = validator.detect_stale_values(y, window=3, atol=1e-3)
assert_series_equal(res, pd.Series([False, False, True, True, True, True,
True]))
res = validator.detect_stale_values(y, window=3, atol=1e-5)
assert_series_equal(res, pd.Series([False, False, True, True, True, False,
False]))
res = validator.detect_stale_values(y, window=3, atol=2e-5)
assert_series_equal(res, pd.Series([False, False, True, True, True, True,
True]))
with pytest.raises(ValueError):
validator.detect_stale_values(x, window=1)
def test_detect_interpolation():
data = [1.0, 1.001, 1.002001, 1.003, 1.004, 1.001001, 1.001001, 1.001001,
1.2, 1.3, 1.5, 1.4, 1.5, 1.6, 1.7, 1.8, 2.0]
x = pd.Series(data=data)
res1 = validator.detect_interpolation(x, window=3)
assert_series_equal(res1, pd.Series([False, False, False, False, False,
False, False, True, False, False,
False, False, False, True, True, True,
False]))
res2 = validator.detect_interpolation(x, window=3, rtol=1e-2)
assert_series_equal(res2, pd.Series([False, False, True, True, True,
False, False, True, False, False,
False, False, False, True, True, True,
False]))
res3 = validator.detect_interpolation(x, window=5)
assert_series_equal(res3, pd.Series([False, False, False, False, False,
False, False, False, False, False,
False, False, False, False, False,
True, False]))
res4 = validator.detect_interpolation(x, window=3, atol=1e-2)
assert_series_equal(res4, pd.Series([False, False, True, True, True,
True, True, True, False, False,
False, False, False, True, True, True,
False]))
data = [0.0, 0.0, 0.0, -0.0, 0.00001, 0.000010001, -0.00000001]
y = pd.Series(data=data)
res = validator.detect_interpolation(y, window=3, atol=1e-5)
assert_series_equal(res, pd.Series([False, False, True, True, True, True,
False]))
res = validator.detect_stale_values(y, window=3, atol=1e-4)
assert_series_equal(res, pd.Series([False, False, True, True, True, True,
True]))
with pytest.raises(ValueError):
validator.detect_interpolation(x, window=2)
@pytest.fixture
def ghi_clearsky():
MST = pytz.timezone('Etc/GMT+7')
dt = pd.date_range(start=datetime(2019, 4, 3, 5, 0, 0, tzinfo=MST),
periods=60, freq='15T')
loc = pvlib.location.Location(latitude=35, longitude=-110, tz=MST)
cs = loc.get_clearsky(dt)
return cs['ghi']
@pytest.fixture
def ghi_clipped(ghi_clearsky):
ghi_clipped = ghi_clearsky.copy()
ghi_clipped = np.minimum(ghi_clearsky, 800)
ghi_clipped.iloc[12:17] = np.minimum(ghi_clearsky.iloc[12:17], 300)
ghi_clipped.iloc[18:20] = np.minimum(ghi_clearsky.iloc[18:20], 300)
ghi_clipped.iloc[26:28] *= 0.5
ghi_clipped.iloc[36:] = np.minimum(ghi_clearsky.iloc[36:], 400)
return ghi_clipped
def test_detect_clipping(ghi_clipped):
placeholder = pd.Series(index=ghi_clipped.index, data=False)
expected = placeholder.copy()
# for window=4 and fraction_in_window=0.75
expected.iloc[3:6] = True
expected.iloc[14:17] = True
expected.iloc[18:20] = True
expected.iloc[25] = True
expected.iloc[30:36] = True
expected.iloc[38:46] = True
expected.iloc[56:60] = True
flags = validator.detect_clipping(ghi_clipped, window=4,
fraction_in_window=0.75, rtol=5e-3,
levels=4)
assert_series_equal(flags, expected)
def test_detect_clipping_some_nans(ghi_clipped):
placeholder = pd.Series(index=ghi_clipped.index, data=False)
expected = placeholder.copy()
# for window=4 and fraction_in_window=0.75
expected.iloc[3:6] = True
expected.iloc[14:17] = True
expected.iloc[18:20] = True
expected.iloc[25] = True
expected.iloc[30:36] = True
expected.iloc[38:46] = True
expected.iloc[56:60] = True
inp = ghi_clipped.copy()
inp.iloc[48] = np.nan
flags = validator.detect_clipping(inp, window=4,
fraction_in_window=0.75, rtol=5e-3,
levels=4)
assert_series_equal(flags, expected)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_detect_clearsky_ghi(ghi_clearsky):
flags = validator.detect_clearsky_ghi(ghi_clearsky, ghi_clearsky)
# first 7 and last 6 values are judged not clear due to night (ghi=0)
# and rapid change in ghi with sunrise and sunset
assert all(flags[7:-6])
assert not flags[0:7].any() and not flags[-6:].any()
ghi_cloud = ghi_clearsky.copy()
ghi_cloud[12:15] *= 0.5
flags = validator.detect_clearsky_ghi(ghi_cloud, ghi_clearsky)
assert all(flags[7:12]) and all(flags[15:-6])
def test_detect_clearsky_ghi_warn_interval_length(ghi_clearsky):
with pytest.warns(RuntimeWarning):
flags = validator.detect_clearsky_ghi(ghi_clearsky[::4],
ghi_clearsky[::4])
assert (flags == 0).all()
def test_detect_clearsky_ghi_warn_regular_interval(ghi_clearsky):
with pytest.warns(RuntimeWarning):
ser = ghi_clearsky[:-2].append(ghi_clearsky[-1:])
flags = validator.detect_clearsky_ghi(ser, ser)
assert (flags == 0).all()
def test_detect_clearsky_ghi_one_val(ghi_clearsky):
ser = ghi_clearsky[:1]
assert len(ser) == 1
with pytest.warns(RuntimeWarning):
flags = validator.detect_clearsky_ghi(ser, ser)
assert (flags == 0).all()
@pytest.mark.parametrize('inp,expected', [
(pd.Timedelta('15min'), 6),
( | pd.Timedelta('1h') | pandas.Timedelta |
# -*- coding: utf-8 -*-
import sys
import requests
from bs4 import BeautifulSoup,BeautifulStoneSoup
from sqlalchemy import *
from flask import g
import pandas as pd
import numpy as np
from flask import current_app as app
from webapp.services import db,db_service as dbs
from webapp.models import Stock,Comment,FinanceBasic
import json,random,time
import http.cookiejar
from pandas.tseries.offsets import *
from datetime import datetime
import urllib2,re,html5lib
group_stockholder_rate = None
def getLatestStockHolder():
global group_stockholder_rate
if group_stockholder_rate is None:
hdf = pd.read_sql_query("select code,report_date,holder_type,holder_name,rate\
from stock_holder order by report_date desc", db.engine)
group_stockholder_rate = hdf.groupby(['code']).head(10)
#group_stockholder_rate = gdf[gdf['holder_type'] != '自然人股']
return group_stockholder_rate
def getRefreshStocks():
start_date = datetime.now().strftime('%Y-%m-%d')
    #get the list of all stock codes
stocks = db.session.query(Stock).\
filter(or_(Stock.holder_updated_time == None,Stock.holder_updated_time < start_date)).\
filter_by(flag=0).all()
return map(lambda x:x.code, stocks)
def refreshStockHolderSum(gdf,code):
#gdf = getLatestStockHolder()
agdf = gdf[gdf['holder_type'] != '自然人股']
a1gdf = agdf.groupby(['report_date'])
t1_gdf = a1gdf['rate'].agg({'size': np.size})
t1_gdf = t1_gdf.reset_index()
t2_gdf = a1gdf['rate'].agg({'sum': np.sum})
t2_gdf = t2_gdf.reset_index()
t3_gdf = pd.merge(t1_gdf, t2_gdf, on='report_date')
t3_gdf = t3_gdf.sort_values(by='report_date', ascending=False).head(1)
t3_gdf['code'] = code
bdf = pd.read_sql_query("select * from stock_basic where code =%(code)s and flag=0", db.engine, params={'code': code})
t3_df = pd.merge(t3_gdf, bdf, on='code')
m2_df = pd.DataFrame({
'code': t3_df.code,
'name': t3_df['name'],
'report_date': t3_df.report_date,
'count': t3_df['size'],
'sum': t3_df['sum']
})
if not m2_df.empty:
for row_index, row in m2_df.iterrows():
sql = text("delete from stock_holder_sum where code =:code")
result = db.session.execute(sql,{'code': row.code})
m2_df.to_sql('stock_holder_sum', db.engine, if_exists='append', index=False, chunksize=1000)
global group_stockholder_rate
group_stockholder_rate = None
#get the institutional shareholding ratio for each stock
def getGroupStockHolderRate():
df = pd.read_sql_query(
"select code,count,sum,report_date from stock_holder_sum",
db.engine)
return df
def queryHolderName(hcode):
sql = "select holder_name from stock_holder where holder_code=:code limit 1";
resultProxy = db.session.execute(text(sql), {'code': hcode})
return resultProxy.scalar()
#get the ranking of individual (natural-person) shareholders
def getStockHolderRank():
gdf = getLatestStockHolder()
agdf = gdf[gdf['holder_type'] == '自然人股']
a1gdf = agdf.groupby(['code'])
    t2_gdf = a1gdf['rate'].agg({'sum': np.sum}) #total holding ratio
    t3_gdf = a1gdf['rate'].agg({'size': np.size}) #number of holders
t4_df = | pd.concat([t2_gdf, t3_gdf], axis=1, join='inner') | pandas.concat |
import pandas as pd
def import_sensor_file_mhealth(filepath, verbose=False):
df = pd.read_csv(filepath,
dtype=str,
error_bad_lines=False,
warn_bad_lines=False,
skip_blank_lines=True,
low_memory=True,
comment='#')
df.iloc[:,0] = | pd.to_datetime(df.iloc[:,0], infer_datetime_format=True, errors='coerce', format='%Y-%m-%d %H:%M:%S.%f', exact=True) | pandas.to_datetime |
import pandas as pd
import numpy as np
from nastran.post.f06.common import extract_tabulated_data, parse_text_value, find_tabular_line_range, parse_label_subcase, F06Page
REALIGVAL_SUBCASE_LINE = 2
REALIGVAL_TABULAR_LINE = 7
REALEIGVAL_CHECK_AUGMENTATION_LINE = 5
REALEIGVAL_CHECK_AUGMENTATION_STR = 'AUGMENTATION OF RESIDUAL VECTORS'
REALIGVAL_KEYS = {
'MODE': 'Mode No.',
'EXTRACTIONORDER': 'Mode Extraction Order',
'EIGENVALUE': 'Real Eigenvalue',
'RADIANS': 'Frequency in Radians',
'CYCLES': 'Frequency in Hz',
'GENERALIZEDMASS': 'Generalized Mass',
'GENERALIZEDSTIFF': 'Generalized Stiffness',
}
class RealEigValF06Page(F06Page):
def __init__(self, df=None, info=None, raw_lines=None, meta=None):
super().__init__(raw_lines, meta)
self.df = df
self.info = {} if info == None else info
def __repr__(self):
return self.__str__()
def __str__(self):
return 'REAL EIGVAL F06\tSUBCASE {}\tPAGE {}'.format(self.info['SUBCASE'], self.meta['page'])
def parse_realeigval_page(lines):
a, b = find_tabular_line_range(lines, REALIGVAL_TABULAR_LINE)
if REALEIGVAL_CHECK_AUGMENTATION_STR in lines[REALEIGVAL_CHECK_AUGMENTATION_LINE]:
a += 1
parsed_data = extract_tabulated_data(lines[a:b])
df = pd.DataFrame(parsed_data, columns=list(REALIGVAL_KEYS.keys()))
info = {}
label, subcase = parse_label_subcase(lines[REALIGVAL_SUBCASE_LINE])
info['LABEL'] = label
info['SUBCASE'] = subcase
return RealEigValF06Page(df, info, lines)
def summarize_real_eigvals(results, key='CYCLES'):
vals = list(map(lambda p: (p.info['SUBCASE'], p.df[key]), results.eigval))
df = | pd.DataFrame(vals, columns=('SUBCASE', key)) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 13:20:11 2019
@author: strq
"""
import pytest
import viessmann_data_audit as vda
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_blobs
from datetime import datetime
strEntityFile = 'tests/resources/entity.gz.pkl'
strEntityFastFile = 'tests/resources/entity_fast.gz.pkl'
def create_test_data():
# Unique Numbers
arrNumbers = np.arange(0,5)
# Numbers with Nan
arrNanNumbers = np.array([1, np.nan, 2, np.nan, 3])
# Non unique objects
arrObj = ["A", "A", "B", "C", "D"]
# Categorical
# Categorical with Nan
serCat = pd.Series(["a", "b", "c", "b", "a"])
serNanCatl = pd.Series(["b", "a", "c", "e", "f"])
cat_type = pd.api.types.CategoricalDtype(
categories=["a", "b", "c", "d"], ordered=True)
serCategorical = serCat.astype(cat_type)
serNanCategorical = serNanCatl.astype(cat_type)
serNoCat = pd.Series(["a", "b", "c", "b", "a"])
cat_no_order_type = pd.api.types.CategoricalDtype(
categories=["a", "b", "c", "d"], ordered=False)
serNoOrderCategorical = serNoCat.astype(cat_no_order_type)
# Outlier
arrOutlier = np.array([1,1,1,1,10])
dictionary = {"id": arrNumbers,
"nanNumbers": arrNanNumbers,
"nonUniqueObjects": arrObj,
"categorical": serCategorical,
"nanCategorical": serNanCategorical,
"noOrderCategorical": serNoOrderCategorical,
"sigOutlier": arrOutlier}
dfData = pd.DataFrame(dictionary)
dfData.insert(0,'TimeStamp',pd.datetime.now().replace(microsecond=0))
dfData.loc[0,"TimeStamp"] = pd.Timestamp('2018-12-01 08:00:00.000000', tz=None)
dfData.loc[1,"TimeStamp"] = pd.Timestamp('2018-12-01 08:00:01.000000', tz=None)
dfData.loc[2,"TimeStamp"] = pd.Timestamp('2019-01-31 08:00:00.000000', tz=None)
dfData.loc[3,"TimeStamp"] = pd.Timestamp('2019-01-31 08:01:00.000000', tz=None)
dfData.loc[4,"TimeStamp"] = | pd.Timestamp('2021-01-31 09:00:00.000000', tz=None) | pandas.Timestamp |
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
        Path to the city temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
dff = pd.read_csv(filename, parse_dates=['Date'])
# no nans, days & months & years are in range,
# remove 99F (-72.77777777777777 C) - missing data
indx = dff[dff['Temp'] == -72.77777777777777].index
dff.drop(indx, inplace=True)
# add dayof year column.
dff['DayOfYear'] = dff.apply(lambda row: row.Date.timetuple().tm_yday, axis=1)
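    # An equivalent vectorized form, assuming 'Date' was parsed as datetime64:
    # dff['DayOfYear'] = dff['Date'].dt.dayofyear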
return dff
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
df = load_data(r'C:\Users\<NAME>\Documents\GitHub\IML.HUJI\datasets\City_Temperature.csv')
df['Year'] = df.loc[:, 'Year'].astype(str)
# Question 2 - Exploring data for specific country
il_data = df[df['Country'] == 'Israel']
fig2_1 = px.scatter(il_data, x='DayOfYear', y='Temp', color='Year', title='Temp as a function of DayOfYear')
fig2_1.write_html('israel_temp_year.html', auto_open=True)
sd_func = lambda v: np.sqrt(np.var(list(v)))
month_df = il_data.groupby('Month').agg({'Temp': sd_func})
fig2_2 = px.bar(month_df, height=300, labels={'x': 'Month', 'value': 'sd of daily Temp'},
title='bar plot of sd of Temps as a function of month')
fig2_2.write_html('bar_plot_months.html', auto_open=True)
# Question 3 - Exploring differences between countries
avg_func = lambda v: np.mean(v)
month_country_df = df.groupby(['Country', 'Month']).Temp.agg([sd_func, avg_func]).rename(
columns={'<lambda_0>': 'sd', '<lambda_1>': 'mean'}).reset_index()
fig3 = px.line(month_country_df, x='Month', y='mean', color='Country', title='mean temp as a function month',
error_y='sd')
fig3.write_html('plot_months_country.html', auto_open=True)
# Question 4 - Fitting model for different values of `k`
temp_label = il_data['Temp']
temp_feature = il_data['DayOfYear']
train_x, train_y, test_x, test_y = split_train_test(temp_feature, temp_label, 0.75)
res = [[], []]
for i in range(1, 10):
pol = PolynomialFitting(i)
pol.fit(train_x.to_numpy(), train_y.to_numpy())
res[0].append(i)
res[1].append(round(pol.loss(test_x.to_numpy(), test_y.to_numpy()), 2))
k_mse_df = pd.DataFrame({'k': res[0], 'mse': res[1]})
print(k_mse_df)
fig4_1 = px.bar(k_mse_df, x='k', y='mse', height=300, labels={'x': 'k', 'value': 'mse'},
title='bar plot of mse as a function of k')
fig4_1.write_html('k_mse.html', auto_open=True)
# Question 5 - Evaluating fitted model on different countries
pol = PolynomialFitting(4)
pol.fit(il_data['DayOfYear'].to_numpy(), il_data['Temp'].to_numpy())
indx = df[df['Country'] == 'Israel'].index
no_il = df.drop(indx, inplace=False)
res = [[], []]
for i in no_il.Country.unique():
indx = no_il[df['Country'] != i].index
country_df = no_il.drop(indx, inplace=False)
res[0].append(i)
res[1].append(pol.loss(country_df['DayOfYear'].to_numpy(), country_df['Temp'].to_numpy()))
res = | pd.DataFrame({'Country': res[0], 'mse': res[1]}) | pandas.DataFrame |
import tweepy
# # For tweepy documentation go to https://www.tweepy.org/
import pandas as pd
# # For pandas documentation go to https://pandas.pydata.org/docs/
from datetime import datetime, timezone
import time
from collections import Counter
# # Authentication
client = tweepy.Client(bearer_token="<KEY>",
consumer_key="BzjYwEDLCdGI0D45tQiemLPp1",
consumer_secret="<KEY>",
access_token="<KEY>",
access_token_secret="<KEY>",
wait_on_rate_limit=True)
auth = tweepy.OAuthHandler("BzjYwEDLCdGI0D45tQiemLPp1", "<KEY>")
auth.set_access_token("<KEY>", "<KEY>")
api = tweepy.API(auth, wait_on_rate_limit=True)
tweet_id_list = []
tweets_list = []
retweeters_list = []
tell_tweet_dict = {'tweet_id': [], 'amount_retweets': [], 'retweeters': []}
look_up_list = []
tweet_obj_list = []
def scrape_tweets(user_id):
for tweet in tweepy.Paginator(client.get_users_tweets, id=user_id, start_time='2021-08-26T00:00:01Z', end_time='2021-09-26T23:59:59Z',
max_results=100, exclude="retweets").flatten(250):
tweet_id_list.append(tweet.id)
tweet_obj_list.append(tweet)
print(len(tweet_id_list))
return tweet_id_list
def get_retweeters(tweet_id):
tell_tweet_dict['tweet_id'].append(tweet_id)
tell_retweeters_list = []
for retweeter in tweepy.Paginator(client.get_retweeters, id=tweet_id).flatten(1000):
tell_retweeters_list.append(retweeter.id)
tell_amount_retweets = len(tell_retweeters_list)
if retweeter.id not in retweeters_list:
retweeters_list.append(retweeter.id)
tell_tweet_dict['amount_retweets'].append(tell_amount_retweets)
tell_tweet_dict['retweeters'].append(tell_retweeters_list)
return tell_tweet_dict, retweeters_list
def clean_alt_list(list_):
list_ = list_.replace(', ', '","')
list_ = list_.replace('[', '["')
list_ = list_.replace(']', '"]')
return list_
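# Example: clean_alt_list('[12, 34]') returns '["12","34"]', a list-like string
# that the eval() call further down turns back into a list of id strings.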
def to_1d(series):
return pd.Series([x for _list in series for x in _list])
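# Example: to_1d(pd.Series([[1, 2], [3]])) gives pd.Series([1, 2, 3]); used to
# flatten the per-tweet retweeter lists before value_counts().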
def user_lookup(retweeter_id):
user = api.get_user(user_id=retweeter_id, include_entities=True)
return user
def lookup_user_list(user_id_list, api):
full_users = []
users_count = len(user_id_list)
try:
for i in range(int((users_count / 100) + 1)):
full_users.extend(api.lookup_users(user_id=user_id_list[i*100:min((i+1)*100, users_count)]))
# print 'getting users batch:', i
except tweepy.TweepyException as e:
print('Something went wrong, quitting...', e)
time.sleep(15 * 60)
return full_users
def add_to_lookup(lookup_item):
if lookup_item not in look_up_list:
look_up_list.append(lookup_item)
def tweets_per_day(user):
    now = datetime.now(timezone.utc)
    created_date = user.created_at
    delta = now - created_date
    # print(delta.days)
    tweet_count = user.statuses_count
    # print(tweet_count)
    # print("Tweets per day: ")
    tweets_per_day = round((tweet_count / delta.days), 2)
    # print(tweets_per_day)
    return tweets_per_day
def years_since_created_at(user):
    now = datetime.now(timezone.utc)
    created_date = user.created_at
    delta = now - created_date
    years = round((delta.days/365), 2)
    return years
# # Defining User with ID
user_id = 844081278
# # Scrape Tweets of defined User
print(scrape_tweets(user_id))
# # Get Retweeters and save to csv
for tweet_id in tweet_id_list:
get_retweeters(tweet_id)
df_checklist = pd.DataFrame(retweeters_list)
df_checklist.to_csv('retweeters_list.csv', index=False, header=True)
print(tweet_obj_list)
df_tell_tweet = pd.DataFrame(tell_tweet_dict)
print(df_tell_tweet)
df_tell_tweet.to_csv('tell_tweet.csv', index=False, header=True)
print("retweeters scraped and dataframe saved")
# # Get Content of Tweets
data_list = []
for info in tweet_obj_list:
tweet_dict = {
'data_id': str(info.id),
'date_time_posted': str(info.created_at),
'tweet': info.text,
}
tweets_list.append(tweet_dict)
print(tweets_list)
df_tell_tweet = pd.DataFrame(tweets_list)
print(df_tell_tweet)
df_tell_tweet.to_csv('tweets.csv')
# # Possibility to implement a sample, not used
# # Sample retweeters and add to lookup list, write to csv
# sample_retweeters = random.sample(retweeters_list, 5)
# for sample_item in retweeters_list:
# add_to_lookup(sample_item)
# print("Retweeters sampled")
# # Load CSV File
df = pd.read_csv('tell_tweet.csv')
# # Count Retweets by User_id
df["retweeters"] = df["retweeters"].apply(clean_alt_list)
df["retweeters"] = df["retweeters"].apply(eval)
retweeters_counted = to_1d(df["retweeters"]).value_counts()
retweeters_counted = retweeters_counted.to_frame().reset_index()
retweeters_counted = retweeters_counted.rename(columns={0: 'retweets'})
retweeters_counted = retweeters_counted.rename(columns={'index': 'user_id'})
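# At this point retweeters_counted is a two-column frame, one row per unique retweeter with the
# number of scraped tweets that user retweeted (illustrative sketch of its shape):
#     user_id  retweets
#     12345    7
#     67890    3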
# # Look up Retweeters Info and add to list
retweeters_info_list = []
for retweeter in retweeters_counted.user_id:
print(retweeter)
try:
info = user_lookup(retweeter)
user_info_dict = {
'user_id': info.id_str,
'screen_name': info.screen_name,
'verified': info.verified,
'followers_count': info.followers_count,
'friends_count': info.friends_count,
'tweets': info.statuses_count,
'age_in_years': years_since_created_at(info),
'tweets_per_day': tweets_per_day(info)
}
except tweepy.TweepyException:
user_info_dict = {
'user_id': retweeter,
'screen_name': "private user",
'verified': "private user",
'followers_count': "private user",
'friends_count': "private user",
'tweets': "private user",
'age_in_years': "private user",
'tweets_per_day': "private user"
}
retweeters_info_list.append(user_info_dict)
df_retweeter_info = pd.DataFrame(retweeters_info_list)
print(df_retweeter_info)
# # Combine both dataframes and save to csv
df_combined = pd.merge(retweeters_counted, df_retweeter_info, on='user_id')
print(df_combined)
df_combined.to_csv("retweets_afd.csv")
# # Scrape retweeters friends, add to dict_ids, write to retweetersfriendscount.csv
dict_ids = {'id': [], 'amount': []}
print("Start get_friends_ids")
retweeter_friends_object_list = []
retweeter_friends = []
retweeter_friends_objects = {'id': [], 'username': [], 'verified': [], 'followers_count': []}
for retweeter_id in retweeters_list:
x = retweeter_id
for rf in tweepy.Paginator(client.get_users_following, id=x, user_fields=['username','verified','public_metrics'], max_results=1000).flatten(limit=178000):
# print(rf.data['id'])
retweeter_friends.append(rf.data['id'])
if rf.data['id'] not in retweeter_friends_objects['id']:
try:
retweeter_friends_objects['id'].append(rf.data['id'])
retweeter_friends_objects['username'].append(rf.data['username'])
retweeter_friends_objects['verified'].append(rf.data['verified'])
retweeter_friends_objects['followers_count'].append(rf.public_metrics['followers_count'])
except tweepy.TweepyException:
retweeter_friends_objects['id'].append(rf.data['id'])
retweeter_friends_objects['username'].append("private user")
retweeter_friends_objects['verified'].append("private user")
retweeter_friends_objects['followers_count'].append("private user")
print("get one user: ", (x), "current length retweeter_friends: ", (len(retweeter_friends)))
df_friend_info = pd.DataFrame(retweeter_friends_objects)
amount_retweeter_friends = len(retweeter_friends)
print("amount_retweeter_friends =")
print(amount_retweeter_friends)
friend_counts = Counter(retweeter_friends)
for friend_id, count in friend_counts.items():
    dict_ids['id'].append(friend_id)
    dict_ids['amount'].append(count)
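# Illustrative example of the aggregation above: Counter([42, 42, 7]) yields {42: 2, 7: 1}, so
# dict_ids ends up with one row per followed account and the number of retweeters following it.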
print("get_friends_ids complete")
# # change dictionary to DataFrame and print
df_dict_ids = pd.DataFrame.from_dict(dict_ids)
print("print(df_dict_ids)")
print(df_dict_ids)
df_dict_ids.to_csv('user_dict_info.csv', index=False, header=True)
# # choose how many counts at least as relevant
# relevant_friend = df_dict_ids.loc[df_dict_ids['amount'] > 0] # change number if necessary
# print("relevant items:")
# print(relevant_friend)
# relevant_for_lookup_list = relevant_friend['id']
# for relevant_item in relevant_for_lookup_list:
# add_to_lookup(relevant_item)
# print(look_up_list)
# # combining userinfo and relevant users
combined = pd.merge(df_dict_ids, df_friend_info, on='id')
"""
This module contains basic measurement classes for data acquired with the ROACH.
"""
from __future__ import division
import time
from collections import OrderedDict
import logging
import numpy as np
import pandas as pd
from matplotlib.pyplot import mlab
from scipy import signal
from memoized_property import memoized_property
from kid_readout.measurement import core
from kid_readout.analysis.resonator import lmfit_resonator
from kid_readout.analysis.timeseries import binning, despike, iqnoise, periodic
from kid_readout.roach import calculate
logger = logging.getLogger(__name__)
class RoachMeasurement(core.Measurement):
"""
An abstract base class for measurements taken with the ROACH.
"""
_version = 0
def start_epoch(self):
"""
Return self.epoch, if it exists, and if not return the earliest epoch of any RoachMeasurement that this
measurement contains. Measurements that are not RoachMeasurements are ignored.
Returns
-------
float
The epoch of this Measurement or the earliest epoch of its contents; np.nan if neither is found.
"""
if hasattr(self, 'epoch'):
return self.epoch
else:
possible_epochs = []
public_nodes = [(k, v) for k, v in self.__dict__.items()
if not k.startswith('_') and isinstance(v, core.Node)]
for name, node in public_nodes:
if isinstance(node, RoachMeasurement):
possible_epochs.append(node.start_epoch())
elif isinstance(node, core.MeasurementList):
possible_epochs.append(np.min([m.start_epoch() for m in node if isinstance(m, RoachMeasurement)]))
if possible_epochs:
return np.min(possible_epochs)
else:
return np.nan
def _delete_memoized_property_caches(self):
for attr in dir(self):
if hasattr(self, '_' + attr) and attr not in core.PRIVATE:
delattr(self, '_' + attr)
if self._parent is not None:
self._parent._delete_memoized_property_caches()
@property
def cryostat(self):
try:
return self._io.metadata.cryostat
except (AttributeError, KeyError) as e:
return None
class RoachStream(RoachMeasurement):
_version = 1
def __init__(self, tone_bin, tone_amplitude, tone_phase, tone_index, filterbank_bin, epoch, sequence_start_number,
s21_raw, data_demodulated, roach_state, state=None, description='', validate=True):
"""
Return a new RoachStream instance. This class has no dimensions and is intended to be subclassed.
Parameters
----------
tone_bin : numpy.ndarray(int)
An array of integers representing the frequencies of the tones played during the measurement.
tone_amplitude : numpy.ndarray(float)
An array of floats representing the amplitudes of the tones played during the measurement.
tone_phase : numpy.ndarray(float)
An array of floats representing the radian phases of the tones played during the measurement.
tone_index : int or numpy.ndarray(int)
tone_bin[tone_index] corresponds to the frequency used to produce s21_raw.
filterbank_bin : int or numpy.ndarray(int)
The filter bank bin(s) containing the tone(s).
epoch : float
The unix timestamp of the first sample of the time-ordered data.
sequence_start_number : int
The ROACH sequence number for the first sample of the time-ordered data.
s21_raw : numpy.ndarray(complex)
The data, demodulated or not.
data_demodulated : bool
True if the data is demodulated.
roach_state : dict
State information for the roach; the result of roach.state.
state : dict
All non-roach state information.
description : str
A human-readable description of this measurement.
validate : bool
If True, check that the array shapes match the dimensions OrderedDict.
"""
self.tone_bin = tone_bin
self.tone_amplitude = tone_amplitude
self.tone_phase = tone_phase
self.tone_index = tone_index
self.filterbank_bin = filterbank_bin
self.epoch = epoch
self.sequence_start_number = sequence_start_number
self.s21_raw = s21_raw
self.data_demodulated = data_demodulated
self.roach_state = core.StateDict(roach_state)
super(RoachStream, self).__init__(state=state, description=description, validate=validate)
@memoized_property
def sample_time(self):
"""numpy.ndarray(float): The time of each sample relative to the first sample in the stream."""
return (np.arange(self.s21_raw.shape[-1], dtype='float') /
self.stream_sample_rate)
@memoized_property
def frequency(self):
return calculate.frequency(self.roach_state, self.tone_bin[self.tone_index])
@property
def frequency_MHz(self):
return 1e-6 * self.frequency
@memoized_property
def baseband_frequency(self):
return calculate.baseband_frequency(self.roach_state, self.tone_bin[self.tone_index])
@property
def baseband_frequency_MHz(self):
return 1e-6 * self.baseband_frequency
@property
def stream_sample_rate(self):
return calculate.stream_sample_rate(self.roach_state)
@memoized_property
def s21_raw_mean(self):
"""
Return the mean of s21_raw for each channel. NaN samples are excluded from the calculation.
Raises
------
RuntimeWarning
If all of the samples for a channel are NaN; in this case the return value for that channel will be NaN.
Returns
-------
numpy.ndarray(complex)
The mean of s21_raw for each channel.
"""
return np.nanmean(self.s21_raw, axis=-1)
@memoized_property
def s21_raw_mean_error(self):
"""
Estimate the error in s21_raw_mean for each channel. NaN samples are excluded from the calculation.
The method assumes that the samples are independent, which is probably not true.
Raises
------
RuntimeWarning
If all of the samples for a channel are NaN; in this case the return value for that channel will be NaN.
Returns
-------
numpy.ndarray(complex)
An estimate of the complex standard error of the mean of s21_raw.
"""
# The float cast allows conversion to NaN.
        num_good_samples = np.sum(~np.isnan(self.s21_raw), axis=-1).astype(float)
if isinstance(num_good_samples, np.ndarray):
# Avoid a ZeroDivisionError if some of the channels have no good samples.
num_good_samples[num_good_samples == 0] = np.nan
elif num_good_samples == 0: # num_good_samples is a scalar; avoid a ZeroDivisionError.
num_good_samples = np.nan
return ((np.nanstd(self.s21_raw.real, axis=-1) + 1j * np.nanstd(self.s21_raw.imag, axis=-1)) /
np.sqrt(num_good_samples))
@property
def s21_point(self):
"""
Return one s21 point per stream, calculated using the best available method.
The method used to calculate this point may change.
"""
return self.s21_raw_mean
@property
def s21_point_error(self):
"""
Return an estimate of the standard error of the mean for s21_point, calculated using the best available method.
The method used to calculate this error may change.
"""
return self.s21_raw_mean_error
def fold(self, array, period_samples=None, reduce=np.mean):
if period_samples is None:
period_samples = calculate.modulation_period_samples(self.roach_state)
return periodic.fold(array, period_samples, reduce=reduce)
def epochs(self, start=-np.inf, stop=np.inf):
"""
Return a StreamArray containing only the data between the given start and stop epochs.
The returned StreamArray has the same state.
The indexing follows the Python convention that the first value is inclusive and the second is exclusive:
start <= epoch < stop
Thus, the two slices stream_array.epochs(t0, t1) and stream_array.epochs(t1, t2) will contain all the data
occurring at or after t0 and before t2, with no duplication.
"""
start_index = np.searchsorted(self.epoch + self.sample_time, (start,), side='left')[0] # Need to be ints
stop_index = np.searchsorted(self.epoch + self.sample_time, (stop,), side='right')[0] # This index not included
return self.__class__(tone_bin=self.tone_bin, tone_amplitude=self.tone_amplitude,
tone_phase=self.tone_phase, tone_index=self.tone_index,
filterbank_bin=self.filterbank_bin, epoch=self.epoch + self.sample_time[start_index],
sequence_start_number=np.nan, # This may no longer be valid for the sliced data.
s21_raw=self.s21_raw[..., start_index:stop_index],
data_demodulated=self.data_demodulated, roach_state=self.roach_state,
state=self.state, description=self.description)
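    # Illustrative usage of epochs() (hypothetical variable names, not from this module):
    #     first_half = stream_array.epochs(t0, t0 + 0.5 * duration)
    #     second_half = stream_array.epochs(t0 + 0.5 * duration, t0 + duration)
    # Because the interval is half-open (start <= epoch < stop), the two slices share no samples.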
class RoachStream0(RoachMeasurement):
"""
This class is a factory for producing RoachStream version 1 instances from version 0 data.
Version 0 did not save the sequence_start_number integer.
"""
def __new__(cls, *args, **kwargs):
kwargs['sequence_start_number'] = None
return RoachStream(*args, **kwargs)
class StreamArray(RoachStream):
"""
This class represents simultaneously-sampled data from multiple channels.
"""
_version = 1
dimensions = OrderedDict([('tone_bin', ('tone_bin',)),
('tone_amplitude', ('tone_bin',)),
('tone_phase', ('tone_bin',)),
('tone_index', ('tone_index',)),
('filterbank_bin', ('tone_index',)),
('s21_raw', ('tone_index', 'sample_time'))])
def __init__(self, tone_bin, tone_amplitude, tone_phase, tone_index, filterbank_bin, epoch, sequence_start_number,
s21_raw, data_demodulated, roach_state, state=None, description='', validate=True):
"""
Return a new StreamArray instance. The integer array tone_index contains the indices of tone_bin,
tone_amplitude, and tone_phase for the tones demodulated to produce the time-ordered s21_raw data.
The tone_bin, tone_amplitude, tone_phase, tone_index, and filterbank_bin arrays are 1-D, while s21_raw is 2-D:
s21_raw.shape == (tone_index.size, sample_time.size)
Parameters
----------
tone_bin : numpy.ndarray(int)
An array of integers representing the frequencies of the tones played during the measurement.
tone_amplitude : numpy.ndarray(float)
An array of floats representing the amplitudes of the tones played during the measurement.
tone_phase : numpy.ndarray(float)
An array of floats representing the radian phases of the tones played during the measurement.
tone_index : numpy.ndarray(int)
tone_bin[tone_index] corresponds to the frequency used to produce s21_raw.
filterbank_bin : numpy.ndarray(int)
The filter bank bins in which the tones lie.
epoch : float
The unix timestamp of the first sample of the time-ordered data.
sequence_start_number : int
The ROACH sequence number for the first sample of the time-ordered data.
s21_raw : numpy.ndarray(complex)
The data, demodulated or not.
data_demodulated : bool
True if the data is demodulated.
roach_state : dict
State information for the roach; the result of roach.state.
state : dict
All non-roach state information.
description : str
A human-readable description of this measurement.
validate : bool
If True, check that the array shapes match the dimensions OrderedDict.
"""
super(StreamArray, self).__init__(tone_bin=tone_bin, tone_amplitude=tone_amplitude, tone_phase=tone_phase,
tone_index=tone_index, filterbank_bin=filterbank_bin, epoch=epoch,
sequence_start_number=sequence_start_number, s21_raw=s21_raw,
data_demodulated=data_demodulated, roach_state=roach_state, state=state,
description=description, validate=validate)
def __getitem__(self, number):
"""
See stream().
"""
number = int(number) # Avoid weird indexing bugs
ss = SingleStream(tone_bin=self.tone_bin, tone_amplitude=self.tone_amplitude, tone_phase=self.tone_phase,
tone_index=self.tone_index[number], filterbank_bin=self.filterbank_bin[number],
epoch=self.epoch, sequence_start_number=self.sequence_start_number,
s21_raw=self.s21_raw[number, :], data_demodulated=self.data_demodulated,
roach_state=self.roach_state, number=number, state=self.state,
description=self.description)
ss._io = self._io
ss._io_node_path = self._io_node_path
return ss
def stream(self, number):
"""
Return a SingleStream object containing the data from the channel corresponding to the given integer.
Parameters
----------
number : int
The index of the stream to use to create the new single-channel object.
Returns
-------
SingleStream
"""
return self[number]
def tone_offset_frequency(self, normalized_frequency=True):
offset = calculate.tone_offset_frequency(self.tone_bin,self.roach_state.num_tone_samples,self.filterbank_bin,
self.roach_state.num_filterbank_channels)
if not normalized_frequency:
offset = offset * self.stream_sample_rate
return offset
def to_dataframe(self, add_origin = False):
dataframes = []
for number in range(self.tone_bin.shape[0]):
dataframes.append(self.stream(number).to_dataframe(add_origin=add_origin))
return pd.concat(dataframes, ignore_index=True)
class StreamArray0(RoachStream0):
"""
This class is a factory for producing StreamArray version 1 instances from version 0 data.
Version 0 did not save the sequence_start_number integer.
"""
def __new__(cls, *args, **kwargs):
kwargs['sequence_start_number'] = None
return StreamArray(*args, **kwargs)
class SingleStream(RoachStream):
"""
This class contains time-ordered data from a single channel.
"""
_version = 1
dimensions = OrderedDict([('tone_bin', ('tone_bin',)),
('tone_amplitude', ('tone_bin',)),
('tone_phase', ('tone_bin',)),
('s21_raw', ('sample_time',))])
def __init__(self, tone_bin, tone_amplitude, tone_phase, tone_index, filterbank_bin, epoch, sequence_start_number,
s21_raw, data_demodulated, roach_state, number=None, state=None, description='', validate=True):
"""
Return a new SingleStream instance. The single integer tone_index is the common index of tone_bin,
tone_amplitude, and tone_phase for the tone used to produce the time-ordered s21_raw data.
The tone_bin, tone_amplitude, tone_phase, tone_index, filterbank_bin, and s21_raw arrays are all 1-D.
Parameters
----------
tone_bin : numpy.ndarray(int)
An array of integers representing the frequencies of the tones played during the measurement.
tone_amplitude : numpy.ndarray(float)
An array of floats representing the amplitudes of the tones played during the measurement.
tone_phase : numpy.ndarray(float)
An array of floats representing the radian phases of the tones played during the measurement.
tone_index : int
tone_bin[tone_index] corresponds to the frequency used to produce s21_raw.
filterbank_bin : int
An int that is the filter bank bin in which the tone lies.
epoch : float
The unix timestamp of the first sample of the time-ordered data.
sequence_start_number : int
The ROACH sequence number for the first sample of the time-ordered data.
s21_raw : numpy.ndarray(complex)
The data, demodulated or not.
data_demodulated : bool
True if the data is demodulated.
roach_state : dict
State information for the roach; the result of roach.state.
number : int or None
The number of this instance in some larger structure, such as a StreamArray.
state : dict
All non-roach state information.
description : str
A human-readable description of this measurement.
validate : bool
If True, check that the array shapes match the dimensions OrderedDict.
"""
self.number = number
super(SingleStream, self).__init__(tone_bin=tone_bin, tone_amplitude=tone_amplitude, tone_phase=tone_phase,
tone_index=tone_index, filterbank_bin=filterbank_bin, epoch=epoch,
sequence_start_number=sequence_start_number, s21_raw=s21_raw,
data_demodulated=data_demodulated, roach_state=roach_state, state=state,
description=description, validate=validate)
# ToDo: use tone_index?
def tone_offset_frequency(self, normalized_frequency=True):
offset = calculate.tone_offset_frequency(self.tone_bin,self.roach_state.num_tone_samples,self.filterbank_bin,
self.roach_state.num_filterbank_channels)
if not normalized_frequency:
offset = offset * self.stream_sample_rate
return offset
def to_dataframe(self, add_origin=True):
data = {'number': self.number, 'analysis_epoch': time.time(), 'start_epoch': self.start_epoch()}
try:
for key, value in self.roach_state.items():
data['roach_{}'.format(key)] = value
except KeyError:
pass
data.update(self.state.flatten(wrap_lists=True))
data['s21_point'] = [self.s21_point]
data['s21_point_error'] = [self.s21_point_error]
data['frequency'] = [self.frequency]
data['frequency_MHz'] = [self.frequency_MHz]
dataframe = pd.DataFrame(data, index=[0])
if add_origin:
self.add_origin(dataframe)
return dataframe
class SingleStream0(RoachStream0):
"""
This class is a factory for producing SingleStream version 1 instances from version 0 data.
Version 0 did not save the sequence_start_number integer.
"""
def __new__(cls, *args, **kwargs):
kwargs['sequence_start_number'] = None
return SingleStream(*args, **kwargs)
class SweepArray(RoachMeasurement):
"""
This class contains a list of StreamArrays.
The analysis methods and properties are designed for two modes of operation.
For simultaneous sweeps across individual resonators, use the sweep() method (equivalent to __getitem__ access)
to return SingleSweep instances containing the data from individual resonators.
For scans over many unknown resonators, use the properties to return values in ascending frequency order, and use
find_resonators() and resonator() to locate resonance frequencies and extract resonator objects.
"""
_version = 0
def __init__(self, stream_arrays, state=None, description=''):
"""
Parameters
----------
stream_arrays : iterable(StreamArray)
The streams that make up the sweep.
state : dict
All non-roach state information.
description : str
A human-readable description of this measurement.
"""
if not isinstance(stream_arrays, core.MeasurementList):
stream_arrays = core.MeasurementList(stream_arrays)
self.stream_arrays = stream_arrays
super(SweepArray, self).__init__(state=state, description=description)
def __getitem__(self, number):
"""
See sweep().
"""
number = int(number) # Avoid weird indexing bugs
ss = SingleSweep(streams=core.MeasurementList(sa.stream(number) for sa in self.stream_arrays),
number=number, state=self.state, description=self.description)
ss._io = self._io
ss._io_node_path = self._io_node_path
return ss
def sweep(self, number):
"""
Return a SingleSweep object containing the data from the channel corresponding to the given integer.
Parameters
----------
number : int
The index of the sweep to use to create the new single-channel object.
Returns
-------
SingleSweep
"""
return self[number]
@property
def num_channels(self):
try:
if np.any(np.diff([sa.tone_index.size for sa in self.stream_arrays])):
raise ValueError("Channel numbers differ between stream arrays.")
else:
return self.stream_arrays[0].tone_index.size
except IndexError:
return 0
@memoized_property
def ascending_order(self):
"""numpy.ndarray[int]: Re-arranges values for this SweepArray in ascending frequency order."""
return np.concatenate([sa.frequency for sa in self.stream_arrays]).argsort()
@memoized_property
def frequency(self):
"""numpy.ndarray[float]: The frequencies of all data points in ascending order."""
return np.concatenate([sa.frequency for sa in self.stream_arrays])[self.ascending_order]
@property
def frequency_MHz(self):
"""numpy.ndarray[float]: The frequencies in MHz of all data points, in ascending order."""
return 1e-6 * self.frequency
@memoized_property
def s21_point(self):
"""numpy.ndarray[complex]: The s21_point values of all data points, in ascending frequency order."""
return np.concatenate([sa.s21_point for sa in self.stream_arrays])[self.ascending_order]
@memoized_property
def s21_point_error(self):
"""numpy.ndarray[complex]: The s21_point_error values of all data points, in ascending frequency order."""
return np.concatenate([sa.s21_point_error for sa in self.stream_arrays])[self.ascending_order]
@memoized_property
def s21_raw(self):
"""numpy.ndarray[complex]: The raw s21 streams of all data points, in ascending frequency order."""
return np.vstack([stream_array.s21_raw for stream_array in self.stream_arrays])[self.ascending_order, :]
@property
def s21_point_foreground(self):
return self.s21_point / self.background
@property
def s21_point_error_foreground(self):
return self.s21_point_error / self.background
@property
def background(self):
return self.fit_background(frequency=self.frequency, s21=self.s21_point)
# ToDo: swap this out for a lmfit model
def fit_background(self, frequency=None, s21=None, amp_degree=3, phi_degree=3, weights=None, mask=None):
if frequency is None:
frequency = self.frequency
if s21 is None:
s21 = self.s21_point
if weights is None: # Attempt to down-weight resonances
weights = np.abs(s21) ** 2
if mask is None:
            mask = np.ones(frequency.size, dtype=bool)
weights *= mask
amp_poly = np.polyfit(frequency, np.abs(s21), deg=amp_degree, w=weights)
phi_poly = np.polyfit(frequency, np.unwrap(np.angle(s21)), deg=phi_degree, w=weights)
return np.polyval(amp_poly, frequency) * np.exp(1j * np.polyval(phi_poly, frequency))
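    # Sketch of the background model fitted above: the magnitude and unwrapped phase of s21 are
    # each fit with a polynomial, so s21_bg(f) = polyval(amp_poly, f) * exp(1j * polyval(phi_poly, f)).
    # Points are weighted by |s21|**2 (and an optional mask) to down-weight the dips at resonances.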
# ToDo: this could be much smarter about identifying large peaks
def find_peaks(self, expected_Q=30000, num_widths=100, threshold=1, minimum_linewidth_separation=1,
**find_peaks_cwt_kwargs):
linewidth = self.frequency.mean() / expected_Q
width = linewidth / (self.frequency[1] - self.frequency[0]) # in samples
data = 1 / np.abs(self.s21_point_foreground) - 1
widths = np.linspace(width / 10, 10 * width, num_widths)
peaks = np.array(signal.find_peaks_cwt(vector=data, widths=widths, **find_peaks_cwt_kwargs))
mask = (width < peaks) & (peaks < data.size - width) & (data[peaks] > threshold * np.std(data))
return peaks[mask]
def resonator(self, frequency, width, model=lmfit_resonator.LinearLossResonatorWithCable):
mask = (frequency - width / 2 <= self.frequency) & (self.frequency <= frequency + width / 2)
return model(frequency=self.frequency[mask], s21=self.s21_point_foreground[mask],
errors=self.s21_point_error_foreground[mask])
def to_dataframe(self, add_origin=True, one_sweep_per_row=True):
"""
Parameters
----------
add_origin
one_sweep_per_row: bool, default True
If True, return a dataframe with one row per sweep, usually what you want if each sweep corresponds to a
resonator.
If False, return a single row with frequency and s21_point arrays for VNA style sweep data.
Returns
-------
pandas.DataFrame
"""
dataframes = []
if one_sweep_per_row:
for number in range(self.num_channels):
dataframes.append(self.sweep(number).to_dataframe(add_origin=add_origin))
else:
data = {'analysis_epoch': time.time(), 'start_epoch': self.start_epoch()}
try:
for thermometer, temperature in self.state['temperature'].items():
data['temperature_{}'.format(thermometer)] = temperature
except KeyError:
pass
try:
for key, value in self.stream_arrays[0].stream(0).roach_state.items():
data['roach_{}'.format(key)] = value
except KeyError:
pass
flat_state = self.state.flatten(wrap_lists=True)
data.update(flat_state)
data['frequency'] = [self.frequency]
data['s21_point'] = [self.s21_point]
data['s21_point_error'] = [self.s21_point_error]
            dataframe = pd.DataFrame(data, index=[0])
import pandas as pd
from argparse import ArgumentParser
from pathlib import Path
from .logger import logger
import sqlite3
import random
import csv
import json
from tqdm import tqdm
DATA_DIR = Path('data')
FOLD = 5
NELA_DIR = DATA_DIR / 'NELA'
Path(NELA_DIR).mkdir(parents=True, exist_ok=True)
NELA_FNAME = '{mode}_{fold}.tsv'
RESAMPLE_LIMIT = 100
def read_constraint_splits():
train_fpath = DATA_DIR / 'train.tsv'
val_fpath = DATA_DIR / 'val.tsv'
test_fpath = DATA_DIR / 'test.tsv'
train = pd.read_csv(train_fpath, quoting=csv.QUOTE_NONE, error_bad_lines=False, sep='\t')
val = pd.read_csv(val_fpath, quoting=csv.QUOTE_NONE, error_bad_lines=False, sep='\t')
test = pd.read_csv(test_fpath, quoting=csv.QUOTE_NONE, error_bad_lines=False, sep='\t')
return {
'train': train,
'val': val,
'test': test
}
def normalize(domain):
domain = domain.strip()
domain = domain.replace(' ', '')
domain = domain.lower()
return domain
def read_simplewiki(path: str):
    wiki = pd.read_csv(path, sep='\t')
# import pandas
import pandas as pd
import numpy as np
import statsmodels.api as sm
pd.set_option('display.width', 200)
pd.set_option('display.max_columns', 15)
import zipfile
from datetime import timedelta
from os import path
from time import sleep
from unittest import TestCase
from django.test import tag
from django.utils import timezone
import pandas as pd
import pyrefinebio
import scipy.stats
from data_refinery_common.enums import ProcessorEnum
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
ProcessorJob,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_foreman.foreman.management.commands.create_compendia import create_compendia
from data_refinery_foreman.foreman.management.commands.create_quantpendia import create_quantpendia
from data_refinery_foreman.foreman.management.commands.run_tximport import (
run_tximport_for_all_eligible_experiments,
)
from data_refinery_foreman.surveyor.management.commands.dispatch_qn_jobs import (
dispatch_qn_job_if_eligible,
)
from data_refinery_foreman.surveyor.management.commands.surveyor_dispatcher import (
queue_surveyor_for_accession,
)
from data_refinery_foreman.surveyor.management.commands.unsurvey import purge_experiment
SMASHER_SAMPLES = ["GSM1487313", "SRR332914"]
SMASHER_EXPERIMENTS = ["GSE1487313", "SRP332914"]
MICROARRAY_ACCESSION_CODES = [
"E-TABM-496", # 39 samples of SACCHAROMYCES_CEREVISIAE microarray data
"GSE94793", # 24 samples of SACCHAROMYCES_CEREVISIAE microarray data
"GSE80822", # 12 samples of SACCHAROMYCES_CEREVISIAE microarray data
"GSE96849", # 68 samples of SACCHAROMYCES_CEREVISIAE microarray data
"GSE41094", # 18 samples of SACCHAROMYCES_CEREVISIAE submitter processed data
]
RNA_SEQ_ACCESSION_CODES = [
"SRP047410", # 26 samples of SACCHAROMYCES_CEREVISIAE RNA-Seq data, one will fail.
"SRP094706", # 4 samples of SACCHAROMYCES_CEREVISIAE RNA-Seq data
]
EXPERIMENT_ACCESSION_CODES = MICROARRAY_ACCESSION_CODES + RNA_SEQ_ACCESSION_CODES
TEST_DATA_BUCKET = "data-refinery-test-assets"
def wait_for_job(job) -> bool:
"""Waits for a job and all of its retries."""
job.refresh_from_db()
is_done = False
while not is_done:
if job.end_time is None and job.success is None:
print(f"Polling {type(job).__name__}s. Currently waiting for job id: {job.id}")
sleep(20)
job.refresh_from_db()
elif job.retried and job.retried_job:
job = job.retried_job
elif job.success:
return True
else:
print(f"{type(job).__name__} {job.id} failed!")
return False
return False
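# Illustrative usage (accession code taken from the lists above): the helper blocks until a job
# and any of its retries finish, returning True only on success, e.g.
#     survey_job = queue_surveyor_for_accession("GSE80822")
#     assert wait_for_job(survey_job)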
def prepare_computed_files():
# MICROARRAY TECH
experiment = Experiment()
experiment.accession_code = "GSE1487313"
experiment.num_processed_samples = 1
experiment.save()
result = ComputationalResult()
result.save()
gallus_gallus = Organism.get_object_for_name("GALLUS_GALLUS", taxonomy_id=1001)
sample = Sample()
sample.accession_code = "GSM1487313"
sample.title = "GSM1487313"
sample.organism = gallus_gallus
sample.technology = "MICROARRAY"
sample.is_processed = True
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1487313_liver.PCL"
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.s3_key = "GSM1487313_liver.PCL"
computed_file.s3_bucket = TEST_DATA_BUCKET
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
# RNASEQ TECH
experiment2 = Experiment()
experiment2.accession_code = "SRP332914"
experiment2.num_processed_samples = 1
experiment2.save()
result2 = ComputationalResult()
result2.save()
sample2 = Sample()
sample2.accession_code = "SRR332914"
sample2.title = "SRR332914"
sample2.organism = gallus_gallus
sample2.technology = "RNA-SEQ"
sample2.is_processed = True
sample2.save()
sra2 = SampleResultAssociation()
sra2.sample = sample2
sra2.result = result2
sra2.save()
esa2 = ExperimentSampleAssociation()
esa2.experiment = experiment2
esa2.sample = sample2
esa2.save()
computed_file2 = ComputedFile()
computed_file2.filename = "SRP149598_gene_lengthScaledTPM.tsv"
computed_file2.result = result2
computed_file2.size_in_bytes = 234
computed_file2.is_smashable = True
computed_file2.s3_key = "SRP149598_gene_lengthScaledTPM.tsv"
computed_file2.s3_bucket = TEST_DATA_BUCKET
computed_file2.save()
assoc2 = SampleComputedFileAssociation()
assoc2.sample = sample2
assoc2.computed_file = computed_file2
assoc2.save()
# Use unittest TestCase instead of django TestCase to avoid the test
# being done in a transaction.
class SmasherEndToEndTestCase(TestCase):
"""Test only the smasher using precomuted samples."""
@tag("end_to_end")
def test_smasher_job(self):
for accession_code in SMASHER_EXPERIMENTS:
purge_experiment(accession_code)
prepare_computed_files()
# The API sometimes takes a bit to come back up.
start_time = timezone.now()
while True:
try:
pyrefinebio.create_token(agree_to_terms=True, save_token=False)
break
except pyrefinebio.ServerError:
if timezone.now() - start_time > timedelta(minutes=15):
raise AssertionError("Server not up after 15 minutes")
else:
sleep(30)
dataset_path = "end_to_end_test_dataset"
pyrefinebio.download_dataset(
dataset_path,
"<EMAIL>",
dataset_dict={"GSE1487313": ["GSM1487313"], "SRP332914": ["SRR332914"]},
timeout=timedelta(minutes=15),
)
self.assertTrue(path.exists(dataset_path))
# Use unittest TestCase instead of django TestCase to avoid the test
# being done in a transaction.
class FullFlowEndToEndTestCase(TestCase):
"""In order to parallelize the jobs as much as possible, everything is
done in one big ol' function.
This includes, in order:
* Purging previously surveyed experiments.
* Surveying experiments which will trigger:
* An array_express downloader job triggering a affymetrix processor job.
* 3 GEO downloader job triggering affymetrix processor jobs, for a total
greater than 100 samples on a single platform.
* A GEO downloader job triggering a NO_OP processor job.
* Creating a transcriptome index for SACCHAROMYCES_CEREVISIAE.
* Surveying experiments which will trigger:
* A SRA downloader job triggering a salmon processor job for 26 samples.
(One of which will fail, requiring a run_tximport job.)
* A SRA downloader job triggering a salmon processor job for 4 samples.
(This should be fully processed without intervention.)
* Running tximport to process the experiment that had one bad sample.
* Creating a QN Target for SACCHAROMYCES_CEREVISIAE.
* Creating a Compendium for SACCHAROMYCES_CEREVISIAE.
* Creating a Quantpendium for SACCHAROMYCES_CEREVISIAE.
"""
@tag("end_to_end")
def test_all_the_things(self):
for accession_code in EXPERIMENT_ACCESSION_CODES:
purge_experiment(accession_code)
self.process_experiments()
self.check_transcriptome_index()
self.create_qn_reference()
self.create_compendia()
def process_experiments(self):
survey_jobs = []
# Kick off microarray jobs first because downloading the
# affymetrix image takes a long time.
for accession_code in MICROARRAY_ACCESSION_CODES:
survey_jobs.append(queue_surveyor_for_accession(accession_code))
# However, before we kick of RNA-Seq jobs, we have to build a transcriptome index for them.
transcriptome_survey_job = queue_surveyor_for_accession("SACCHAROMYCES_CEREVISIAE, Ensembl")
print(
"First, creating transcriptome indices (and starting on Affy jobs while we're at it):"
)
self.assertTrue(wait_for_job(transcriptome_survey_job))
transcriptome_downloader_jobs = DownloaderJob.objects.filter(
downloader_task="TRANSCRIPTOME_INDEX",
created_at__gt=transcriptome_survey_job.created_at,
)
for downloader_job in transcriptome_downloader_jobs:
self.assertTrue(wait_for_job(downloader_job))
transcriptome_processor_jobs = ProcessorJob.objects.filter(
pipeline_applied__startswith="TRANSCRIPTOME_INDEX",
created_at__gt=transcriptome_survey_job.created_at,
)
for processor_job in transcriptome_processor_jobs:
self.assertTrue(wait_for_job(processor_job))
for accession_code in RNA_SEQ_ACCESSION_CODES:
survey_jobs.append(queue_surveyor_for_accession(accession_code))
print("Next, processing all the raw data.")
for survey_job in survey_jobs:
self.assertTrue(wait_for_job(survey_job))
# We need to ignore a lot of samples in staging that aren't
# related to these experiments.
# sample_id_list = Experiment.objects.filter(accession_code__in=EXPERIMENT_ACCESSION_CODES)
sample_id_list = Sample.objects.filter(
experiment__accession_code__in=EXPERIMENT_ACCESSION_CODES
).values_list("id")
self.assertEqual(Sample.objects.filter(id__in=sample_id_list).count(), 191)
samples = []
for accession_code in EXPERIMENT_ACCESSION_CODES:
experiment = Experiment.objects.get(accession_code=accession_code)
samples.extend(list(experiment.samples.all()))
downloader_jobs = []
for sample in samples:
greatest_job_id = -1
last_job = None
# There should be only one, but if it got retried for some
# reason we want the latest one.
for job in sample.get_downloader_jobs():
if job.id > greatest_job_id:
greatest_job_id = job.id
last_job = job
downloader_jobs.append(last_job)
for downloader_job in downloader_jobs:
self.assertTrue(wait_for_job(downloader_job))
processor_jobs = []
for sample in samples:
# This sample fails for good reason, so don't expect it to pass.
if sample.accession_code == "SRR1583739":
continue
greatest_job_id = -1
last_job = None
for job in sample.get_processor_jobs():
if job.id > greatest_job_id:
greatest_job_id = job.id
last_job = job
processor_jobs.append(last_job)
for processor_job in processor_jobs:
self.assertTrue(wait_for_job(processor_job))
# Because SRR1583739 fails, the 26 samples from SRP047410 won't be processed
self.assertEqual(Sample.processed_objects.filter(id__in=sample_id_list).count(), 165)
print("Finally, need to run tximport to finish an experiment with one bad sample.")
tximport_jobs = run_tximport_for_all_eligible_experiments(dispatch_jobs=False)
self.assertEqual(len(tximport_jobs), 1)
self.assertTrue(wait_for_job(tximport_jobs[0]))
# This is the full total of jobs minus one.
self.assertEqual(Sample.processed_objects.filter(id__in=sample_id_list).count(), 190)
def check_transcriptome_index(self):
# Make sure that a processed file using our new transcriptome index is
# similar enough to a reference file
print(
"Now we are going to verify that the outputs of salmon look okay with"
"the transcriptome index we generated."
)
sample = Sample.objects.get(accession_code="SRR5085168")
# First, sanity check that the genome build hasn't changed. If it
# changes, all bets are off when comparing quant.sf files.
self.assertEqual(
sample.results.all()
.filter(processor__name=ProcessorEnum.SALMON_QUANT.value["name"])
.first()
.organism_index.assembly_name,
"R64-1-1",
)
# We are now going to download the `quant.sf` file by making a
# quant-only smasher job. We need to do this because the results bucket
# was locked down, so we can't access it without agreeing to the terms
# first.
pyrefinebio.create_token(agree_to_terms=True, save_token=False)
dataset_path = "end_to_end_quant_dataset.zip"
pyrefinebio.download_dataset(
dataset_path,
"<EMAIL>",
dataset_dict={"SRP094706": ["SRR5085168"]},
quant_sf_only=True,
aggregation="EXPERIMENT",
timeout=timedelta(minutes=15),
)
self.assertTrue(path.exists(dataset_path))
def squish_duplicates(data: pd.DataFrame) -> pd.DataFrame:
return data.groupby(data.index, sort=False).mean()
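        # Illustrative example: for a frame indexed ['tx1', 'tx1', 'tx2'] with values [1.0, 3.0, 5.0],
        # squish_duplicates returns tx1 -> 2.0 and tx2 -> 5.0 (duplicate transcript ids are averaged).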
ref_filename = "/home/user/data_store/reference/SRR5085168_quant.sf"
ref = pd.read_csv(ref_filename, delimiter="\t", index_col=0)
        ref_TPM = squish_duplicates(pd.DataFrame({"reference": ref["TPM"]}))
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
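# Illustrative pattern used throughout these tests (sketch, not an actual test case): a test
# receives `box`, wraps both the input and the expected values with tm.box_expected(obj, box),
# and a single test body then covers Index, Series and DataFrame, e.g.
#     tdi = tm.box_expected(TimedeltaIndex(['1 day', '2 days']), box)
#     expected = tm.box_expected(TimedeltaIndex(['2 days', '4 days']), box)
#     tm.assert_equal(tdi * 2, expected)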
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
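        # Worked numbers: Timedelta('10m7s') is 607s and '00:05:03' is 303s, so
        # scalar_td // tdi gives 607 // 303 == 2, while tdi // scalar_td below
        # gives 303 // 607 == 0.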
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = | tm.box_expected(tdi, box) | pandas.util.testing.box_expected |
import pandas as pd
class HistOutlier:
def __init__(self):
return
@staticmethod
def calc_outlier_degree(rating: pd.DataFrame, clusters: pd.DataFrame) -> pd.DataFrame:
"""
Params:
rating (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time', 'cluster_end_time', 'rating'
containing the outlier rating for all subsequences
clusters (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id' containing objects,
                timestamps, cluster assignments, feature columns, etc.
Note: The first three columns can have custom names as long as they represent the object
identifier, the timestamp and the cluster identifier in the right order
Returns:
result (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time', 'cluster_end_time', 'rating', distance
containing the outlier rating and distances for all subsequences
"""
object_column = clusters.columns[0]
time_column = clusters.columns[1]
cluster_column = clusters.columns[2]
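        # Illustrative column layout implied by the docstring (the first three
        # columns of `clusters` may carry custom names, as noted above):
        #   rating:   object_id | start_time | end_time | cluster_end_time | rating
        #   clusters: object_id | time | cluster_id | <feature columns ...>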
result = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Apr 2, 2020
# About: strymmap class to visualize and analyze GPS data from CSV file recorded using Grey Panda device and libpanda software.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
## General Data processing and visualization Import
import time
import numpy as np
import math
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (16,8)
from scipy.interpolate import interp1d
from .phasespace import phasespace
from .strymread import strymread
from logging import Logger
from .utils import configure_logworker
LOGGER = configure_logworker()
from matplotlib import cm
import pandas as pd # Note that this is not the comma.ai Panda, but the pandas data analysis library
import os
import sys
from subprocess import Popen, PIPE
import gmaps
from dotenv import load_dotenv
load_dotenv()
from .config import config
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
import ntpath
import bokeh.io
import bokeh.plotting
import bokeh.models
import bokeh.transform
from bokeh.palettes import Magma256 as palette
from bokeh.models import ColorBar
from bokeh.io import output_notebook
from bokeh.io.export import get_screenshot_as_png
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from .tools import ellipse_fit
output_notebook()
import plotly.express as px
import plotly.io as pio
import plotly.offline as pyo
# Set notebook mode to work in offline
pyo.init_notebook_mode()
class strymmap:
'''
`strymmap` reads the GPS data from the given CSV file.
This class provides several utilities functions to work with GPS Data
Parameters
----------------
    csvfile: `str`
The CSV file to be read
Attributes
---------------
csvfile:`string`
The filepath of CSV Data file
dataframe: `pandas.Dataframe`
Pandas dataframe that stores content of csvfile as dataframe
aq_time: `string`
        Acquisition Time of GPS Signal with valid Latitude and Longitude in the form of human-readable date string as per local timezone
latitude: `pandas.DataFrame`
Latitude Timeseries
longitude: `pandas.DataFrame`
Longitude Timeseries
altitude: `pandas.DataFrame`
Altitude Timeseries
success: `bool`
If file reading was successful, then set to success to True
Returns
---------------
`strymmap`
Returns an object of type `strymmap`
Example
----------------
Generating GOOGLE MAP API KEY
    You must ensure that you have the right Google API KEY before you can use `strymmap`.
You can generate API KEY at https://console.developers.google.com/projectselector2/apis/dashboard.
Put API KEY as an environment variable in the file ~/.env by executing following from the command line
`echo "export GOOGLE_MAP_API_KEY=<KEY>" >> ~/.env`
Use your own key instead of `abcdefghijklmnopqrstuvwxyz`.
A good tutorial on how to perform API setup is given at https://web.archive.org/web/20200404070618/https://pybit.es/persistent-environment-variables.html
Generating MAP BOX API KEY
    Generating a MAP BOX API key is easier than generating a Google Map API key.
Just create an account on mapbox.com and select create token.
You can also check tutorials on https://www.youtube.com/watch?v=6iQEhaE1bCY
Put API Key as an environment variable in the file ~/.env by executing following from the command line
`echo "export MAP_BOX_API=abcdefghijklmnopqrstuvwxyz" >> ~/.env`.
Use your own key instead of `abcdefghijklmnopqrstuvwxyz`.
>>> import strym
>>> from strym import strymmap
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymmap(csvfile=csvdata)
'''
def __init__(self, csvfile, **kwargs):
self.success = False
# if file size is less than 60 bytes, return without processing
if os.path.getsize(csvfile) < 60:
print("Nothing significant to read in {}. No further analysis is warranted.".format(csvfile))
return
if shell_type not in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
raise ValueError("strymmap can only be used within Jupyter Notebook.")
# CSV File
self.csvfile = csvfile
LOGGER.info("Reading GPS file {}".format(csvfile))
        # All GPS records will be saved as a pandas dataframe
try:
status_category = pd.CategoricalDtype(categories=['A', 'V'], ordered=False)
is_windows = sys.platform.startswith('win')
if not is_windows:
word_counts = Popen(['wc', '-l', self.csvfile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = word_counts.communicate()
output = output.decode("utf-8")
output = output.strip()
output = output.split(' ')
n_lines = int(output[0])
if n_lines < 4:
LOGGER.error("Not enough lines to read in {}".format(csvfile))
return
self.dataframe = pd.read_csv(self.csvfile, dtype={'Gpstime': np.float64,'Status':status_category, 'Long': np.float32, 'Lat': np.float32, 'Alt': np.float32, 'HDOP': np.float16, 'PDOP': np.float16, 'VDOP': np.float16}, nrows=n_lines - 2)
else:
self.dataframe = pd.read_csv(self.csvfile, dtype={'Gpstime': np.float64,'Status':status_category, 'Long': np.float32, 'Lat': np.float32, 'Alt': np.float32, 'HDOP': np.float16, 'PDOP': np.float16, 'VDOP': np.float16}, skipfooter=2)
except pd.errors.ParserError:
            print("ParserError: Ill-formatted CSV file {}. A properly formatted CSV file must have column names as ['Gpstime', 'Status', 'Long', 'Lat', 'Alt', 'HDOP', 'PDOP', 'VDOP']".format(self.csvfile))
print("Not generating map for the drive route.")
return
except UnicodeDecodeError:
            print("Unicode Decode Error: Ill-formatted CSV file {}. A properly formatted CSV file must have column names as ['Gpstime', 'Status', 'Long', 'Lat', 'Alt', 'HDOP', 'PDOP', 'VDOP']".format(self.csvfile))
print("Not generating map for the drive route.")
return
except pd.errors.EmptyDataError:
print("CSVfile is empty.")
return
if self.dataframe.shape[0] == 0:
print("No data was present in the csvfile. Not generating map for the drive.")
return
self.dataframe = self.dataframe.dropna()
if not set(['Gpstime' ,'Status' ,'Long', 'Lat' ,'Alt' ,'HDOP' ,'PDOP', 'VDOP']).issubset(set(self.dataframe.columns)):
            print("Ill-formatted CSV file. A properly formatted CSV file must have column names as ['Gpstime', 'Status', 'Long', 'Lat', 'Alt', 'HDOP', 'PDOP', 'VDOP']")
print("Column Names found are {}".format(self.dataframe.columns.values))
print("Not generating map for drive route.")
return
status = self.dataframe['Status'] == 'A'
self.dataframe = self.dataframe[status]
if self.dataframe.shape[0] == 0:
print("GPS failed to acquire satellite signal during this drive. Not generating map for the drive.")
return
# At this point, GPS data file reading is successful, set `success` to `True`
self.success = True
self.aq_time = strymread.dateparse(self.dataframe['Gpstime'].values[0])
print('GPS signal first acquired at {}'.format(self.aq_time))
self.dataframe = strymmap.timeindex(self.dataframe, inplace=True)
self.latitude = self.dataframe['Lat']
self.longitude = self.dataframe['Long']
self.altitude = self.dataframe['Alt']
centroid_lat, centroid_long = phasespace.centroid( self.latitude,self.longitude)
center_coordinates = (centroid_lat, centroid_long)
coordinates = pd.DataFrame()
coordinates['latitude'] = self.latitude
coordinates['longitude'] = self.longitude
self.mapfile = self.csvfile[0:-4] + '.html'
time_axis = kwargs.get("time_axis", True)
if config["map"] == "googlemap":
self.API_Key =os.getenv('GOOGLE_MAP_API_KEY')
if self.API_Key is None:
self.API_Key = input("Enter Google MAP API Key: ")
Popen('echo "export GOOGLE_MAP_API_KEY={}" >> ~/.env'.format(self.API_Key), shell= True)
gmaps.configure(api_key=self.API_Key)
styles="""
[{ "featureType": "all", "elementType": "geometry", "stylers": [ { "color": "#b5d3ff"}]},
{ "featureType": "all", "elementType": "labels.text.fill", "stylers": [ { "gamma": 0.01}, {"lightness": 20},{"weight": "1.39"},{"color": "#0d1529"}]},
{ "featureType": "all", "elementType": "labels.text.stroke", "stylers": [ { "weight": "0.96"}, { "saturation": "9"}, { "visibility": "on"},{ "color": "#f2f2f2"}] },
{ "featureType": "all", "elementType": "labels.icon", "stylers": [ { "visibility": "off"}]},
{ "featureType": "landscape", "elementType": "geometry", "stylers": [{ "lightness": 30}, { "saturation": "9"},{ "color": "#fbfffa"}] },
{ "featureType": "poi", "elementType": "geometry", "stylers": [ { "saturation": 20 }] },
{ "featureType": "poi.park", "elementType": "geometry", "stylers": [ {"lightness": 20 }, { "saturation": -20}]},
{ "featureType": "road", "elementType": "geometry", "stylers": [ { "lightness": 10 }, { "saturation": -30 } ]},
{ "featureType": "road", "elementType": "geometry.fill", "stylers": [ { "color": "#b1c4cc"}]},
{ "featureType": "road", "elementType": "geometry.stroke", "stylers": [ { "saturation": 25}, { "lightness": 25 }, { "weight": "0.01"}]},
{ "featureType": "water", "elementType": "all", "stylers": [ { "lightness": -20 }]}
]
"""
gmap_options = bokeh.models.GMapOptions(lat=centroid_lat, lng=centroid_long,
map_type='roadmap', zoom=int(config["mapzoom"]), styles = styles)
fig = bokeh.plotting.gmap(self.API_Key, gmap_options, title='Drive Route for ' + ntpath.basename(self.csvfile),
width=config["mapwidth"], height=config["mapheight"], tools=['hover', 'reset', 'wheel_zoom', 'pan'])
source = bokeh.models.ColumnDataSource(self.dataframe)
if time_axis:
mapper = bokeh.transform.linear_cmap('Gpstime', palette[:192], np.min(self.dataframe['Gpstime']), np.max(self.dataframe['Gpstime']))
center = fig.circle('Long', 'Lat', size=4, alpha=1.0,
color=mapper, source=source, )
color_bar = ColorBar(color_mapper=mapper['transform'], location=(0,0), title='Time')
fig.add_layout(color_bar, 'right')
else:
fig.circle('Long', 'Lat', size=5, alpha=1.0, fill_color='red',
line_color = 'red', source=source)
            bokeh.plotting.output_file(filename= self.mapfile, title='Drive Route for ' + ntpath.basename(self.csvfile))
bokeh.plotting.save(fig, self.mapfile)
self.fig = fig
driver = webdriver.Chrome(ChromeDriverManager().install())
self.image = get_screenshot_as_png(fig, height=800, width=1800, driver=driver)
time.sleep(1)
driver.close()
driver.quit() # See https://web.archive.org/web/20200404100708/https://sites.google.com/a/chromium.org/chromedriver/getting-started and https://web.archive.org/web/20200404101003/https://www.selenium.dev/selenium/docs/api/py/index.html
self.image.save(self.csvfile[0:-4] + '.png',"PNG")
elif config["map"] == "mapbox":
self.API_Key =os.getenv('MAP_BOX_API')
if self.API_Key is None:
self.API_Key = input("Enter Mapbox API Key: ")
Popen('echo "export MAP_BOX_API={}" >> ~/.env'.format(self.API_Key), shell= True)
if time_axis:
color = "Gpstime"
else:
color = None
fig = px.scatter_mapbox(self.dataframe, lat="Lat", lon="Long", color=color,
color_continuous_scale=["black", "purple", "red" ], size_max=30, zoom=config["mapzoom"],
height = config["mapheight"], width = config["mapwidth"], #center = dict(lat = g.center)
title='Drive Route for ' + ntpath.basename(self.csvfile),
#mapbox_style="open-street-map"
)
Index = self.dataframe.index.strftime('%m/%d/%Y, %r')
cb_indices = np.linspace(0, self.dataframe.shape[0]-1, 10, dtype=int)
cb =Index[cb_indices]
cbtime = self.dataframe.Gpstime[cb_indices].values
fig.update_layout(font_size=16, title={'xanchor': 'center','yanchor': 'top', 'y':0.9, 'x':0.5,},
title_font_size = 24, mapbox_accesstoken=self.API_Key, mapbox_style = "mapbox://styles/strym/ckhd00st61aum19noz9h8y8kw",
coloraxis_colorbar=dict(
title="Time",
tickvals=cbtime,
ticktext=cb,
ticks="outside", ticksuffix=" TIME",
dtick=50
))
fig.update_traces(marker=dict(size=6))
fig.write_image(self.csvfile[0:-4] + '.png')
fig.write_html(self.csvfile[0:-4] + '.html')
self.fig = fig
def gpsdistance(self):
"""
Calculate the distance covered based on the Lat, Long coordinates traversed
Returns
---------
Distance covered in meters
"""
dist = strymmap._calcgpsdist(self.dataframe)
return dist
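        # Hypothetical usage, mirroring the class docstring example:
        #   r0 = strymmap(csvfile='2020-03-20.csv')
        #   meters_driven = r0.gpsdistance()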
def plotroute(self, interactive=True, returnfig = False):
'''
Plot the driving routes on Google Map
        Note: Only compatible with Jupyter Notebook.
You must execute `jupyter nbextension enable --py gmaps` before running jupyter notebook in your python environment.
Parameters
--------------
interactive: `bool`
            `True` or `False` to specify whether to plot an interactive map. `True`: plot an interactive map, `False`: plot the map as an image
Returns
---------
`bokeh.plotting.gmap.GMap`
Figure object correspond to Google Map figure with waypoints embedded on it
'''
if not self.success:
print("There is no route to plot as GPS Data was not read successfully.")
return None
if interactive:
if config["map"] == "googlemap":
time.sleep(1)
try:
bokeh.plotting.reset_output()
bokeh.plotting.output_notebook()
bokeh.plotting.show(self.fig) # angrily yells at me about single ownership
except:
bokeh.plotting.output_notebook()
bokeh.plotting.show(self.fig)
elif config["map"] == "mapbox":
self.fig.show()
else:
if config["map"] == "googlemap":
display(self.image)
elif config["map"] == "mapbox":
from PIL import Image
im = Image.open(self.csvfile[0:-4] + '.png')
display(im)
if returnfig:
return self.fig
@staticmethod
def _calcgpsdist(df, sample_time = 0.1):
distance = 0.0
for i in range(1,df.shape[0]):
lat1 = df.iloc[i-1]['Lat']
long1 = df.iloc[i-1]['Long']
lat2 = df.iloc[i]['Lat']
long2 = df.iloc[i]['Long']
phi_1 = lat1*math.pi/180.0 # in radians
phi_2 = lat2*math.pi/180.0 # in radians
delta_phi = phi_1 - phi_2
lamda_1 = long1
lamda_2 = long2
delta_lambda = (lamda_1 - lamda_2 )*math.pi/180.0
R = 6371000 # Earth radius in meter
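            # Haversine formula: a = sin^2(dphi/2) + cos(phi_1)*cos(phi_2)*sin^2(dlambda/2),
            # c = 2*atan2(sqrt(a), sqrt(1 - a)), great-circle distance = R*c.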
a = (math.sin(delta_phi/2))**2 + math.cos(phi_1)*math.cos(phi_2)*math.sin(delta_lambda/2)*math.sin(delta_lambda/2)
c = 2*math.atan2(math.sqrt(a), math.sqrt(1-a))
great_circle_distance = R*c # in meters
            # If the distance is too large, there is a gap between GPS data points; gap distances are not included.
            # Here it is assumed that within one sample interval the car cannot exceed 100 m/s, i.e. anything farther is a gap between two points.
if great_circle_distance >= 100*sample_time:
continue
distance = distance + great_circle_distance
return distance
@staticmethod
def timeindex(df, inplace=False):
if inplace:
newdf = df
else:
newdf =df.copy()
newdf['Gpstime'] = df['Gpstime']
newdf['ClockTime'] = newdf['Gpstime'].apply(strymread.dateparse)
Time = pd.to_datetime(newdf['Gpstime'], unit='s')
newdf['Clock'] = | pd.DatetimeIndex(Time) | pandas.DatetimeIndex |
# -*- coding: utf-8; py-indent-offset:4 -*-
import datetime as dt
import pandas as pd
from ..utils import datetime_today
from .data_cache import get_all_symbol_name, get_cn_stock_fund_flow_rank, \
get_daily, get_stock_fund_flow_daily, \
get_stock_spot, \
get_index_daily
class Market:
all_symbol_name = {}
watching_symbols = []
symbol_daily = {}
symbol_daily_adjusted = {}
verbose = False
date_start = None
date_end = None
current_date = None
current_time = None
biz_time = None
biz_hour = (11.5 - 9.5) + (15.0 - 13.0)
sell_rule = 'T+1'
is_testing = False
def __init__(self, start, end):
self.all_symbol_name = get_all_symbol_name()
self.watching_symbols = []
self.symbol_daily = {}
self.symbol_daily_adjusted = {}
self.date_start = start
self.date_end = end
self.current_date = self.current_time = start
self.last_spot_time = dt.datetime.now() - dt.timedelta(minutes =10)
self.set_business_time(morning= ['9:30', '11:30'], afternoon= ['13:00', '15:00'])
self.set_sell_rule(sell_rule= 'T+1')
def set_sell_rule(self, sell_rule = 'T+1'):
self.sell_rule = sell_rule
def set_business_time(self, morning= ['9:30', '11:30'], afternoon= ['13:00', '15:00']):
business_time = morning + afternoon
biz_time = []
for k in business_time:
time_only = dt.datetime.strptime(k, '%H:%M')
biz_time.append(time_only.hour + time_only.minute / 60.0)
self.biz_time = biz_time
morning_start, morning_end, afternoon_start, afternoon_end = self.biz_time
self.biz_hour = (morning_end - morning_start) + (afternoon_end - afternoon_start)
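        # For example, '9:30' becomes 9.5 and '11:30' becomes 11.5, so the default
        # sessions give biz_hour = (11.5 - 9.5) + (15.0 - 13.0) = 4.0 trading hours per day.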
def set_verbose(self, verbose = True):
self.verbose = verbose
# for testing purpose
def set_date_range(self, start, end):
self.date_start = start
self.date_end = end
self.current_date = self.current_time = start
def is_open(self) -> bool:
if self.is_testing:
return True
# business hour:
# monday to friday, 9:30 ~ 11:30, 13:00 ~ 15:00
now = dt.datetime.now()
wday = now.weekday() +1
if (wday >= 1) and (wday <= 5):
hour = now.hour + now.minute / 60.0
morning_start, morning_end, afternoon_start, afternoon_end = self.biz_time
if (hour >= morning_start) and (hour < morning_end):
return True
if (hour >= afternoon_start) and (hour < afternoon_end):
return True
return False
def load_history_price(self, symbol):
if symbol.startswith('sh') or symbol.startswith('sz'):
df = get_index_daily( symbol )
self.symbol_daily[ symbol ] = df
self.symbol_daily_adjusted[ symbol ] = df
else:
# OHCLV
df = get_daily( symbol )
if not symbol.startswith('fund'):
# fund flow
fund_df = get_stock_fund_flow_daily( symbol )
if fund_df is not None:
df['main_fund'] = fund_df['main_fund']
df['main_pct'] = fund_df['main_pct']
df = df.fillna(0)
self.symbol_daily[ symbol ] = df
if 'factor' in df.columns:
df = df.copy()
for k in ['open', 'close', 'high', 'low']:
df[k] = df[k] * df['factor']
self.symbol_daily_adjusted[ symbol ] = df
def watch(self, symbols):
for symbol in symbols:
if not (symbol in self.watching_symbols):
self.watching_symbols.append(symbol)
self.load_history_price(symbol)
def keep_daily_uptodate(self):
for symbol in self.watching_symbols:
self.load_history_price( symbol )
def update_fundflow_realtime(self, verbose = False) -> pd.DataFrame:
today = pd.to_datetime( datetime_today() )
# merge fund flow data into df
fund_df = get_cn_stock_fund_flow_rank()
fund_df = fund_df[ fund_df.symbol.isin(self.watching_symbols) ]
fund_df = fund_df.sort_values(by='main_pct', ascending=False).reset_index(drop= True)
if verbose:
print('')
print(fund_df)
for i, fund_row in fund_df.iterrows():
symbol = fund_row['symbol']
if symbol not in self.watching_symbols:
continue
df = self.symbol_daily_adjusted[ symbol ]
if today in df.index:
if 'main_pct' in df.columns:
df.loc[ today ]['main_pct'] = fund_row['main_pct']
if 'main_fund' in df.columns:
df.loc[ today ]['main_fund'] = fund_row['main_fund']
return fund_df
def update_daily_realtime(self, verbose = False) -> bool:
now = dt.datetime.now()
data_updated = False
# if the spot out of date, update for all stocks under watch
next_spot_time = self.last_spot_time + dt.timedelta(seconds=10)
if now > next_spot_time:
spot_df = get_stock_spot(self.watching_symbols, verbose)
if verbose:
print('')
print(spot_df)
self.last_spot_time = now
today = pd.to_datetime( datetime_today() )
for i, spot_row in spot_df.iterrows():
spot_date = spot_row['date'] if ('date' in spot_row) else today
if spot_date >= today:
data_updated = True
symbol = spot_row['symbol']
new_row = self.symbol_daily[ symbol ].iloc[-1].copy()
for k in ['open', 'high', 'low', 'close', 'volume']:
if k in spot_row:
new_row[ k ] = spot_row[ k ]
self.symbol_daily[ symbol ].loc[spot_date] = new_row
if 'factor' in new_row:
new_row = new_row.copy()
adjust_factor = new_row['factor']
for k in ['open', 'high', 'low', 'close']:
new_row[ k ] = new_row[ k ] * adjust_factor
self.symbol_daily_adjusted[ symbol ].loc[spot_date] = new_row
self.update_fundflow_realtime(verbose= verbose)
return data_updated
def get_index_daily(self, symbol, start = None, end = None, count = None) -> pd.DataFrame:
if end is None:
end = self.date_end
if start is None:
start = self.date_start - dt.timedelta(days=90)
df = get_index_daily( symbol )
if start:
df = df[ pd.to_datetime(start) : ]
if end:
df = df[ : | pd.to_datetime(end) | pandas.to_datetime |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import dict2MD, plt2MD, pandasDF2MD
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import from_to
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.groupby import _function_by_group
from brightics.function.utils import _model_dict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.holtwinters import ExponentialSmoothing
import pmdarima as pm
def arima_train(table, group_by=None, **params):
check_required_parameters(_arima_train, params, ['table'])
params = get_default_from_parameters_if_required(params, _arima_train)
param_validation_check = [greater_than_or_equal_to(params, 0, 'p'),
from_to(params, 0, 2, 'd'),
greater_than_or_equal_to(params, 0, 'q')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_arima_train, table, group_by=group_by, **params)
else:
return _arima_train(table, **params)
def _arima_train(table, input_cols, p, d, q, intercept=True):
arima = pm.ARIMA(order=(p, d, q), with_intercept=intercept)
model = _model_dict('arima_model')
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
|## ARIMA Train Result
|
""".format()))
for column in input_cols:
arima_fit = arima.fit(table[column])
model['arima_' + str(column)] = arima_fit
rb.addMD(strip_margin("""
|### Column : {col}
|
| - (p,d,q) order : ({p_val}, {d_val}, {q_val})
| - Intercept : {itc}
| - Coefficients Array : {ca}
| - AIC : {aic}
|
""".format(col=column, p_val=p, d_val=d, q_val=q,
itc=intercept, ca=str(arima_fit.params().tolist()), aic=arima_fit.aic())))
model['coefficients_array_' + str(column)] = arima_fit.params()
model['aic_' + str(column)] = arima_fit.aic()
model['input_columns'] = input_cols
# model['order'] = arima_fit.order()
model['intercept'] = intercept
model['_repr_brtc_'] = rb.get()
    return {'model': model}
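# A hypothetical call, assuming `table` is a DataFrame with a numeric column named 'y'
# (the column name is illustrative only):
#   res = arima_train(table, input_cols=['y'], p=1, d=1, q=0, intercept=True)
#   model = res['model']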
def arima_predict(model, **params):
check_required_parameters(_arima_predict, params, ['model'])
params = get_default_from_parameters_if_required(params,_arima_predict)
param_validation_check = [greater_than_or_equal_to(params, 1, 'prediction_num')]
validate(*param_validation_check)
if '_grouped_data' in model:
return _function_by_group(_arima_predict, model=model, **params)
else:
return _arima_predict(model, **params)
def _arima_predict(model, prediction_num):
df1 = pd.DataFrame()
df2 = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
        # simulate the 'on' behaviour by setting the index explicitly for the expected result
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # these are slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window="1s").count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).count()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
).set_index("A")
result = df.rolling("1s").min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]}
).set_index("A")
tm.assert_frame_equal(result, expected)
result = df.rolling("2s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling("5s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame(
{"B": np.random.randn(N)}, index=date_range("20130101", periods=N, freq="s")
)
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling("2s").min()
assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling("200s").min()
assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"freq, op, result_data",
[
("ms", "min", [0.0] * 10),
("ms", "mean", [0.0] * 9 + [2.0 / 9]),
("ms", "max", [0.0] * 9 + [2.0]),
("s", "min", [0.0] * 10),
("s", "mean", [0.0] * 9 + [2.0 / 9]),
("s", "max", [0.0] * 9 + [2.0]),
("min", "min", [0.0] * 10),
("min", "mean", [0.0] * 9 + [2.0 / 9]),
("min", "max", [0.0] * 9 + [2.0]),
("h", "min", [0.0] * 10),
("h", "mean", [0.0] * 9 + [2.0 / 9]),
("h", "max", [0.0] * 9 + [2.0]),
("D", "min", [0.0] * 10),
("D", "mean", [0.0] * 9 + [2.0 / 9]),
("D", "max", [0.0] * 9 + [2.0]),
],
)
def test_freqs_ops(self, freq, op, result_data):
# GH 21096
index = date_range(start="2018-1-1 01:00:00", freq=f"1{freq}", periods=10)
s = Series(data=0, index=index)
s.iloc[1] = np.nan
s.iloc[-1] = 2
result = getattr(s.rolling(window=f"10{freq}"), op)()
expected = Series(data=result_data, index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
pytest.param(
"count",
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
"median",
"std",
"var",
"kurt",
"skew",
"min",
"max",
],
)
def test_all(self, f):
# simple comparison of integer vs time-based windowing
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window="1s")
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
"count",
"median",
"std",
"var",
"kurt",
"skew",
"min",
"max",
],
)
def test_all2(self, f):
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame(
{"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="H")
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window="5H")
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = (
df.groupby(df.index.day).apply(agg_by_day).reset_index(level=0, drop=True)
)
tm.assert_frame_equal(result, expected)
def test_groupby_monotonic(self):
# GH 15130
# we don't need to validate monotonicity when grouping
data = [
["David", "1/1/2015", 100],
["David", "1/5/2015", 500],
["David", "5/30/2015", 50],
["David", "7/25/2015", 50],
["Ryan", "1/4/2014", 100],
["Ryan", "1/19/2015", 500],
["Ryan", "3/31/2016", 50],
["Joe", "7/1/2015", 100],
["Joe", "9/9/2015", 500],
["Joe", "10/15/2015", 50],
]
df = DataFrame(data=data, columns=["name", "date", "amount"])
df["date"] = to_datetime(df["date"])
expected = (
df.set_index("date")
.groupby("name")
.apply(lambda x: x.rolling("180D")["amount"].sum())
)
result = df.groupby("name").rolling("180D", on="date")["amount"].sum()
tm.assert_series_equal(result, expected)
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
dates = date_range(start="2016-01-01 09:30:00", periods=20, freq="s")
import random
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import deduplication
class TestDeduplication(unittest.TestCase):
def test_singlecolumns_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
r1, r2 = deduplication(["c1", "c2"], df)
self.assertEqual(r1, 100.)
self.assertEqual(r2, 100.)
def test_wholetable_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
r = deduplication(df=df)[0]
self.assertEqual(r, 100.)
def test_singlecolumns_allsame(self):
df = pd.DataFrame()
df["c1"] = [chr(0) for _ in range(100)]
df["c2"] = [10 for _ in range(100)]
df["c3"] = [20 / 0.7 for _ in range(100)]
r1, r2, r3 = deduplication(["c1", "c2", "c3"], df)
self.assertEqual(r1, 1.0)
self.assertEqual(r2, 1.0)
self.assertEqual(r3, 1.0)
def test_wholetable_allsame(self):
df = pd.DataFrame()
df["c1"] = [chr(0) for _ in range(100)]
df["c2"] = [10 for _ in range(100)]
df["c3"] = [20 / 0.7 for _ in range(100)]
r = deduplication(df=df)[0]
self.assertEqual(r, 1.0)
def test_singlecolumns_alldifferent(self):
df = pd.DataFrame()
df["c1"] = [chr(i) for i in range(100)]
df["c2"] = [i for i in range(100)]
df["c3"] = [i / 0.7 for i in range(100)]
r1, r2, r3 = deduplication(["c1", "c2", "c3"], df)
self.assertEqual(r1, 100.0)
self.assertEqual(r2, 100.0)
self.assertEqual(r3, 100.0)
def test_wholetable_alldifferent(self):
df = pd.DataFrame()
df["c1"] = [chr(i) for i in range(100)]
df["c2"] = [i for i in range(100)]
df["c3"] = [i / 0.7 for i in range(100)]
r = deduplication(df=df)[0]
self.assertEqual(r, 100.0)
def test_singlecolumns_partial(self):
df = pd.DataFrame()
"""New style (fast) tag count annos
Use these for new projects.
"""
from mbf_genomics.annotator import Annotator
from typing import Dict, List
from pypipegraph import Job
from mbf_genomics import DelayedDataFrame
import numpy as np
import pypipegraph as ppg
import hashlib
import pandas as pd
import mbf_r
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as numpy2ri
from pathlib import Path
from dppd import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled
from mbf_genomics.util import (
parse_a_or_c_to_plot_name,
parse_a_or_c_to_column,
parse_a_or_c_to_anno,
)
from pandas import DataFrame
dp, X = dppd()
# ## Base classes and strategies - skip these if you just care about using TagCount annotators
class _CounterStrategyBase:
cores_needed = 1
def extract_lookup(self, data):
"""Adapter for count strategies that have different outputs
(e.g. one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)
"""
return data
class CounterStrategyStrandedRust(_CounterStrategyBase):
cores_needed = -1
name = "stranded"
def __init__(self):
self.disable_sanity_check = False
def count_reads(
self,
interval_strategy,
genome,
bam_filename,
bam_index_name,
reverse=False,
dump_matching_reads_filename=None,
):
# bam_filename = bamfil
intervals = interval_strategy._get_interval_tuples_by_chr(genome)
gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
from mbf_bam import count_reads_stranded
if dump_matching_reads_filename:
dump_matching_reads_filename = str(dump_matching_reads_filename)
res = count_reads_stranded(
bam_filename,
bam_index_name,
intervals,
gene_intervals,
matching_reads_output_bam_filename=dump_matching_reads_filename,
)
self.sanity_check(res, bam_filename)
return res
def sanity_check(self, forward_and_reverse, bam_filename):
if self.disable_sanity_check:
return
error_count = 0
forward, reverse = forward_and_reverse
for gene_stable_id, forward_count in forward.items():
reverse_count = reverse.get(gene_stable_id, 0)
if (reverse_count > 100) and (reverse_count > forward_count * 1.1):
error_count += 1
if error_count > 0.1 * len(forward):
raise ValueError(
"Found at least %.2f%% of genes to have a reverse read count (%s) "
"above 110%% of the exon read count (and at least 100 tags). "
"This indicates that this lane (%s) should have been reversed before alignment. "
"Set reverse_reads=True on your Lane object"
% (
100.0 * error_count / len(forward),
self.__class__.__name__,
bam_filename,
)
)
def extract_lookup(self, data):
"""Adapter for count strategies that have different outputs
(e.g. one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)
"""
return data[0]
class CounterStrategyUnstrandedRust(_CounterStrategyBase):
cores_needed = -1
name = "unstranded"
def count_reads(
self,
interval_strategy,
genome,
bam_filename,
bam_index_name,
reverse=False,
dump_matching_reads_filename=None,
):
# bam_filename = bamfil
if dump_matching_reads_filename:
raise ValueError(
"dump_matching_reads_filename not supoprted on this Counter"
)
intervals = interval_strategy._get_interval_tuples_by_chr(genome)
gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
# chr -> [gene_id, strand, [start], [stops]
from mbf_bam import count_reads_unstranded
res = count_reads_unstranded(
bam_filename, bam_index_name, intervals, gene_intervals
)
return res
class _IntervalStrategy:
def get_interval_lengths_by_gene(self, genome):
by_chr = self._get_interval_tuples_by_chr(genome)
length_by_gene = {}
for chr, tups in by_chr.items():
for tup in tups: # stable_id, strand, [starts], [stops]
gene_stable_id = tup[0]
length = 0
for start, stop in zip(tup[2], tup[3]):
length += stop - start
length_by_gene[gene_stable_id] = length
return length_by_gene
def _get_interval_tuples_by_chr(self, genome): # pragma: no cover
raise NotImplementedError()
def get_deps(self):
return []
class IntervalStrategyGenomicRegion(_IntervalStrategy):
"""Used internally by _FastTagCounterGR"""
def __init__(self, gr):
self.gr = gr
self.name = f"GR_{gr.name}"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
if self.gr.genome != genome: # pragma: no cover
raise ValueError("Mismatched genomes")
df = self.gr.df
if not "strand" in df.columns:
df = df.assign(strand=1)
df = df[["chr", "start", "stop", "strand"]]
if df.index.duplicated().any():
raise ValueError("index must be unique")
for tup in df.itertuples():
result[tup.chr].append((str(tup[0]), tup.strand, [tup.start], [tup.stop]))
return result
class IntervalStrategyGene(_IntervalStrategy):
"""Count from TSS to TES"""
name = "gene"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
gene_info = genome.df_genes
for tup in gene_info[["chr", "start", "stop", "strand"]].itertuples():
result[tup.chr].append((tup[0], tup.strand, [tup.start], [tup.stop]))
return result
class IntervalStrategyExon(_IntervalStrategy):
"""count all exons"""
name = "exon"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for gene in genome.genes.values():
exons = gene.exons_merged
result[gene.chr].append(
(gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))
)
return result
class IntervalStrategyIntron(_IntervalStrategy):
"""count all introns"""
name = "intron"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for gene in genome.genes.values():
exons = gene.introns_strict
result[gene.chr].append(
(gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))
)
return result
class IntervalStrategyExonSmart(_IntervalStrategy):
"""For protein coding genes: count only in exons of protein-coding transcripts.
For other genes: count all exons"""
name = "exonsmart"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for g in genome.genes.values():
e = g.exons_protein_coding_merged
if len(e[0]) == 0:
e = g.exons_merged
result[g.chr].append((g.gene_stable_id, g.strand, list(e[0]), list(e[1])))
return result
# Now the actual tag count annotators
class TagCountCommonQC:
def register_qc(self, genes):
if not qc_disabled():
self.register_qc_distribution(genes)
self.register_qc_pca(genes)
# self.register_qc_cummulative(genes)
def register_qc_distribution(self, genes):
output_filename = genes.result_dir / self.qc_folder / "read_distribution.png"
output_filename.parent.mkdir(exist_ok=True)
def plot(
output_filename,
elements,
qc_distribution_scale_y_name=self.qc_distribution_scale_y_name,
):
df = genes.df
df = dp(df).select({x.aligned_lane.name: x.columns[0] for x in elements}).pd
if len(df) == 0:
df = pd.DataFrame({"x": [0], "y": [0], "text": "no data"})
dp(df).p9().add_text("x", "y", "text").render(output_filename).pd
else:
plot_df = dp(df).melt(var_name="sample", value_name="count").pd
plot = dp(plot_df).p9().theme_bw()
print(df)
# df.to_pickle(output_filename + '.pickle')
if ((df > 0).sum(axis=0) > 1).any() and len(df) > 1:
# plot = plot.geom_violin(
# dp.aes(x="sample", y="count"), width=0.5, bw=0.1
# )
pass # oh so slow as of 20201019
if len(plot_df["sample"].unique()) > 1:
plot = plot.annotation_stripes(fill_range=True)
if (plot_df["count"] > 0).any():
# can't have a log boxplot with all nans (log(0))
plot = plot.scale_y_continuous(
trans="log10",
name=qc_distribution_scale_y_name,
breaks=[1, 10, 100, 1000, 10000, 100_000, 1e6, 1e7],
)
return (
plot.add_boxplot(
x="sample", y="count", _width=0.1, _fill=None, _color="blue"
)
.turn_x_axis_labels()
.title("Raw read distribution")
.hide_x_axis_title()
.render_args(limitsize=False)
.render(output_filename, width=0.2 * len(elements) + 1, height=4)
)
return register_qc(
QCCollectingJob(output_filename, plot)
.depends_on(genes.add_annotator(self))
.add(self)
)
def register_qc_pca(self, genes):
output_filename = genes.result_dir / self.qc_folder / "pca.png"
def plot(output_filename, elements):
import sklearn.decomposition as decom
if len(elements) == 1:
xy = np.array([[0], [0]]).transpose()
title = "PCA %s - fake / single sample" % genes.name
else:
pca = decom.PCA(n_components=2, whiten=False)
data = genes.df[[x.columns[0] for x in elements]]
data -= data.min() # min max scaling 0..1
data /= data.max()
data = data[~pd.isnull(data).any(axis=1)] # can't do PCA on NaN values
if len(data):
pca.fit(data.T)
xy = pca.transform(data.T)
title = "PCA %s\nExplained variance: x %.2f%%, y %.2f%%" % (
genes.name,
pca.explained_variance_ratio_[0] * 100,
pca.explained_variance_ratio_[1] * 100,
)
else:
xy = np.array(
[[0] * len(elements), [0] * len(elements)]
).transpose()
title = "PCA %s - fake / no rows" % genes.name
plot_df = pd.DataFrame(
{"x": xy[:, 0], "y": xy[:, 1], "label": [x.plot_name for x in elements]}
)
print(plot_df)
(
dp(plot_df)
.p9()
.theme_bw()
.add_scatter("x", "y")
.add_text(
"x",
"y",
"label",
# cool, this can go into an endless loop...
# _adjust_text={
# "expand_points": (2, 2),
# "arrowprops": {"arrowstyle": "->", "color": "red"},
# },
)
.scale_color_many_categories()
.title(title)
.render(output_filename, width=8, height=6)
)
return register_qc(
QCCollectingJob(output_filename, plot)
.depends_on(genes.add_annotator(self))
.add(self)
)
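# _FastTagCounter ties one count strategy (stranded/unstranded) and one interval
# strategy (gene/exon/intron/...) to a single aligned lane and exposes the
# per-gene read counts as one annotator column.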
class _FastTagCounter(Annotator, TagCountCommonQC):
def __init__(
self,
aligned_lane,
count_strategy,
interval_strategy,
column_name,
column_desc,
dump_matching_reads_filename=None,
):
if not hasattr(aligned_lane, "get_bam"):
raise ValueError("_FastTagCounter only accepts aligned lanes!")
self.aligned_lane = aligned_lane
self.genome = self.aligned_lane.genome
self.count_strategy = count_strategy
self.interval_strategy = interval_strategy
self.columns = [(column_name % (self.aligned_lane.name,)).strip()]
self.cache_name = (
"FT_%s_%s" % (count_strategy.name, interval_strategy.name)
+ "_"
+ hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
)
self.column_properties = {self.columns[0]: {"description": column_desc}}
self.vid = aligned_lane.vid
self.cores_needed = count_strategy.cores_needed
self.plot_name = self.aligned_lane.name
self.qc_folder = f"{self.count_strategy.name}_{self.interval_strategy.name}"
self.qc_distribution_scale_y_name = "raw counts"
self.dump_matching_reads_filename = dump_matching_reads_filename
def calc(self, df):
if ppg.inside_ppg():
data = self._data
else:
data = self.calc_data()
lookup = self.count_strategy.extract_lookup(data)
result = []
for gene_stable_id in df["gene_stable_id"]:
result.append(lookup.get(gene_stable_id, 0))
result = np.array(result, dtype=np.float)
return pd.Series(result)
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
# This only gets run at the end of the season to update the stats
def list_of_specific_files(r_then_file_directory):
files = os.listdir(f"{r_then_file_directory}")
new_file = []
for i in files:
new_file.append(i)
return new_file
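# year_season pulls the season year out of every "EPL Season <year>.xlsx"
# filename found in the Fantasy_EPL folder.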
def year_season():
year = []
for file_number in range(len(list_of_specific_files(r"C:\Users\sabzu\Documents\Fantasy_EPL"))):
year_in_file = list_of_specific_files(r"C:\Users\sabzu\Documents\Fantasy_EPL")[file_number]
split_year_in_file = year_in_file.split(" ")
for i in split_year_in_file:
if 'xlsx' in i:
y = i.split(".")
year.append(y[0])
return year
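# weekly_stats turns each player's cumulative season sheet (wins, ties, losses,
# points, goals for/against, goal difference) into per-matchweek values by
# differencing consecutive rows, then stacks every season into one DataFrame.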
def weekly_stats(Df, player_name_from_class):
for year in year_season():
weekly = pd.read_excel(rf"C:\Users\sabzu\Documents\Fantasy_EPL\EPL Season {year}.xlsx",
sheet_name=player_name_from_class)
weekly = weekly.iloc[:, 1:9]
weekly.insert(0, "Year", [int(year)] * 38)
df_cols = weekly.columns
selected_cols = df_cols[2:]
lists_of_weekly_stats = []
column = 2
for col in selected_cols:
weekly_stat = []
row = 0
for i in weekly[col]:
if i == weekly.iloc[0, column]:
weekly_stat.append(i)
row += 1
else:
this = int(i) - weekly.iloc[(row - 1), column]
weekly_stat.append(this)
row += 1
lists_of_weekly_stats.append(weekly_stat)
column += 1
for col in selected_cols:
i = 0
weekly[f"Wkly_{col}"] = lists_of_weekly_stats[i]
lists_of_weekly_stats.pop(0)
weekly = weekly[
['Year', 'Mp', 'Wkly_Wins', 'Wkly_Ties', 'Wkly_Loss', 'Wkly_Points',
'Wkly_Tot_GF', 'Wkly_Tot_GA', 'Wkly_GD']]
weekly["Wkly_GF"] = weekly["Wkly_Tot_GF"]
del weekly["Wkly_Tot_GF"]
weekly["Wkly_GA"] = weekly["Wkly_Tot_GA"]
del weekly["Wkly_Tot_GA"]
if Df.empty:
Df = weekly
else:
Df = pd.concat([Df, weekly])
return Df
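# player_total_stats sums each player's per-season rows from
# df_all_time_player_standings into career totals and adds a Pts/G column.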
def player_total_stats():
player_totals = pd.DataFrame()
cols, cols2 = df_all_time_player_standings.columns[1:7], df_all_time_player_standings.columns[8:12]
cols = [i for i in cols]
cols2 = [n for n in cols2]
cols = cols+cols2
player = df_all_time_player_standings["Player"].unique()
for i in cols[1:]:
totals = []
for p in player:
pdf = df_all_time_player_standings[df_all_time_player_standings[cols[0]] == p]
summ = sum(pdf[i])
totals.append(summ)
player_totals[i] = totals
player_totals.insert(6,"Pts/G", (player_totals["Pts"] / player_totals["Mp"]).round(2))
player_totals.insert(0,"Player", player)
player_totals.sort_values(["Pts", "GD"], ascending=False, inplace=True)
return player_totals
sab_wkly_stats_df = pd.DataFrame()
sabastian_weekly_stats = weekly_stats(sab_wkly_stats_df, "Sabastian")
df_team_standings = pd.DataFrame()
for year in year_season():
team_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
team_standings = team_standings.iloc[:20, 0:9]
team_standings.insert(0, "Year", [int(year)] * 20)
if df_team_standings.empty:
df_team_standings = team_standings
else:
df_team_standings = pd.concat([df_team_standings, team_standings])
df_team_standings = df_team_standings.sort_values(["Pts", "GD"], ascending=False).reset_index(drop=True)
df_team_Xstandings = pd.DataFrame()
for year in year_season():
if int(year) > 2017:
team_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
team_standings = team_standings.iloc[:20, 10:19]
team_standings.insert(0, "Year", [int(year)] * 20)
if df_team_Xstandings.empty:
df_team_Xstandings = team_standings
else:
df_team_Xstandings = pd.concat([df_team_Xstandings, team_standings])
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because it's not
# defined; we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
pd.core.arrays.PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(
pd.IntervalIndex.from_breaks([0, 1, 2]),
pd.core.arrays.IntervalArray,
"interval",
),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
pd.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
    assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
    assert isinstance(result, TimedeltaArray)
else:
    assert isinstance(result, PandasArray)
import time
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pathlib import Path
import context
from mhealth.utils.plotter_helper import save_figure
from mhealth.utils.commons import create_progress_bar
# Used if command-line option --parameters is not provided.
DEFAULT_PARAMETERS = ["Temperatur", "Herzfrequenz", "Atemfrequenz"]
# Data sources included in HF-AF_25052021.csv.
VALIDATION_DATA_SOURCES = ["WELCHALLYN_MONITOR", "PHILIPS_GATEWAY"]
# Half-ranges relevant for the validation: x +/- delta
DELTAS = {
"Atemfrequenz": 3, # ±3bpm
"Herzfrequenz": 10, # ±10bpm
"Temperatur": 0.5 # ±0.5°C
}
# Half-range of the for the timestamp delta, in minutes.
DELTA_TS = 2.5 # ±2.5min
# Devices are identified by the bed number they are used with.
# In case of device breakdown (or other problems), some devices
# were replaced by a device of another room. The below lookup
# specifies which the bed ids (devices) must be renamed, as well
# as the time range, between which the lookup applies.
DEVICE_REPLACEMENT_LOOKUP = {
# Alias True From To
"2653F" : ("2655F", "2021-05-14 12:00:00+02:00", None),
"2652F" : ("2656FL", "2021-05-18 00:00:00+02:00", None),
"2661TL" : ("2661FL", "2021-05-20 00:00:00+02:00", None),
"2664T" : ("2664F", "2021-05-12 00:00:00+02:00", None),
"2665T" : ("2665F", None, "2021-05-19 10:30:00+02:00"),
}
# Expected value ranges per vital parameter.
VALUE_RANGES = {
"Atemfrequenz": [0, 35],
"Herzfrequenz": [30, 130],
"Temperatur": [35, 40],
}
BIN_WIDTHS = {
"Atemfrequenz": 0.5,
"Herzfrequenz": 1,
"Temperatur": 0.01,
}
BIN_WIDTHS_VALID = {
"Atemfrequenz": 1,
"Herzfrequenz": 2,
"Temperatur": 0.1,
}
def tic():
return time.time()
def toc(label, start):
diff = time.time()-start
print(label + (": %.3f" % diff))
def check_dir(path):
if not path.is_dir():
msg = "Requested folder does not exist: %s"
raise FileNotFoundError(msg % path)
def ensure_dir(path, exist_ok=True):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True, exist_ok=exist_ok)
return path.is_dir()
def apply_replacement_lookup(df):
print("Applying device replacements...")
def dt_to_str(dt):
return "--" if dt is None else dt.strftime("%m.%d.%y %H:%M")
for id_alias, replace_data in DEVICE_REPLACEMENT_LOOKUP.items():
id_true, repl_start, repl_stop = replace_data
repl_start = pd.to_datetime(repl_start)
repl_stop = pd.to_datetime(repl_stop)
mask = ((df["Bettenstellplatz"]==id_alias) &
((repl_start is None) or df["Timestamp"]>=repl_start) &
((repl_stop is None) or df["Timestamp"]<=repl_stop))
df.loc[mask, "Bettenstellplatz"] = id_true
print("%-6s => %-6s: %6d affected values in time range (%s, %s)"
% (id_alias, id_true, mask.sum(),
dt_to_str(repl_start), dt_to_str(repl_stop)))
print()
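# read_validation_data merges the ward monitor export (HF-AF_25052021.csv,
# restricted to VALIDATION_DATA_SOURCES) with the manually recorded vital signs
# into one long-format table: Timestamp, Bettenstellplatz, Signatur,
# Vitalparameter, Wert plus the remark columns.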
def read_validation_data(data_dir):
def no_whitespace(s):
return s.replace(" ", "")
def fix_time(s):
return s.replace(".", ":")
def form_timestamp(df, col_date, col_time):
timestamp = df[col_date] + " " + df[col_time]
timestamp = pd.to_datetime(timestamp, dayfirst=True)
timestamp = timestamp.dt.tz_localize("Europe/Zurich").copy()
timestamp[(df[col_date]=="") | (df[col_time]=="")] = None
return timestamp
def format_manual(df, timestamp, parameter):
df_ret = df[["Bettenstellplatz", parameter,
"Bemerkungen", "Abweichung_Trageort"]].copy()
df_ret = df_ret.rename({parameter: "Wert"}, axis=1)
icol = df_ret.columns.get_loc("Wert")
df_ret.insert(loc=icol, column="Vitalparameter", value=parameter)
df_ret.insert(loc=0, column="Timestamp", value=timestamp)
df_ret = df_ret[~df_ret["Wert"].isna()].copy()
return df_ret
def read_station_data(valid_dir):
file_path = valid_dir/"HF-AF_25052021.csv"
df = pd.read_csv(file_path,
converters={"Signatur": str.strip,
"Bettenstellplatz": str.strip})
df = df[df["Signatur"].isin(VALIDATION_DATA_SOURCES)]
timestamp = form_timestamp(df=df, col_date="Datum", col_time="Zeit")
df.insert(loc=0, column="Timestamp", value=timestamp)
df = df.drop(["Datum", "Zeit"], axis=1)
# Transform to long format.
df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur"],
value_vars=["Herzfrequenz", "Atemfrequenz", "Temperatur"],
var_name="Vitalparameter", value_name="Wert")
df = df[~df["Wert"].isna()].copy()
df["Bemerkungen"] = ""
df["Abweichung_Trageort"] = ""
return df
def read_manual_data(valid_dir):
file_path = valid_dir/"Validierung_Daten_manuell_Mai2021_alle.csv"
df = pd.read_csv(file_path,
converters={"Bettenstellplatz": no_whitespace,
"Zeit_AF": fix_time,
"Zeit_HF": fix_time,
"Bemerkungen": str.strip,
"Abweichung_Trageort": str.strip})
# Atemfrequenz
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_AF")
df_a = format_manual(df=df, timestamp=ts, parameter="Atemfrequenz")
# Herzfrequenz
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
df_h = format_manual(df=df, timestamp=ts, parameter="Herzfrequenz")
# Temperatur (Zeit_Temp is missing, so use Zeit_HF!)
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
df_t = format_manual(df=df, timestamp=ts, parameter="Temperatur")
df = pd.concat((df_a, df_h, df_t), axis=0)
df["Signatur"] = "MANUELL"
return df
print("Reading Validation data...")
valid_dir = data_dir/"original"/"validation"
check_dir(valid_dir)
df_station = read_station_data(valid_dir=valid_dir)
df_manual = read_manual_data(valid_dir=valid_dir)
df_valid = pd.concat((df_station, df_manual), axis=0)
df_valid = df_valid.sort_values(["Bettenstellplatz", "Timestamp"])
return df_valid
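# read_baslerband_data walks the per-day export folders for
# basler_band_*_activity_file.csv, keeps only samples flagged as worn
# (wearing == 4), renames the filtered respiration/heart-rate columns to the
# German parameter names and concatenates everything into one DataFrame.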
def read_baslerband_data(data_dir, n_max=None):
def read_bb_file(path):
# Sample path:
# ../2021-05-25/2617_FL/basler_band_DB_B4_2C_E5_CC_45_activity_file.csv
bed_id = path.parent.name.replace("_", "")
if bed_id == "2668":
bed_id = "2668E"
device_id = path.stem
device_id = device_id.replace("basler_band_", "")
device_id = device_id.replace("_activity_file", "")
df = pd.read_csv(path, index_col=[0], parse_dates=[0], sep=";")
df.index.name = "Timestamp"
# Filter by quality as specified
df = df[df["wearing"]==4]
df = df[["resp_filtered", "hrm_filtered",]]
df = df.rename({"resp_filtered": "Atemfrequenz",
"hrm_filtered": "Herzfrequenz"}, axis=1)
df["Bettenstellplatz"] = bed_id
df["DeviceID"] = device_id
df["Signatur"] = "BASLER_BAND"
df = df.reset_index(drop=False)
return df
print("Reading Basler Band data...")
bb_dir = data_dir/"original"/"basler_band"
check_dir(bb_dir)
files = bb_dir.glob("**/basler_band*activity_file.csv")
files = sorted(files)
dfs = []
progress = create_progress_bar(size=len(files),
label="Processing...")
for i, path in enumerate(files):
if i>=n_max:
break
progress.update(i)
df = read_bb_file(path=path)
dfs.append(df)
progress.finish()
df = pd.concat(dfs, axis=0)
return df