prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import warnings
import numpy as np
import pandas as pd
from lhorizon.constants import LUNAR_RADIUS
from lhorizon.lhorizon_utils import make_raveled_meshgrid
from lhorizon.solutions import make_ray_sphere_lambdas
from lhorizon.target import Targeter
from lhorizon.tests.data.test_cases import TEST_CASES
from lhorizon.kernels import load_metakernel
load_metakernel()
lunar_solutions = make_ray_sphere_lambdas(LUNAR_RADIUS)
def test_find_targets_long():
path = TEST_CASES["TRANQUILITY_2021"]["data_path"]
targeter = Targeter(
| pd.read_csv(path + "_CENTER.csv") | pandas.read_csv |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = | Index([1, 0, 1, 0]) | pandas.Index |
"""Move Mouse Pointer."""
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit person to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import cv2
import pandas as pd
import numpy as np
from sys import exit
from datetime import datetime
import time
from model_base import ModelBase
from gaze_estimation import GazeEstimation
from mouse_controller import MouseController
from MediaReader import MediaReader
from signal import SIGINT, signal
from argparse import ArgumentParser
from sys import platform
import os
import math
# Get correct CPU extension
if platform == "linux" or platform == "linux2":
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
elif platform == "darwin":
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension.dylib"
elif platform == "win32":
CPU_EXTENSION = None
else:
print("Unsupported OS.")
exit(1)
model_names = {'fd':'facial detection', 'fl': 'landmark detection', 'hp': 'head pose', 'ge':'gaze estimation'}
def build_argparser():
"""
Parse command line arguments.
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str,
help="Path to input image or video file. 0 for webcam.")
parser.add_argument("-p", "--precisions", required=False, type=str, default='FP16',
help="Set model precisions as a comma-separated list without spaces"
", e.g. FP32,FP16,FP32-INT8 (FP16 by default)")
parser.add_argument("-fdm", "--fd_model", required=False, type=str,
help="Path to directory for a trained Face Detection model."
" This directory path must include the model's precision because"
"face-detection-adas-binary-0001 has only one precision, FP32-INT1."
"(../models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001"
" by default)",
default="../models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001")
parser.add_argument("-flm", "--fl_model", required=False, type=str,
help="Path to directory for a trained Facial Landmarks model."
" The directory must have the model precisions as subdirectories."
"../models/intel/landmarks-regression-retail-0009 by default)",
default="../models/intel/landmarks-regression-retail-0009")
parser.add_argument("-hpm", "--hp_model", required=False, type=str,
help="Path to directory for a trained Head Pose model."
" The directory must have the model precisions as subdirectories."
"(../models/intel/head-pose-estimation-adas-0001 by default)",
default="../models/intel/head-pose-estimation-adas-0001")
parser.add_argument("-gem", "--ge_model", required=False, type=str,
help="Path to directory for a trained Gaze Detection model."
" The directory must have the model precisions as subdirectories."
"(../models/intel/gaze-estimation-adas-0002 by default)",
default="../models/intel/gaze-estimation-adas-0002")
parser.add_argument("-l", "--cpu_extension", required=False, type=str,
default=None,
help="MKLDNN (CPU)-targeted custom layers."
" Absolute path to a shared library with the"
" kernels impl.")
parser.add_argument("-d", "--device", type=str, required=False, default="CPU",
help="Specify the target device to infer on: "
"CPU, GPU, FPGA or MYRIAD is acceptable. The program "
"will look for a suitable plugin for the device "
"specified (CPU by default)")
parser.add_argument("-ct", "--conf_threshold", type=float, default=0.3, required=False,
help="Confidence threshold for detections filtering"
" (0.3 by default)")
parser.add_argument("-bm", "--benchmark", required=False, type=lambda s: s.lower() in ['true', 't', 'yes', '1'],
default=True, help="Show benchmark data? True|False (True by default)")
parser.add_argument("-nf", "--num_frames", required=False, type=int, default=100,
help="The number of frames to run. Use this to limit running time, "
"especially if using webcam. (100 by default)")
parser.add_argument("-sv", "--showvideo", required=False, type=lambda s: s.lower() in ['true', 't', 'yes', '1'],
default=True, help="Show video while running? True|False. (True by default)")
parser.add_argument("-async", "--async_inference", required=False, type=lambda s: s.lower() in ['true', 't', 'yes', '1'],
default=True, help="If True, run asynchronous inference where possible. "
"If false, run synchronous inference. True|False. (True by default)")
parser.add_argument("-v", "--visualize", required=False, type=lambda s: s.lower() in ['true', 't', 'yes', '1'],
default=True, help="If True, visualize the outputs from each model. "
"If -v is True then the video will be shown regardless of -sv. "
"If false, do not show outputs. True|False. (True by default)")
return parser
def draw_box(image, start_point, end_point):
box_col = (0,255,0) #GREEN
thickness = 4
image = cv2.rectangle(image, start_point, end_point, box_col, thickness)
return image
def scale_dims(shape, x, y):
width = shape[1]
height= shape[0]
x = int(x*width)
y = int(y*height)
return x, y
#build_camera_matrix and draw_axes code from https://knowledge.udacity.com/questions/171017, thanks to <NAME>
def build_camera_matrix(center_of_face, focal_length):
cx = int(center_of_face[0])
cy = int(center_of_face[1])
camera_matrix = np.zeros((3, 3), dtype='float32')
camera_matrix[0][0] = focal_length
camera_matrix[0][2] = cx
camera_matrix[1][1] = focal_length
camera_matrix[1][2] = cy
camera_matrix[2][2] = 1
return camera_matrix
def draw_axes(frame, center_of_face, yaw, pitch, roll, scale, focal_length):
yaw *= np.pi / 180.0
pitch *= np.pi / 180.0
roll *= np.pi / 180.0
cx = int(center_of_face[0])
cy = int(center_of_face[1])
Rx = np.array([[1, 0, 0],
[0, math.cos(pitch), -math.sin(pitch)],
[0, math.sin(pitch), math.cos(pitch)]])
Ry = np.array([[math.cos(yaw), 0, -math.sin(yaw)],
[0, 1, 0],
[math.sin(yaw), 0, math.cos(yaw)]])
Rz = np.array([[math.cos(roll), -math.sin(roll), 0],
[math.sin(roll), math.cos(roll), 0],
[0, 0, 1]])
# R = np.dot(Rz, Ry, Rx)
# ref: https://www.learnopencv.com/rotation-matrix-to-euler-angles/
# R = np.dot(Rz, np.dot(Ry, Rx))
R = Rz @ Ry @ Rx
# print(R)
camera_matrix = build_camera_matrix(center_of_face, focal_length)
xaxis = np.array(([1 * scale, 0, 0]), dtype='float32').reshape(3, 1)
yaxis = np.array(([0, -1 * scale, 0]), dtype='float32').reshape(3, 1)
zaxis = np.array(([0, 0, -1 * scale]), dtype='float32').reshape(3, 1)
zaxis1 = np.array(([0, 0, 1 * scale]), dtype='float32').reshape(3, 1)
o = np.array(([0, 0, 0]), dtype='float32').reshape(3, 1)
o[2] = camera_matrix[0][0]
xaxis = np.dot(R, xaxis) + o
yaxis = np.dot(R, yaxis) + o
zaxis = np.dot(R, zaxis) + o
zaxis1 = np.dot(R, zaxis1) + o
xp2 = (xaxis[0] / xaxis[2] * camera_matrix[0][0]) + cx
yp2 = (xaxis[1] / xaxis[2] * camera_matrix[1][1]) + cy
p2 = (int(xp2), int(yp2))
cv2.line(frame, (cx, cy), p2, (0, 0, 255), 2)
xp2 = (yaxis[0] / yaxis[2] * camera_matrix[0][0]) + cx
yp2 = (yaxis[1] / yaxis[2] * camera_matrix[1][1]) + cy
p2 = (int(xp2), int(yp2))
cv2.line(frame, (cx, cy), p2, (0, 255, 0), 2)
xp1 = (zaxis1[0] / zaxis1[2] * camera_matrix[0][0]) + cx
yp1 = (zaxis1[1] / zaxis1[2] * camera_matrix[1][1]) + cy
p1 = (int(xp1), int(yp1))
xp2 = (zaxis[0] / zaxis[2] * camera_matrix[0][0]) + cx
yp2 = (zaxis[1] / zaxis[2] * camera_matrix[1][1]) + cy
p2 = (int(xp2), int(yp2))
cv2.line(frame, p1, p2, (255, 0, 0), 2)
cv2.circle(frame, p2, 3, (255, 0, 0), 2)
return frame
#scale the landmarks to the whole frame size
def scale_landmarks(landmarks, image_shape, orig, image, draw):
color = (0,0,255) #RED
thickness = cv2.FILLED
num_lm = len(landmarks)
orig_x = orig[0]
orig_y = orig[1]
scaled_landmarks = []
for point in range(0, num_lm, 2):
x, y = scale_dims(image_shape, landmarks[point], landmarks[point+1])
x_scaled = orig_x + x
y_scaled = orig_y + y
if draw:
image = cv2.circle(image, (x_scaled, y_scaled), 2, color, thickness)
scaled_landmarks.append([x_scaled, y_scaled])
return scaled_landmarks, image
def process_model_names(name):
new_path = name.replace("\\","/")
dir, new_name = new_path.rsplit('/', 1)
if name.find(dir) == -1:
dir, _ = name.rsplit('\\',1)
return dir, new_name
def run_pipeline(network, input_image, duration):
# Detect faces
#Preprocess the input
start_time = time.perf_counter()
p_image = network.preprocess_input(input_image)
duration['input'] += time.perf_counter() - start_time
#print("duration ", duration['input']*100000)
#Infer the faces
start_time = time.perf_counter()
network.sync_infer(p_image)
duration['infer'] += time.perf_counter() - start_time
#Get the outputs
start_time = time.perf_counter()
output = network.preprocess_output()
duration['output'] += time.perf_counter() - start_time
return duration, output
def output_bm(args, t_df, r_df, frames):
t_df=t_df*1000 #Convert to (ms)
avg_df = t_df/frames
now = datetime.now()
print("OpenVINO Results")
print ("Current date and time: ",now.strftime("%Y-%m-%d %H:%M:%S"))
print("Platform: {}".format(platform))
print("Device: {}".format(args.device))
print("Asynchronous Inference: {}".format(args.async_inference))
print("Precision: {}".format(args.precisions))
print("Total frames: {}".format(frames))
print("Total runtimes(s):")
print(r_df)
print("\nTotal Durations(ms) per phase:")
print(t_df)
print("\nDuration(ms)/Frames per phase:")
print(avg_df)
print("\n*********************************************************************************\n\n\n")
def infer_on_stream(args):
"""
Initialize the inference network, stream video to network,
and output stats and video.
"""
try:
######### Setup fonts for text on image ########################
font = cv2.FONT_HERSHEY_SIMPLEX
org = (10,40)
fontScale = .5
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of 1 px
thickness = 1
text = ""
#######################################
fd_dir, fd_model = process_model_names(args.fd_model)
_, fl_model = process_model_names(args.fl_model)
_, hp_model = process_model_names(args.hp_model)
_, ge_model = process_model_names(args.ge_model)
# Initialize the classes
fd_infer_network = ModelBase(name=model_names['fd'], dev=args.device, ext=args.cpu_extension, threshold=args.conf_threshold)
fl_infer_network = ModelBase(name = model_names['fl'], dev=args.device, ext=args.cpu_extension)
hp_infer_network = ModelBase(name = model_names['hp'], dev=args.device, ext=args.cpu_extension)
ge_infer_network = GazeEstimation(name = model_names['ge'],dev=args.device, ext=args.cpu_extension)
precisions=args.precisions.split(",")
columns=['load','input','infer','output']
model_indeces=[fd_infer_network.short_name, fl_infer_network.short_name, hp_infer_network.short_name, ge_infer_network.short_name]
iterables = [model_indeces,precisions]
index = pd.MultiIndex.from_product(iterables, names=['Model','Precision'])
total_df = pd.DataFrame(np.zeros((len(model_indeces)*len(precisions),len(columns)), dtype=float),index=index, columns=columns)
flip=False
cap = MediaReader(args.input)
if cap.sourcetype() == MediaReader.CAMSOURCE:
flip = True
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
mc = MouseController('high', 'fast')
screenWidth, screenHeight = mc.monitor()
if args.showvideo:
cv2.startWindowThread()
cv2.namedWindow("Out")
if platform == "win32":
cv2.moveWindow("Out", int((screenWidth-frame_width)/2), int((screenHeight-frame_height)/2))
else:
cv2.moveWindow("Out", int((screenWidth-frame_width)/2), int((screenHeight+frame_height)/2))
# Process frames until the video ends, or process is exited
### TODO: Load the models through `infer_network` ###
print("Video being shown: ", str(args.showvideo))
#Dictionary to store runtimes for each precision
runtime={}
#Camera parameters for drawing pose axes
focal_length = 950.0
scale = 50
for precision in precisions:
print("Beginning test for precision {}.".format(precision))
mc.put(int(screenWidth/2), int(screenHeight/2)) #Place the mouse cursor in the center of the screen
frame_count=0
runtime_start = time.perf_counter()
fl_dir = os.path.join(args.fl_model, precision)
hp_dir = os.path.join(args.hp_model, precision)
ge_dir = os.path.join(args.ge_model, precision)
total_df.loc(axis=0)[fd_infer_network.short_name,precision]['load'] = fd_infer_network.load_model(dir=fd_dir, name=fd_model)
total_df.loc(axis=0)[fl_infer_network.short_name,precision]['load'] = fl_infer_network.load_model(dir=fl_dir, name=fl_model)
total_df.loc(axis=0)[hp_infer_network.short_name,precision]['load'] = hp_infer_network.load_model(dir=hp_dir, name=hp_model)
total_df.loc(axis=0)[ge_infer_network.short_name,precision]['load'] = ge_infer_network.load_model(dir=ge_dir, name=ge_model)
too_many = False
not_enough = False
single = False
gaze = [[0, 0, 0]]
cap.set(property=cv2.CAP_PROP_POS_FRAMES, val=0)
while cap.isOpened():
if args.num_frames!=None and frame_count>=args.num_frames:
break
# Read the next frame
flag, frame = cap.read()
if not flag:
break
#Flip the frame is the input is from the web cam
if flip: frame=cv2.flip(frame, 1)
frame_count+=1
frame = cv2.putText(frame, text, org, font, fontScale, color, thickness, cv2.LINE_AA)
# Break if escape key pressed
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Detect faces
total_df.loc(axis=0)[fd_infer_network.short_name,precision], outputs = run_pipeline(fd_infer_network, frame, total_df.loc(axis=0)[fd_infer_network.short_name,precision])
coords = [[x_min, y_min, x_max, y_max] for _, _, conf, x_min, y_min, x_max, y_max in outputs[fd_infer_network.output_name][0][0] if conf>=args.conf_threshold]
num_detections = len(coords)
### Execute the pipeline only if one face is in the frame
if num_detections == 1:
too_many = False
not_enough = False
if not single:
text="I see you. Move the mouse cursor with your eyes."
print(text)
single=True
x_min, y_min, x_max, y_max = coords[0]
x_min, y_min = scale_dims(frame.shape, x_min, y_min)
x_max, y_max = scale_dims(frame.shape, x_max, y_max)
face_frame = frame[y_min:y_max, x_min:x_max]
if args.async_inference: #Run asynchronous inference
#facial landmark detection preprocess the input
start_time = time.perf_counter()
frame_for_input = fl_infer_network.preprocess_input(face_frame)
total_df.loc(axis=0)[fl_infer_network.short_name,precision]['input'] += time.perf_counter() - start_time
#Run landmarks inference asynchronously
# do not measure time, not relevant since it is asynchronous
fl_infer_network.predict(frame_for_input)
#Send cropped frame to head pose estimation
start_time = time.perf_counter()
frame_for_input = hp_infer_network.preprocess_input(face_frame)
total_df.loc(axis=0)[hp_infer_network.short_name,precision]['input'] += time.perf_counter() - start_time
#Head pose infer
hp_infer_network.predict(frame_for_input)
#Wait for async inferences to complete
if fl_infer_network.wait()==0:
start_time = time.perf_counter()
outputs = fl_infer_network.preprocess_output()
scaled_lm, frame = scale_landmarks(landmarks=outputs[fl_infer_network.output_name][0], image_shape=face_frame.shape, orig=(x_min, y_min),image=frame,draw=args.visualize)
total_df.loc(axis=0)[fl_infer_network.short_name,precision]['output'] += time.perf_counter() - start_time
if hp_infer_network.wait()==0:
start_time = time.perf_counter()
outputs = hp_infer_network.preprocess_output()
hp_angles = [outputs['angle_y_fc'][0], outputs['angle_p_fc'][0], outputs['angle_r_fc'][0]]
total_df.loc(axis=0)[hp_infer_network.short_name,precision]['output'] += time.perf_counter() - start_time
else: #Run synchronous inference
#facial landmark detection preprocess the input
total_df.loc(axis=0)[fl_infer_network.short_name,precision], outputs = run_pipeline(fl_infer_network, face_frame, total_df.loc(axis=0)[fl_infer_network.short_name,precision])
scaled_lm, frame = scale_landmarks(landmarks=outputs[fl_infer_network.output_name][0], image_shape=face_frame.shape, orig=(x_min, y_min),image=frame,draw=args.visualize)
#Send cropped frame to head pose estimation
total_df.loc(axis=0)[hp_infer_network.short_name, precision], outputs = run_pipeline(hp_infer_network, face_frame, total_df.loc(axis=0)[hp_infer_network.short_name,precision])
hp_angles = [outputs['angle_y_fc'][0], outputs['angle_p_fc'][0], outputs['angle_r_fc'][0]]
input_duration, predict_duration, output_duration, gaze = ge_infer_network.sync_infer(face_image=frame, landmarks=scaled_lm, head_pose_angles=[hp_angles])
total_df.loc(axis=0)[ge_infer_network.short_name,precision]['input'] += input_duration
total_df.loc(axis=0)[ge_infer_network.short_name,precision]['infer'] += predict_duration
total_df.loc(axis=0)[ge_infer_network.short_name,precision]['output'] += output_duration
if args.visualize:
#draw box around detected face
frame = draw_box(frame,(x_min, y_min), (x_max, y_max))
center_of_face = (x_min + face_frame.shape[1] / 2, y_min + face_frame.shape[0] / 2, 0)
#draw head pose axes
frame = draw_axes(frame, center_of_face, hp_angles[0], hp_angles[1], hp_angles[2], scale, focal_length)
#left eye gaze
frame = draw_axes(frame, scaled_lm[0], gaze[0][0], gaze[0][1], gaze[0][2], scale, focal_length)
#draw gaze vectors on right eye
frame = draw_axes(frame, scaled_lm[1], gaze[0][0], gaze[0][1], gaze[0][2], scale, focal_length)
#Move the mouse cursor
mc.move(gaze[0][0], gaze[0][1])
elif num_detections > 1:
single = False
not_enough=False
if not too_many:
text="Too many faces confuse me. I need to see only one face."
print(text)
too_many=True
else:
too_many = False
single=False
if not not_enough:
text="Is there anybody out there?"
print(text)
not_enough=True
if args.showvideo or args.visualize: cv2.imshow("Out", frame)
## End While Loop
runtime[precision] = time.perf_counter() - runtime_start
# Release the capture and destroy any OpenCV windows
print("Completed run for precision {}.".format(precision))
if args.benchmark:
rt_df = pd.DataFrame.from_dict(runtime, orient='index', columns=["Total runtime"])
rt_df['FPS'] = frame_count/rt_df["Total runtime"]
### End For Loop
cap.release()
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
#Collect Stats
#Setup dataframe
if args.benchmark:
output_bm(args, total_df, rt_df, frame_count)
except KeyboardInterrupt:
#Collect Stats
print("Detected keyboard interrupt")
if args.benchmark:
rt_df = | pd.DataFrame.from_dict(runtime, orient='index', columns=["Total runtime"]) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, University of Copenhagen"
__email__ = "<EMAIL>"
__license__ = "MIT"
import json
import sys
import click
import pandas as pd
from scipy.stats.distributions import chi2
ANCESTRIES = ["ALL", "ANA", "CHG", "WHG", "EHG"]
@click.command()
@click.option("--data", "data_tsv", metavar="<file>", help="SNP data", type=click.Path(exists=True), required=True)
@click.option("--columns", metavar="<col,col>", help="rsID columns", required=True)
@click.option("--info", "info_tsv", metavar="<file>", help="INFO scores", type=click.Path(exists=True), required=True)
@click.option("--dataset", metavar="<string>", help="Name of the dataset", required=True)
@click.option("--population", metavar="<string>", help="Name of the population", required=True)
@click.option("--mode", metavar="<string>", help="Clues mode", required=True)
@click.option("--ancestry", metavar="<string>", help="Ancestral path", type=click.Choice(ANCESTRIES), required=True)
@click.option("--output", metavar="<file>", type=click.Path(writable=True), help="Output filename", required=True)
def clues_report(data_tsv, columns, info_tsv, dataset, population, mode, ancestry, output):
"""
Generate a CLUES report
"""
# get the list of SNPs to load
data = pd.read_table(data_tsv)
info = | pd.read_table(info_tsv) | pandas.read_table |
import numpy as np
import pandas as pd
import scipy.sparse as sps
import matplotlib.pyplot as plt
from mlhub.pkg import mlask, mlcat
from IPython.display import display
from collections import Counter
from relm.mechanisms import LaplaceMechanism
mlcat("Differentially Private Release Mechanism", """\
This demo is based on the Jupyter Notebook from the RelM
package on github.
RelM can be readily utilised for the differentially private
release of data. In our demo database the records indicate the age
group of each patient who received a COVID-19 test on 9 March 2020.
Each patient is classified as belonging to one of eight age groups:
0-9, 10-19, 20-29, 30-39, 40-49, 50-59, 60-69, and 70+. One common
way to summarise this kind of data is with a histogram. That is, to
report the number of patients that were classified as belonging to
each age group.
For this demonstration we will create a histogram for the actual data
and then a histogram for differentially private data.
The data is first loaded from a csv file. It simply consists of two
columns, the first is the date and the scond is the age group.""")
# Read the raw data.
data = pd.read_csv("pcr_testing_age_group_2020-03-09.csv")
mlask(True, True)
# Compute the exact query responses.
exact_counts = data["age_group"].value_counts().sort_index()
values = exact_counts.values
mlcat("Data Sample", """\
Here's a random sample of some of the records:
""")
print(data.sample(10))
mlask(True, True)
mlcat("Laplace Mechanism", """\
The Laplace mechanism can be used to produce a differentially private
histogram that summarises the data without compromising the privacy of
the patients whose data comprise the database. To do so, Laplace noise
is added to the count for each age group and the noisy counts are
released instead of the exact counts.
The noise that is added results in perturbed values that are real
numbers rather than integers, and so if the results we are expecting
are integers, the results can be rounded without loss of privacy. We
do that here for our peturbed values.""")
# Create a differentially private release mechanism
epsilon = 0.1
mechanism = LaplaceMechanism(epsilon=epsilon, sensitivity=1.0)
perturbed_counts = mechanism.release(values=values.astype(np.float))
perturbed_counts = perturbed_counts.astype(np.int64)
mlask(True, True)
mlcat("Choosing Epsilon", f"""\
The magnitude of the differences between the exact counts
and perturbed counts depends only on the value of the privacy parameter,
epsilon. Smaller values of epsilon yield larger perturbations. Larger
perturbations yeild lower utility.
To understand this compare the actual and peturbed values below. If
the value of epsilon is not too small, then we expect that the two
histograms will look similar.
For our purposes we have chosen epsilon as {epsilon} resulting in the
following peturbation.
""")
# Extract the set of possible age groups.
age_groups = np.sort(data["age_group"].unique())
# Reformat the age group names for nicer display.
age_ranges = np.array([a.lstrip("AgeGroup_") for a in age_groups])
# Create a dataframe with both exact and perturbed counts.
column_names = ["Age Group", "Exact Counts", "Perturbed Counts"]
column_values = [age_ranges, values, perturbed_counts]
table = {k: v for (k, v) in zip(column_names, column_values)}
df = pd.DataFrame(table)
# Display as a table.
print(df)
mlask(True, True)
mlcat("Visualising the Perturbations", """\
The two histograms show that the peturbed values remain consistent
with the true values, whilst ensuring privacy.
""")
# Plot the two histograms as bar graphs.
df.plot(x="Age Group", title="Test Counts by Age Group", kind="bar", rot=0)
plt.show()
exit()
mlcat("Geometric Mechanism", """\
TODO
In this example, all of the exact counts are integers. That is because
they are the result of so-called counting queries. The perturbed counts
produced by the Laplace mechanism are real-valued. In some applications,
e.g. when some downstream processing assumes it will receive
integer-valued data, we may need the perturbed counts to be integers.
One way to achieve this is by simply rounding the outputs of the Laplace
mechanism to the nearest integer. Because this differentially private
release mechanisms are not affected by this kind of post-processing,
doing so will not affect any privacy guarantees.
Alternatively, we could use the geometric mechanism to compute the
permuted counts. The geometric mechanism is simply a discrete version of
the Laplace mechanism and it produces integer valued perturbations.
""")
mlask()
mlcat("", """Basic Usage
""")
mlask()
mlcat("", """
# Create a differentially private release mechanism
from relm.mechanisms import GeometricMechanism
mechanism = GeometricMechanism(epsilon=0.1, sensitivity=1.0)
perturbed_counts = mechanism.release(values=values)
""")
mlask()
# Create a differentially private release mechanism
from relm.mechanisms import GeometricMechanism
mechanism = GeometricMechanism(epsilon=0.1, sensitivity=1.0)
perturbed_counts = mechanism.release(values=values)
mlcat("", """Visualising the Results
As with the Laplace mechanism, we can plot the exact histogram alongside
the differentially private histogram to get an idea if we have used too
small a value for epsilon.
""")
mlask()
mlcat("", """
# Create a dataframe with both exact and perturbed counts
column_values = [age_ranges, values, perturbed_counts]
table = {k: v for (k, v) in zip(column_names, column_values)}
df = pd.DataFrame(table)
# Display the two histograms as a table
display(df.style.set_caption("Test Counts by Age Group"))
# Plot the two histograms as bar graphs
df.plot(x="Age Group", title="Test Counts by Age Group", kind="bar", rot=0)
plt.show()
""")
mlask()
# Create a dataframe with both exact and perturbed counts
column_values = [age_ranges, values, perturbed_counts]
table = {k: v for (k, v) in zip(column_names, column_values)}
df = pd.DataFrame(table)
# Display the two histograms as a table
display(df.style.set_caption("Test Counts by Age Group"))
# Plot the two histograms as bar graphs
df.plot(x="Age Group", title="Test Counts by Age Group", kind="bar", rot=0)
plt.show()
mlcat("", """Exponential Mechanism
""")
mlask()
mlcat("", """Basic Usage
The ExponentialMechanism does not lend itself to vectorised queries as
easily as the LaplaceMechanism or GeometricMechanism. So, to produce a
histogram query that is comparable to those discussed above we wrap the
query releases in a loop and compute them one at a time.
""")
mlask()
mlcat("", """
# Create a differentially private release mechanism
from relm.mechanisms import ExponentialMechanism
output_range = np.arange(2**10)
utility_function = lambda x: -abs(output_range - x)
perturbed_counts = np.empty(len(values), dtype=np.int)
for i, value in enumerate(values.astype(np.float)):
mechanism = ExponentialMechanism(epsilon=0.1,
utility_function=utility_function,
sensitivity=1.0,
output_range=output_range)
perturbed_counts[i] = mechanism.release(values=value)
""")
mlask()
# Create a differentially private release mechanism
from relm.mechanisms import ExponentialMechanism
output_range = np.arange(2**10)
utility_function = lambda x: -abs(output_range - x)
perturbed_counts = np.empty(len(values), dtype=np.int)
for i, value in enumerate(values.astype(np.float)):
mechanism = ExponentialMechanism(epsilon=0.1,
utility_function=utility_function,
sensitivity=1.0,
output_range=output_range)
perturbed_counts[i] = mechanism.release(values=value)
mlcat("", """Visualising the Results
""")
mlask()
mlcat("", """
# Create a dataframe with both exact and perturbed counts
column_values = [age_ranges, values, perturbed_counts]
table = {k: v for (k, v) in zip(column_names, column_values)}
df = pd.DataFrame(table)
# Display the two histograms as a table
display(df.style.set_caption("Test Counts by Age Group"))
# Plot the two histograms as bar graphs
df.plot(x="Age Group", title="Test Counts by Age Group", kind="bar", rot=0)
plt.show()
""")
mlask()
# Create a dataframe with both exact and perturbed counts
column_values = [age_ranges, values, perturbed_counts]
table = {k: v for (k, v) in zip(column_names, column_values)}
df = pd.DataFrame(table)
# Display the two histograms as a table
display(df.style.set_caption("Test Counts by Age Group"))
# Plot the two histograms as bar graphs
df.plot(x="Age Group", title="Test Counts by Age Group", kind="bar", rot=0)
plt.show()
mlcat("", """Sparse Mechanisms
We currently have four mechanisms that take advantage of sparsity to
answer more queries about the data for a given privacy budget. All of
these mechanisms compare noisy query responses to a noisy threshold
value. If a noisy response does not exceed the noisy threshold, then the
mechanism reports only that the value did not exceed the threshold.
Otherwise, the mechanism reports that the value exceeded the threshold.
Furthermore, in the latter case some mechanisms release more information
about the underlying exact count. This extra information is computed
using some other differentially private mechanism and therefore imposes
some additional privacy costs.
""")
mlask()
mlcat("", """Data Wrangling
All three of our mechanims share an input format. We require a sequence
of exact query responses and a threshold value to which these responses
will be compared.
""")
mlask()
mlcat("", """
# Read the raw data
fp = '20200811_QLD_dummy_dataset_individual_v2.xlsx'
data = pd.read_excel(fp)
# Limit our attention to the onset date column
data.drop(list(data.columns[1:]), axis=1, inplace=True)
# Remove data with no onset date listed
mask = data['ONSET_DATE'].notna()
data = data[mask]
# Compute the exact query responses
queries = [(pd.Timestamp('2020-01-01') + i*pd.Timedelta('1d'),) for i in range(366)]
exact_counts = dict.fromkeys(queries, 0)
exact_counts.update(data.value_counts())
dates, values = zip(*sorted(exact_counts.items()))
values = np.array(values, dtype=np.float64)
""")
mlask()
# Read the raw data
fp = '20200811_QLD_dummy_dataset_individual_v2.xlsx'
data = pd.read_excel(fp)
# Limit our attention to the onset date column
data.drop(list(data.columns[1:]), axis=1, inplace=True)
# Remove data with no onset date listed
mask = data['ONSET_DATE'].notna()
data = data[mask]
# Compute the exact query responses
queries = [( | pd.Timestamp('2020-01-01') | pandas.Timestamp |
import pandas as pd
import numpy as np
import yfinance as yf #Yahoo Finance API
from datetime import datetime as dt, date
import time
df = pd.DataFrame()
tickers = ["^KS11", "^GSPC", "^N225", "^HSI", "^N100", "^FTSE", "^DJI"]
start_day = dt(2019, 12, 1)
today = str(date.today())
kospi = yf.download('^KS11', start=dt(2005, 1, 1), end=today)
def get_all_index_data(df, tickers, start_day, today):
for ticker in tickers:
try:
print('Stealing from Yahoo Finance ......................\n')
print('Working on a ticker: ', ticker, '......................\n')
ticker_df = yf.download(ticker, start=start_day, end=today)
time.sleep(1)
df_temp = ticker_df.reset_index()
df_temp = df_temp[['Date','Adj Close']]
df_temp = df_temp.rename(columns={'Adj Close': ticker})
df = df.join(df_temp, how='outer', rsuffix='Date')
except IndexError:
print('value error')
df = df.loc[:,~df.columns.str.contains('DateDate', case=False)]
df = df.dropna()
df.columns = df.columns.str.replace('^', '')
print('.....................Done ......................')
return df
data = get_all_index_data(df, tickers, start_day, today)
def normalize(df):
df1 = df.iloc[:, 1:].apply(lambda x: np.log(x) - np.log(x.shift(1)))
df1['Date'] = df['Date']
df1 = df1[list(df.columns)]
return df1
def plot(data):
plt.figure(figsize=(15, 10))
plt.plot(data.Date, data.KS11, label='KOSPI', color='blue')
plt.plot(data.Date, data.GSPC, label='S&P 500', color='orange')
plt.plot(data.Date, data.N225, label='Nikkei 225', color='magenta')
plt.plot(data.Date, data.HSI, label='Hang Seng Index', color='green')
plt.plot(data.Date, data.N100, label='Euro 100', color='yellow')
plt.plot(data.Date, data.FTSE, label='FTSE', color='grey')
plt.legend(loc='upper left')
#plt.savefig('SMA-KOSPI.png')
plt.show()
#Interactive Graph 시각화 with plotly
import plotly.express as px
import plotly.graph_objects as go
##로그 변화율 interactive 시각화(data_fill이용)
log_data_fill = log_diff(data_fill)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=log_data_fill.index,
y=log_data_fill.FTSE,
mode='lines',
name='FTSE'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.GSPC,
mode='lines',
name='GSPC(S&P 500)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.HSI,
mode='lines',
name='HSI(Hangseng'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.KS11,
mode='lines',
name='KS11(KOSPI)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.N100,
mode='lines',
name='N100(EuroNext100)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.N225,
mode='lines',
name='N225(Nikkei225)'))
#정규화 지표값 추이 interactive 시각화
standardize_data_fill = standardize(data_fill)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=standardize_data_fill.index,
y=standardize_data_fill.FTSE,
mode='lines',
name='FTSE'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.GSPC,
mode='lines',
name='GSPC(S&P 500)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.HSI,
mode='lines',
name='HSI(Hangseng'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.KS11,
mode='lines',
name='KS11(KOSPI)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.N100,
mode='lines',
name='N100(EuroNext100)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.N225,
mode='lines',
name='N225(Nikkei225)'))
world_aggregated = 'https://raw.githubusercontent.com/datasets/covid-19/master/data/worldwide-aggregated.csv'
countries_aggregated= 'https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv'
world = | pd.read_csv(world_aggregated) | pandas.read_csv |
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
'''A class to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
def get_authorization(self, user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config = configparser.ConfigParser()
config.read(user_ini)
username = config['whoop']['username']
password = config['whoop']['password']
headers = {
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False
}
auth = requests.post("https://api-7.whoop.com/oauth/token",
json=headers)
if auth.status_code == 200:
content = auth.json()
user_id = content['user']['id']
token = content['access_token']
start_time = content['user']['profile']['createdAt']
self.whoop_id = user_id
self.auth_code = 'bearer ' + token
self.start_datetime = start_time
print("Whoop: Authentication successful")
else:
print(
"Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
all_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
all_data = pd.concat([all_data, data])
all_data.reset_index(drop=True, inplace=True)
## fixing the day column so it's not a list
all_data['days'] = all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days": 'day'}, inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols = [
'qualityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col] = all_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
## Making nap variable
all_data['nap_duration'] = all_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
all_data.drop(['sleep.naps'], axis=1, inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
self.all_data = all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = self.sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if | pd.isna(x) | pandas.isna |
#!/usr/bin/env python
import pandas as pd
pd.options.mode.chained_assignment = None
import json
import os
import yaml
try: modulepath = os.path.dirname(os.path.realpath(__file__)).replace('\\', '/') + '/'
except NameError: modulepath = 'stewi/'
output_dir = modulepath + 'output/'
data_dir = modulepath + 'data/'
reliability_table = pd.read_csv(data_dir + 'DQ_Reliability_Scores_Table3-3fromERGreport.csv',
usecols=['Source', 'Code', 'DQI Reliability Score'])
def config():
configfile = None
print(modulepath)
with open(modulepath + 'config.yaml', mode='r') as f:
configfile = yaml.load(f,Loader=yaml.FullLoader)
return configfile
inventory_metadata = {
'SourceType': 'Static File', #Other types are "Web service"
'SourceFileName':'NA',
'SourceURL':'NA',
'SourceVersion':'NA',
'SourceAquisitionTime':'NA',
'StEWI_versions_version': '0.9'
}
inventory_single_compartments = {"NEI":"air","RCRAInfo":"waste"}
def url_is_alive(url):
"""
Checks that a given URL is reachable.
:param url: A URL
:rtype: bool
"""
import urllib
request = urllib.request.Request(url)
request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(request)
return True
except urllib.request.HTTPError:
return False
except urllib.error.URLError:
return False
def download_table(filepath, url, get_time=False, zip_dir=''):
import os.path, time
if not os.path.exists(filepath):
if url[-4:].lower() == '.zip':
import zipfile, requests, io
table_request = requests.get(url).content
zip_file = zipfile.ZipFile(io.BytesIO(table_request))
zip_file.extractall(zip_dir)
elif 'xls' in url.lower() or url.lower()[-5:] == 'excel':
import urllib, shutil
with urllib.request.urlopen(url) as response, open(filepath, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
elif 'json' in url.lower():
import pandas as pd
pd.read_json(url).to_csv(filepath, index=False)
if get_time:
try: retrieval_time = os.path.getctime(filepath)
except: retrieval_time = time.time()
return time.ctime(retrieval_time)
def set_dir(directory_name):
path = os.path.realpath(directory_name + '/').replace('\\', '/') + '/'
if os.path.exists(path): pathname = path
else:
pathname = path
os.makedirs(pathname)
return pathname
def import_table(path_or_reference, skip_lines=0, get_time=False):
import time
if '.core.frame.DataFrame' in str(type(path_or_reference)): import_file = path_or_reference
elif path_or_reference[-3:].lower() == 'csv':
import_file = pd.read_csv(path_or_reference)
elif 'xls' in path_or_reference[-4:].lower():
import_file = pd.ExcelFile(path_or_reference)
import_file = {sheet: import_file.parse(sheet, skiprows=skip_lines) for sheet in import_file.sheet_names}
if get_time:
try: retrieval_time = os.path.getctime(path_or_reference)
except: retrieval_time = time.time()
return import_file, retrieval_time
return import_file
def drop_excel_sheets(excel_dict, drop_sheets):
for s in drop_sheets:
try:
excel_dict.pop(s)
except KeyError:
continue
return excel_dict
def filter_inventory(inventory, criteria_table, filter_type, marker=None):
"""
:param inventory_df: DataFrame to be filtered
:param criteria_file: Can be a list of items to drop/keep, or a table of FlowName, FacilityID, etc. with columns
marking rows to drop
:param filter_type: drop, keep, mark_drop, mark_keep
:param marker: Non-empty fields are considered marked by default. Option to specify 'x', 'yes', '1', etc.
:return: DataFrame
"""
inventory = import_table(inventory); criteria_table = import_table(criteria_table)
if filter_type in ('drop', 'keep'):
for criteria_column in criteria_table:
for column in inventory:
if column == criteria_column:
criteria = set(criteria_table[criteria_column])
if filter_type == 'drop': inventory = inventory[~inventory[column].isin(criteria)]
elif filter_type == 'keep': inventory = inventory[inventory[column].isin(criteria)]
elif filter_type in ('mark_drop', 'mark_keep'):
standard_format = import_table(data_dir + 'flowbyfacility_format.csv')
must_match = standard_format['Name'][standard_format['Name'].isin(criteria_table.keys())]
for criteria_column in criteria_table:
if criteria_column in must_match: continue
for field in must_match:
if filter_type == 'mark_drop':
if marker is None: inventory = inventory[~inventory[field].isin(criteria_table[field][criteria_table[criteria_column] != ''])]
else: inventory = inventory[~inventory[field].isin(criteria_table[field][criteria_table[criteria_column] == marker])]
if filter_type == 'mark_keep':
if marker is None: inventory = inventory[inventory[field].isin(criteria_table[field][criteria_table[criteria_column] != ''])]
else: inventory = inventory[inventory[field].isin(criteria_table[field][criteria_table[criteria_column] == marker])]
return inventory.reset_index(drop=True)
def filter_states(inventory_df, include_states=True, include_dc=True, include_territories=False):
states_df = pd.read_csv(data_dir + 'state_codes.csv')
states_filter = | pd.DataFrame() | pandas.DataFrame |
"""Multiple Factor Analysis (MFA)"""
import itertools
import numpy as np
import pandas as pd
from sklearn import utils
from . import mca
from . import pca
class MFA(pca.PCA):
def __init__(self, groups=None, rescale_with_mean=True, rescale_with_std=True, n_components=2,
n_iter=10, copy=True, random_state=None, engine='auto'):
super().__init__(
rescale_with_mean=rescale_with_mean,
rescale_with_std=rescale_with_std,
n_components=n_components,
n_iter=n_iter,
copy=copy,
random_state=random_state,
engine=engine
)
self.groups = groups
def fit(self, X, y=None):
# Checks groups are provided
if self.groups is None:
raise ValueError('Groups have to be specified')
# Check input
utils.check_array(X, dtype=[str, np.number])
# Make sure X is a DataFrame for convenience
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
# Check group types are consistent
self.all_nums_ = {}
for name, cols in sorted(self.groups.items()):
all_num = all( | pd.api.types.is_numeric_dtype(X[c]) | pandas.api.types.is_numeric_dtype |
import inspect
import json
import os
import re
from urllib.parse import quote
from urllib.request import urlopen
import pandas as pd
import param
from .configuration import DEFAULTS
class TutorialData(param.Parameterized):
label = param.String(allow_None=True)
raw = param.Boolean()
verbose = param.Boolean()
return_meta = param.Boolean()
use_cache = param.Boolean()
_source = None
_base_url = None
_data_url = None
_description = None
def __init__(self, **kwds):
super().__init__(**kwds)
self._cache_dir = DEFAULTS["cache_kwds"]["directory"]
self._remove_href = re.compile(r"<(a|/a).*?>")
os.makedirs(self._cache_dir, exist_ok=True)
self._init_owid()
@property
def _cache_path(self):
cache_file = f"{self.label}.pkl"
return os.path.join(self._cache_dir, cache_file)
@property
def _dataset_options(self):
options = set([])
for method in dir(self):
if method.startswith("_load_") and "owid" not in method:
options.add(method.replace("_load_", ""))
return list(options) + list(self._owid_labels_df.columns)
@staticmethod
def _specify_cache(cache_path, **kwds):
if kwds:
cache_ext = "_".join(
f"{key}={val}".replace(os.sep, "") for key, val in kwds.items()
)
cache_path = f"{os.path.splitext(cache_path)[0]}_{cache_ext}.pkl"
return cache_path
def _cache_dataset(self, df, cache_path=None, **kwds):
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
df.to_pickle(cache_path)
def _read_cache(self, cache_path=None, **kwds):
if not self.use_cache:
return None
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
try:
return pd.read_pickle(cache_path)
except Exception:
if os.path.exists(cache_path):
os.remove(cache_path)
return None
@staticmethod
def _snake_urlify(s):
# Replace all hyphens with underscore
s = s.replace(" - ", "_").replace("-", "_")
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", "", s)
# Replace all runs of whitespace with a underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
def _init_owid(self):
cache_path = os.path.join(self._cache_dir, "owid_labels.pkl")
self._owid_labels_df = self._read_cache(cache_path=cache_path)
if self._owid_labels_df is not None:
return
owid_api_url = (
"https://api.github.com/"
"repos/owid/owid-datasets/"
"git/trees/master?recursive=1"
)
with urlopen(owid_api_url) as f:
sources = json.loads(f.read().decode("utf-8"))
owid_labels = {}
owid_raw_url = "https://raw.githubusercontent.com/owid/owid-datasets/master/"
for source_tree in sources["tree"]:
path = source_tree["path"]
if ".csv" not in path and ".json" not in path:
continue
label = "owid_" + self._snake_urlify(path.split("/")[-2].strip())
if label not in owid_labels:
owid_labels[label] = {}
url = f"{owid_raw_url}/{quote(path)}"
if ".csv" in path:
owid_labels[label]["data"] = url
elif ".json" in path:
owid_labels[label]["meta"] = url
self._owid_labels_df = pd.DataFrame(owid_labels)
self._cache_dataset(self._owid_labels_df, cache_path=cache_path)
def _load_owid(self, **kwds):
self._data_url = self._owid_labels_df[self.label]["data"]
meta_url = self._owid_labels_df[self.label]["meta"]
with urlopen(meta_url) as response:
meta = json.loads(response.read().decode())
self.label = meta["title"]
self._source = (
" & ".join(source["dataPublishedBy"] for source in meta["sources"])
+ " curated by Our World in Data (OWID)"
)
self._base_url = (
" & ".join(source["link"] for source in meta["sources"])
+ " through https://github.com/owid/owid-datasets"
)
self._description = re.sub(self._remove_href, "", meta["description"])
df = self._read_cache(**kwds)
if df is None:
df = | pd.read_csv(self._data_url, **kwds) | pandas.read_csv |
import numpy as np
import pandas as pd
def main_post(s_name,orig_data):
D = 20
print("Max Moment Order", D)
d = np.genfromtxt("moments.txt", delimiter = "\t")[:,:-1]
frame = []
cell = []
moment = []
for i in range(len(d)):
f = d[i][0]
c = d[i][1]
m = d[i][2:]
ff = [f] * len(m)
cc = [c] * len(m)
frame.append(ff)
cell.append(cc)
moment.append(m)
frame_flat = [item for sublist in frame for item in sublist]
cell_flat = [item for sublist in cell for item in sublist]
moment_flat = np.array([item for sublist in moment for item in sublist]) * 255
data_l = list(zip(frame_flat,cell_flat,moment_flat))
df = pd.DataFrame(data = data_l)
df.columns = ["frame_id","cell_id","moment_value"]
a = []
for i in range(D+1):
a.append(i)
even = []
odd = []
for i in a:
if i % 2 == 0:
even.append(i)
if i % 2 != 0:
odd.append(i)
even_zm = []
even_zn = []
for i in even:
for j in even:
if j <= i:
even_zm.append(i)
even_zn.append(j)
odd_zm = []
odd_zn = []
for i in odd:
for j in odd:
if j <= i:
odd_zm.append(i)
odd_zn.append(j)
evenl = list(zip(even_zm,even_zn))
oddl = list(zip(odd_zm,odd_zn))
totall = evenl + oddl
df_index = pd.DataFrame(data = totall)
df_index.columns = ["moment","az_angle"]
df_index_s = df_index.sort_values(["moment","az_angle"], ascending = [True,True])
df_index_s = df_index_s[2:]
df_2 = pd.concat([df_index_s] * len(d))
df_2 = df_2.reset_index(drop = True)
final = | pd.concat([df, df_2],axis=1) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = | tm.box_expected(expected, box, transpose=transpose) | pandas.util.testing.box_expected |
"""
This network uses the last 26 observations of gwl, tide, and rain to predict the next 18
values of gwl for well MMPS-175
"""
import pandas as pd
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Activation
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import random as rn
import os
matplotlib.rcParams.update({'font.size': 8})
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# def create_weights(train_labels):
# obs_mean = np.mean(train_labels, axis=-1)
# obs_mean = np.reshape(obs_mean, (n_batch, 1))
# obs_mean = np.repeat(obs_mean, n_ahead, axis=1)
# weights = (train_labels + obs_mean) / (2 * obs_mean)
# return weights
#
#
# def sq_err(y_true, y_pred):
# return K.square(y_pred - y_true)
#
#
def mse(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=-1)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def pw_rmse(y_true, y_pred):
# num_rows, num_cols = K.int_shape(y_true)[0], K.int_shape(y_true)[1]
# print(num_rows, num_cols)
act_mean = K.mean(y_true, axis=-1)
# print("act_mean 1 is:", act_mean)
act_mean = K.reshape(act_mean, (n_batch, 1))
# print("act_mean is: ", act_mean)
mean_repeat = K.repeat_elements(act_mean, n_ahead, axis=1)
# print("mean_repeat is:", mean_repeat)
weights = (y_true+mean_repeat)/(2*mean_repeat)
return K.sqrt(K.mean((K.square(y_pred - y_true)*weights), axis=-1))
# configure network
n_lags = 116
n_ahead = 18
n_features = 3
n_train = 52551
n_test = 8359
n_epochs = 500
n_neurons = 10
n_batch = 52551
# load dataset
dataset_raw = read_csv("C:/Users/<NAME>/Documents/HRSD GIS/Site Data/MMPS_175_no_blanks.csv",
index_col=None, parse_dates=True, infer_datetime_format=True)
# dataset_raw = dataset_raw[0:len(dataset_raw)-1]
# split datetime column into train and test for plots
train_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
# drop columns we don't want to predict
dataset = dataset_raw.drop(dataset_raw.columns[[0]], axis=1)
values = dataset.values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_scaled = gwl_scaler.fit_transform(gwl)
tide_scaled = tide_scaler.fit_transform(tide)
rain_scaled = rain_scaler.fit_transform(rain)
scaled = np.concatenate((gwl_scaled, tide_scaled, rain_scaled), axis=1)
# frame as supervised learning
reframed = series_to_supervised(scaled, n_lags, n_ahead)
values = reframed.values
# split into train and test sets
train, test = values[:n_train, :], values[n_train:, :]
# split into input and outputs
input_cols, label_cols = [], []
for i in range(values.shape[1]):
if i <= n_lags*n_features-1:
input_cols.append(i)
elif i % 3 != 0:
input_cols.append(i)
elif i % 3 == 0:
label_cols.append(i)
train_X, train_y = train[:, input_cols], train[:, label_cols] # [start:stop:increment, (cols to include)]
test_X, test_y = test[:, input_cols], test[:, label_cols]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#create weights for peak weighted rmse loss function
# weights = create_weights(train_y)
# load model here if needed
# model = keras.models.load_model("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5",
# custom_objects={'pw_rmse':pw_rmse})
# set random seeds for model reproducibility as suggested in:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# define model
model = Sequential()
model.add(LSTM(units=n_neurons, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True))
# model.add(LSTM(units=n_neurons))
model.add(Dropout(.1))
model.add(Dense(input_dim=n_neurons, activation='linear', units=n_ahead))
# model.add(Activation('linear'))
model.compile(loss=pw_rmse, optimizer='adam')
tbCallBack = keras.callbacks.TensorBoard(log_dir='C:/tmp/tensorflow/keras/logs', histogram_freq=0, write_graph=True,
write_images=False)
earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.0001, patience=5, verbose=1, mode='auto')
history = model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False,
callbacks=[earlystop, tbCallBack])
# save model
# model.save("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5")
# plot model history
# plt.plot(history.history['loss'], label='train')
# # plt.plot(history.history['val_loss'], label='validate')
# # plt.legend()
# # ticks = np.arange(0, n_epochs, 1) # (start,stop,increment)
# # plt.xticks(ticks)
# plt.xlabel("Epochs")
# plt.ylabel("Loss")
# plt.tight_layout()
# plt.show()
# make predictions
trainPredict = model.predict(train_X)
yhat = model.predict(test_X)
inv_trainPredict = gwl_scaler.inverse_transform(trainPredict)
inv_yhat = gwl_scaler.inverse_transform(yhat)
inv_y = gwl_scaler.inverse_transform(test_y)
inv_train_y = gwl_scaler.inverse_transform(train_y)
# save test predictions and observed
inv_yhat_df = DataFrame(inv_yhat)
inv_yhat_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/predicted.csv")
inv_y_df = DataFrame(inv_y)
inv_y_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/observed.csv")
# calculate RMSE for whole test series (each forecast step)
RMSE_forecast = []
for i in np.arange(0, n_ahead, 1):
rmse = sqrt(mean_squared_error(inv_y[:, i], inv_yhat[:, i]))
RMSE_forecast.append(rmse)
RMSE_forecast = DataFrame(RMSE_forecast)
rmse_avg = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Average Test RMSE: %.3f' % rmse_avg)
RMSE_forecast.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/RMSE.csv")
# calculate RMSE for each individual time step
RMSE_timestep = []
for i in np.arange(0, inv_yhat.shape[0], 1):
rmse = sqrt(mean_squared_error(inv_y[i, :], inv_yhat[i, :]))
RMSE_timestep.append(rmse)
RMSE_timestep = DataFrame(RMSE_timestep)
# plot rmse vs forecast steps
plt.plot(RMSE_forecast, 'ko')
ticks = np.arange(0, n_ahead, 1) # (start,stop,increment)
plt.xticks(ticks)
plt.ylabel("RMSE (ft)")
plt.xlabel("Forecast Step")
plt.tight_layout()
plt.show()
# plot training predictions
plt.plot(inv_train_y[:, 0], label='actual')
plt.plot(inv_trainPredict[:, 0], label='predicted')
plt.xlabel("Timestep")
plt.ylabel("GWL (ft)")
plt.title("Training Predictions")
# ticks = np.arange(0, n_ahead, 1)
# plt.xticks(ticks)
plt.legend()
plt.tight_layout()
plt.show()
# plot test predictions for Hermine, Julia, and Matthew
dates = DataFrame(test_dates[["Datetime"]][n_lags:-n_ahead+1])
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates = dates[5700:8000]
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates_9 = DataFrame(test_dates[["Datetime"]][n_lags+8:-n_ahead+9])
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_9 = dates_9[5700:8000]
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_18 = DataFrame(test_dates[["Datetime"]][n_lags+17:])
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
dates_18 = dates_18[5700:8000]
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(6.5, 3))
x_ticks = np.arange(0, 2300, 168)
ax1.plot(inv_y[5700:8000, 0], 'k-', label='Obs.')
ax1.plot(inv_yhat[5700:8000, 0], 'k:', label='Pred.')
ax1.set_xticks(x_ticks)
ax1.set_xticklabels(dates['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax2.plot(inv_y[5700:8000, 8], 'k-', label='Obs.')
ax2.plot(inv_yhat[5700:8000, 8], 'k:', label='Pred.')
ax2.set_xticks(x_ticks)
ax2.set_xticklabels(dates_9['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax3.plot(inv_y[5700:8000, 17], 'k-', label='Obs.')
ax3.plot(inv_yhat[5700:8000, 17], 'k:', label='Pred.')
ax3.set_xticks(x_ticks)
ax3.set_xticklabels(dates_18['Datetime'][x_ticks].dt.strftime('%Y-%m-%d'), rotation='vertical')
ax1.set(ylabel="GWL (ft)", title='t+1')
ax2.set(title='t+9')
ax3.set(title='t+18')
plt.legend()
plt.tight_layout()
plt.show()
# fig.savefig('C:/Users/<NAME>/Documents/HRSD GIS/Presentation Images/Paper Figures/MMPS175_preds.tif', dpi=300)
# create dfs of timestamps, obs, and pred data to find peak values and times
obs_t1 = np.reshape(inv_y[5700:8000, 0], (2300, 1))
pred_t1 = np.reshape(inv_yhat[5700:8000, 0], (2300,1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = | DataFrame(df_t1, index=None, columns=["obs", "pred"]) | pandas.DataFrame |
import pickle
from ds import *
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import numpy as np
from sklearn.impute import SimpleImputer
data_as_list = []
pickle_files = ['data/dataset_0_10000.pkl', 'data/dataset_10000_20000.pkl', 'data/dataset_20000_30000.pkl', 'data/dataset_30000_40000.pkl']
for pickle_file in pickle_files:
pickle_off = open(pickle_file, "rb")
emp = pickle.load(pickle_off)
title_vec_len = emp[0].features.title.vector.shape[0]
story_vec_len = emp[0].features.story.vector.shape[0]
for dataobject in emp:
category = dataobject.features.category
goal = dataobject.features.goal
created = dataobject.features.created
title_vec = dataobject.features.title.vector
story_vec = dataobject.features.story.vector
amt_raised = dataobject.result
feature_vec = [category, goal, created]
feature_vec.extend(title_vec)
feature_vec.extend(story_vec)
feature_vec.append(amt_raised)
data_as_list.append(feature_vec)
headings = ["category", "goal", "created"]
headings.extend(["title_{}".format(i) for i in range(0, title_vec_len)])
headings.extend(["story_{}".format(i) for i in range(0, story_vec_len)])
headings.append("amt_raised")
df = pd.DataFrame(data_as_list, columns = headings)
df['category'] = pd.Categorical(df['category'])
dfDummies = pd.get_dummies(df['category'], prefix='category')
df = | pd.concat([df, dfDummies], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import datetime as dt
import os
import zipfile
from datetime import datetime, timedelta
from urllib.parse import urlparse
study_prefix = "U01"
def get_user_id_from_filename(f):
#Get user id from from file name
return(f.split(".")[3])
def get_file_names_from_zip(z, file_type=None, prefix=study_prefix):
#Extact file list
file_list = list(z.filelist)
if(filter is None):
filtered = [f.filename for f in file_list if (prefix in f.filename) and (".csv" in f.filename)]
else:
filtered = [f.filename for f in file_list if (file_type in f.filename and prefix in f.filename)]
return(filtered)
def get_data_catalog(catalog_file, data_file, data_dir, dict_dir):
dc=pd.read_csv(catalog_file)
dc=dc.set_index("Data Product Name")
dc.data_file=data_dir+data_file #add data zip file field
dc.data_dir=data_dir #add data zip file field
dc.dict_dir=dict_dir #add data distionary directory field
return(dc)
def get_data_dictionary(data_catalog, data_product_name):
dictionary_file = data_catalog.dict_dir + data_catalog.loc[data_product_name]["Data Dictionary File Name"]
dd=pd.read_csv(dictionary_file)
dd=dd.set_index("ElementName")
dd.data_file_name = data_catalog.loc[data_product_name]["Data File Name"] #add data file name pattern field
dd.name = data_product_name #add data product name field
dd.index_fields = data_catalog.loc[data_product_name]["Index Fields"] #add index fields
dd.description = data_catalog.loc[data_product_name]["Data Product Description"]
return(dd)
def get_df_from_zip(file_type,zip_file, participants):
#Get participant list from participants data frame
participant_list = list(participants["Participant ID"])
#Open data zip file
z = zipfile.ZipFile(zip_file)
#Get list of files of specified type
file_list = get_file_names_from_zip(z, file_type=file_type)
#Open file inside zip
dfs=[]
for file_name in file_list:
sid = get_user_id_from_filename(file_name)
if(sid in participant_list):
f = z.open(file_name)
file_size = z.getinfo(file_name).file_size
if file_size > 0:
df = pd.read_csv(f, low_memory=False)
df["Subject ID"] = sid
dfs.append(df)
else:
print('warning %s is empty (size = 0)' % file_name)
df = | pd.concat(dfs) | pandas.concat |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: | pd.Timestamp("2013-04-28 00:00:00") | pandas.Timestamp |
# Define functions used in the landscape-area-measurements notebook
import numpy as np
import json
import requests
import pandas as pd
import geopandas as gpd
import numpy.ma as ma
import xarray as xr
import rioxarray as rxr
import rasterio as rio
from rasterio.crs import CRS
from shapely.geometry import Polygon, shape, mapping
def clean_array_plot(xr_obj):
"""Takes a single xarray object as an input and produces a cleaned numpy
array output for plotting.
Parameters
----------
xr_obj : xarray DataArray
xarray object containing null values
Returns
----------
masked_array : numpy array
masked numpy array
"""
masked_array = ma.masked_array(xr_obj.values, xr_obj.isnull())
return masked_array
def get_cii_parcel_polygons(naip_tile):
"""Retrieves non-residential/CII parcel polygons for extent of
input NAIP tile using City of Los Angeles : LA County
Parcels API.
Parameters
----------
naip_tile : xarray DataArray
NAIP tile to use as bounds of API query
Returns
-------
cii_parcel_gdf : GeoDataFrame
GeoDataFrame of all CII parcel polygons in tile area
"""
cii_uses = ['Recreational',
'Commercial',
'Insitutitonal',
'Government',
'Industrial']
crs_wgs84 = CRS.from_string('EPSG:4326')
naip_tile_reproj = naip_tile.rio.reproject(crs_wgs84)
(xmin, ymin, xmax, ymax) = naip_tile_reproj.rio.bounds()
center_geom_str = "CENTER_LAT%20%3E%3D%20"+str(ymin)+"%20AND%20CENTER_LAT%20%3C%3D%20"+str(
ymax)+"%20AND%20CENTER_LON%20%3E%3D%20"+str(xmin)+"%20AND%20CENTER_LON%20%3C%3D%20"+str(xmax)
parcel_gdf_list = []
for use in cii_uses:
use_parcel_url = "https://public.gis.lacounty.gov/public/rest/services/LACounty_Cache/LACounty_Parcel/MapServer/0/query?where=" + \
center_geom_str+"AND%20UseType%3D'"+use + \
"'&outFields=APN,SitusCity,SitusZIP,UseType,UseDescription,LAT_LON,OBJECTID&outSR=4326&f=json"
try:
return_dict = json.loads(requests.get(use_parcel_url).text)
except ConnectionError:
print('Connection could not be made to database.')
parcel_df = pd.DataFrame(columns=[x["name"]
for x in return_dict["fields"]])
parcel_df.insert(loc=7, column='geometry', value=np.nan)
parcel_df['geometry'] = parcel_df['geometry'].astype('geometry')
for i in np.arange(0, len(return_dict['features']), 1):
att_dict = return_dict['features'][i]['attributes']
parcel_df = parcel_df.append(att_dict, ignore_index=True)
geom_dict = return_dict['features'][i]['geometry']
geom_df = pd.DataFrame(
data=[str(geom_dict['rings'])], columns=['geometry'])
poly_data = Polygon(eval(geom_df.geometry.loc[0])[0])
poly_gdf = gpd.GeoSeries(poly_data)
parcel_df.loc[i]['geometry'] = poly_gdf[0]
parcel_gdf = gpd.GeoDataFrame(parcel_df,
geometry=parcel_df['geometry'],
crs=crs_wgs84)
parcel_gdf_list.append(parcel_gdf)
cii_parcel_gdf = | pd.concat(parcel_gdf_list) | pandas.concat |
# -*- coding: utf-8 -*-
__author__ = "<NAME> (Srce Cde)"
__license__ = "GPL 3.0"
__email__ = "<EMAIL>"
__maintainer__ = "<NAME> (Srce Cde)"
from collections import defaultdict
import json
import pandas as pd
from ..helper import openURL
from ..config import YOUTUBE_COMMENT_URL, SAVE_PATH
class VideoComment:
def __init__(self, maxResults, videoId, key):
self.comments = defaultdict(list)
self.replies = defaultdict(list)
self.params = {
"part": "snippet,replies",
"maxResults": maxResults,
"videoId": videoId,
"textFormat": "plainText",
"key": key,
}
def load_comments(self, mat):
for item in mat["items"]:
comment = item["snippet"]["topLevelComment"]
self.comments["id"].append(comment["id"])
self.comments["comment"].append(comment["snippet"]["textDisplay"])
self.comments["author"].append(comment["snippet"]["authorDisplayName"])
self.comments["likecount"].append(comment["snippet"]["likeCount"])
self.comments["publishedAt"].append(comment["snippet"]["publishedAt"])
if "replies" in item.keys():
for reply in item["replies"]["comments"]:
self.replies["parentId"].append(reply["snippet"]["parentId"])
self.replies["authorDisplayName"].append(
reply["snippet"]["authorDisplayName"]
)
self.replies["replyComment"].append(reply["snippet"]["textDisplay"])
self.replies["publishedAt"].append(reply["snippet"]["publishedAt"])
self.replies["likeCount"].append(reply["snippet"]["likeCount"])
def get_video_comments(self):
url_response = json.loads(openURL(YOUTUBE_COMMENT_URL, self.params))
nextPageToken = url_response.get("nextPageToken")
self.load_comments(url_response)
while nextPageToken:
self.params.update({"pageToken": nextPageToken})
url_response = json.loads(openURL(YOUTUBE_COMMENT_URL, self.params))
nextPageToken = url_response.get("nextPageToken")
self.load_comments(url_response)
# self.create_df()
def create_df(self):
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
base_dir = "../input/"
train_dir = os.path.join(base_dir,"train/train")
testing_dir = os.path.join(base_dir, "test")
train = pd.read_csv("../input/train.csv")
train_dataframe = pd.read_csv("../input/train.csv")
train_dataframe["has_cactus"] = np.where(train_dataframe["has_cactus"] == 1, "yes", "no")
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
classifier = Sequential()
# first convolution layer
classifier.add(Conv2D(filters=16, kernel_size=(3, 3),
input_shape = (32, 32, 3), activation="relu"))
# Max pooling layer
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# second convolution layer
classifier.add(Conv2D(32, kernel_size=(3, 3),
activation="relu"))
# Max pooling layer
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flatteing layer
classifier.add(Flatten())
# Fully connected Layer
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
classifier.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["accuracy"])
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
validation_split=0.25,
zoom_range = 0.2,
horizontal_flip = True)
training_set = train_datagen.flow_from_dataframe(dataframe = train_dataframe,
directory = train_dir,
x_col="id",
y_col="has_cactus",
target_size=(32,32),
subset="training",
batch_size=25,
shuffle=True,
class_mode="binary")
val_set = train_datagen.flow_from_dataframe(dataframe = train_dataframe,
directory = train_dir,
x_col="id",
y_col="has_cactus",
target_size=(32,32),
subset="validation",
batch_size=25,
shuffle=True,
class_mode="binary")
classifier.fit_generator(training_set,
epochs = 100,
steps_per_epoch = 525,
validation_data = val_set,
validation_steps = 175)
test_datagen = ImageDataGenerator(
rescale=1./255
)
test_generator = test_datagen.flow_from_directory(
testing_dir,
target_size=(32,32),
batch_size=1,
shuffle=False,
class_mode=None
)
preds = classifier.predict_generator(
test_generator,
steps=len(test_generator.filenames)
)
image_ids = [name.split('/')[-1] for name in test_generator.filenames]
preds = preds.flatten()
data = {'id': image_ids, 'has_cactus':preds}
submission = | pd.DataFrame(data) | pandas.DataFrame |
from collections import defaultdict
import copy
import json
import numpy as np
import pandas as pd
import pickle
import scipy
import seaborn as sb
import torch
from allennlp.common.util import prepare_environment, Params
from matplotlib import pyplot as plt
from pytorch_pretrained_bert import BertTokenizer, BertModel
from scipy.stats import entropy
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score, mean_squared_error
from probing.globals import *
from probing.helpers import _reg_r2
from probing.tasks import ProbingTask
class Analytics:
def __init__(self, workspace):
self.directories = {d: os.path.join(workspace, d) for d in ["out", "tasks", "datasets", "configs"]}
self.scalar_mixes = None
self.tokenizer = None
self.embedder = None
# === Data statistics
def task_statistics(self):
data = []
for task_id in sorted(os.listdir(self.directories["tasks"])):
config = ProbingTask.parse_id(task_id)
stats = json.load(open(os.path.join(self.directories["tasks"], task_id, "_stats.json")))
for split in stats:
c = copy.deepcopy(config)
c["sentences"] = stats[split]["total_sentences"]
c["instances"] = stats[split]["total_instances"]
c["labels"] = stats[split]["total_labels"]
c["split"] = split
data += [c]
return pd.DataFrame(data)
def dataset_statistics(self):
def _collect_stats(sentences):
num_tokens = 0
num_sentences = 0
num_predications = 0
roles_all = 0
roles_core = 0
for s, pp in sentences:
num_tokens += len(s.tokens())
num_sentences += 1
num_predications += len(pp)
for p in pp:
roles_all += len([a for a in p.arguments])
roles_core += len([a for a in p.arguments if not p.arguments[a]["pb"].startswith("AM")])
return {"tokens": num_tokens, "sentences": num_sentences, "predicates": num_predications,
"roles_all": roles_all, "roles_core": roles_core}
rows = []
for ds in os.listdir(self.directories["datasets"]):
ds = pickle.load(open(os.path.join(self.directories["datasets"], ds), "rb"))
for split in ds:
stats = _collect_stats(ds[split].values())
stats["split"] = split
stats["dataset"] = ds.name
rows += [stats]
df = pd.DataFrame(rows)
return df
# === Scalar mix analysis
def get_mixes(self):
if self.scalar_mixes is None:
self.scalar_mixes = self._parse_scalar_mixes()
return self.scalar_mixes
@staticmethod
# Extract a single scalar mix set by layer
def _parse_scalar_mix(th, kind, softmax=True):
mix_map = {"common": "bert_embedder._scalar_mix.scalar_parameters",
"src": "bert_embedder._scalar_mix_1.scalar_parameters",
"tgt": "bert_embedder._scalar_mix_2.scalar_parameters"}
device = torch.device('cpu')
data = torch.load(os.path.join(th), map_location=device)
layers = []
for layer in range(12): # FIXME num layers to global
layers += [data[f"{mix_map[kind]}.{layer}"].item()]
return kind, scipy.special.softmax(np.array(layers)) if softmax else np.array(layers)
@staticmethod
def center_of_gravity(x):
return sum(l * x[l] for l in range(len(x)))
def _parse_scalar_mixes(self):
data = []
for exp_id in os.listdir(self.directories["out"]):
config = ProbingTask.parse_id(exp_id)
task_name = config["name"]
try:
if config["ttype"] == "unary":
mix = [self._parse_scalar_mix(os.path.join(self.directories["out"], exp_id, "best.th"), "common")]
else:
mix = [self._parse_scalar_mix(os.path.join(self.directories["out"], exp_id, "best.th"), m) for m in ["src", "tgt"]]
for kind, m in mix:
task_mix_name = task_name
# prepend regression tasks with *
if config["mtype"] == "reg":
task_mix_name = "*"+task_mix_name
# add src-tgt mix distinction
if kind != "common":
task_mix_name += " " + kind
for layer in range(12):
c = copy.deepcopy(config)
c["name"] = task_mix_name
c["layer"] = layer
c["weight"] = m[layer]
data += [c]
except Exception:
print(f"No best weights for {exp_id} (yet?). Skipping.")
return pd.DataFrame(data)
def plot_scalar_mix_by_task(self, lang, task_order=None, cbar_max=None, show_cbar=True, ax=None):
mix_df = self.get_mixes()
pvt = mix_df[mix_df["language"] == lang].copy().pivot("name", "layer", "weight")
cog = {name: self.center_of_gravity(pvt.loc[name]) for name in pvt.index}
if task_order is None: # if no task order for display, order by center of gravity
pvt = pvt.reindex([x[0] for x in sorted(cog.items(), key=lambda y: y[1])])
else:
pvt = pvt.reindex(task_order)
# set maximum value for heatmap
if cbar_max is None:
cbar_max = pvt.values.max()
ax = sb.heatmap(pvt, cmap="Oranges", vmin=0.0, vmax=cbar_max,
cbar=show_cbar, square=False,
xticklabels=[],
yticklabels=[ix + f" [{round(cog[ix], 2)}]" for ix in pvt.index], ax=ax)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
ax.set_ylabel("")
ax.set_xlabel(r'Layer $\rightarrow$')
ax.set_title(lang)
def plot_anchor_task_map(self, lang, target_tasks, anchor_tasks=None, ax=None, show_cbar=False):
mix_df = self.get_mixes()
pvt = mix_df[mix_df["language"] == lang].copy().pivot("name", "layer", "weight")
if anchor_tasks is None:
anchor_tasks = [a for a in mix_df["name"].unique() if a not in target_tasks]
kl_div = pd.DataFrame()
for a in target_tasks:
for b in anchor_tasks:
kl_div.at[a, b] = entropy(pvt.loc[a], pvt.loc[b])
ax = sb.heatmap(kl_div.T, cmap="Blues_r", cbar=show_cbar, square=True, ax=ax)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.yticks(rotation=0)
ax.set_title(lang)
plt.tight_layout()
# === Performance and error analysis
def performance_summary(self):
data = []
for exp_id in os.listdir(self.directories["out"]):
try:
row = ProbingTask.parse_id(exp_id)
metrics = json.load(open(os.path.join(self.directories["out"], exp_id, "metrics.json")))
row["best_epoch"] = metrics["best_epoch"]
row["dev_score"] = None
if "best_validation_acc" in metrics: # if classification task, take accuracy from AllenNLP metrics
row["dev_score"] = metrics["best_validation_acc"]
else:
# regression tasks need predictions to get the score, they should be generated automatically
# FIXME implement R2 as AllenNLP metric
prettyout = os.path.join(self.directories["out"], exp_id, f"predictions.pretty.dev.json")
if os.path.exists(prettyout):
row["dev_score"] = _reg_r2(prettyout)
else:
row["dev_score"] = "NEED_PREDICTIONS"
data += [row]
except Exception:
print(f"No metrics for {exp_id} (yet?). Skipping.")
df = | pd.DataFrame(data) | pandas.DataFrame |
"""Automated data download and IO."""
# Builtins
import glob
import os
import gzip
import bz2
import hashlib
import shutil
import zipfile
import sys
import math
import logging
from functools import partial, wraps
import time
import fnmatch
import urllib.request
import urllib.error
from urllib.parse import urlparse
import socket
import multiprocessing
from netrc import netrc
import ftplib
import ssl
import tarfile
# External libs
import pandas as pd
import numpy as np
import shapely.geometry as shpg
import requests
# Optional libs
try:
import geopandas as gpd
except ImportError:
pass
try:
import salem
from salem import wgs84
except ImportError:
pass
try:
import rasterio
try:
# rasterio V > 1.0
from rasterio.merge import merge as merge_tool
except ImportError:
from rasterio.tools.merge import merge as merge_tool
except ImportError:
pass
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
# Locals
import oggm.cfg as cfg
from oggm.exceptions import (InvalidParamsError, NoInternetException,
DownloadVerificationFailedException,
DownloadCredentialsMissingException,
HttpDownloadError, HttpContentTooShortError,
InvalidDEMError, FTPSDownloadError)
# Module logger
logger = logging.getLogger('.'.join(__name__.split('.')[:-1]))
# Github repository and commit hash/branch name/tag name on that repository
# The given commit will be downloaded from github and used as source for
# all sample data
SAMPLE_DATA_GH_REPO = 'OGGM/oggm-sample-data'
SAMPLE_DATA_COMMIT = '98f6e299ab60b04cba9eb3be382231e19baf8c9e'
GDIR_L1L2_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'L1-L2_files/centerlines/')
GDIR_L3L5_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'L3-L5_files/CRU/centerlines/qc3/pcp2.5/no_match/')
DEMS_GDIR_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'rgitopo/')
CHECKSUM_URL = 'https://cluster.klima.uni-bremen.de/data/downloads.sha256.hdf'
CHECKSUM_VALIDATION_URL = CHECKSUM_URL + '.sha256'
CHECKSUM_LIFETIME = 24 * 60 * 60
# Web mercator proj constants
WEB_N_PIX = 256
WEB_EARTH_RADUIS = 6378137.
DEM_SOURCES = ['GIMP', 'ARCTICDEM', 'RAMP', 'TANDEM', 'AW3D30', 'MAPZEN',
'DEM3', 'ASTER', 'SRTM', 'REMA', 'ALASKA', 'COPDEM', 'NASADEM']
DEM_SOURCES_PER_GLACIER = None
_RGI_METADATA = dict()
DEM3REG = {
'ISL': [-25., -13., 63., 67.], # Iceland
'SVALBARD': [9., 35.99, 75., 84.],
'JANMAYEN': [-10., -7., 70., 72.],
'FJ': [36., 68., 79., 90.], # Franz Josef Land
'FAR': [-8., -6., 61., 63.], # Faroer
'BEAR': [18., 20., 74., 75.], # Bear Island
'SHL': [-3., 0., 60., 61.], # Shetland
# Antarctica tiles as UTM zones, large files
'01-15': [-180., -91., -90, -60.],
'16-30': [-91., -1., -90., -60.],
'31-45': [-1., 89., -90., -60.],
'46-60': [89., 189., -90., -60.],
# Greenland tiles
'GL-North': [-72., -11., 76., 84.],
'GL-West': [-62., -42., 64., 76.],
'GL-South': [-52., -40., 59., 64.],
'GL-East': [-42., -17., 64., 76.]
}
# Function
tuple2int = partial(np.array, dtype=np.int64)
lock = None
def mkdir(path, reset=False):
"""Checks if directory exists and if not, create one.
Parameters
----------
reset: erase the content of the directory if exists
Returns
-------
the path
"""
if reset and os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except FileExistsError:
pass
return path
def del_empty_dirs(s_dir):
"""Delete empty directories."""
b_empty = True
for s_target in os.listdir(s_dir):
s_path = os.path.join(s_dir, s_target)
if os.path.isdir(s_path):
if not del_empty_dirs(s_path):
b_empty = False
else:
b_empty = False
if b_empty:
os.rmdir(s_dir)
return b_empty
def findfiles(root_dir, endswith):
"""Finds all files with a specific ending in a directory
Parameters
----------
root_dir : str
The directory to search fo
endswith : str
The file ending (e.g. '.hgt'
Returns
-------
the list of files
"""
out = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for filename in [f for f in filenames if f.endswith(endswith)]:
out.append(os.path.join(dirpath, filename))
return out
def get_lock():
"""Get multiprocessing lock."""
global lock
if lock is None:
# Global Lock
if cfg.PARAMS.get('use_mp_spawn', False):
lock = multiprocessing.get_context('spawn').Lock()
else:
lock = multiprocessing.Lock()
return lock
def get_dl_verify_data(section):
"""Returns a pandas DataFrame with all known download object hashes.
The returned dictionary resolves str: cache_obj_name (without section)
to a tuple int(size) and bytes(sha256)
"""
verify_key = 'dl_verify_data_' + section
if cfg.DATA.get(verify_key) is not None:
return cfg.DATA[verify_key]
verify_file_path = os.path.join(cfg.CACHE_DIR, 'downloads.sha256.hdf')
def verify_file(force=False):
"""Check the hash file's own hash"""
if not cfg.PARAMS['has_internet']:
return
if not force and os.path.isfile(verify_file_path) and \
os.path.getmtime(verify_file_path) + CHECKSUM_LIFETIME > time.time():
return
logger.info('Checking the download verification file checksum...')
try:
with requests.get(CHECKSUM_VALIDATION_URL) as req:
req.raise_for_status()
verify_file_sha256 = req.text.split(maxsplit=1)[0]
verify_file_sha256 = bytearray.fromhex(verify_file_sha256)
except Exception as e:
verify_file_sha256 = None
logger.warning('Failed getting verification checksum: ' + repr(e))
if os.path.isfile(verify_file_path) and verify_file_sha256:
sha256 = hashlib.sha256()
with open(verify_file_path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
if sha256.digest() != verify_file_sha256:
logger.warning('%s changed or invalid, deleting.'
% (verify_file_path))
os.remove(verify_file_path)
else:
os.utime(verify_file_path)
if not np.any(['dl_verify_data_' in k for k in cfg.DATA.keys()]):
# We check the hash file only once per session
# no need to do it at each call
verify_file()
if not os.path.isfile(verify_file_path):
if not cfg.PARAMS['has_internet']:
return pd.DataFrame()
logger.info('Downloading %s to %s...'
% (CHECKSUM_URL, verify_file_path))
with requests.get(CHECKSUM_URL, stream=True) as req:
if req.status_code == 200:
mkdir(os.path.dirname(verify_file_path))
with open(verify_file_path, 'wb') as f:
for b in req.iter_content(chunk_size=0xFFFF):
if b:
f.write(b)
logger.info('Done downloading.')
verify_file(force=True)
if not os.path.isfile(verify_file_path):
logger.warning('Downloading and verifying checksums failed.')
return pd.DataFrame()
try:
data = pd.read_hdf(verify_file_path, key=section)
except KeyError:
data = pd.DataFrame()
cfg.DATA[verify_key] = data
return data
def _call_dl_func(dl_func, cache_path):
"""Helper so the actual call to downloads can be overridden
"""
return dl_func(cache_path)
def _cached_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Takes care of checking if the file is already cached.
Only calls the actual download function when no cached version exists.
"""
cache_dir = cfg.PATHS['dl_cache_dir']
cache_ro = cfg.PARAMS['dl_cache_readonly']
# A lot of logic below could be simplified but it's also not too important
wd = cfg.PATHS.get('working_dir')
if wd:
# this is for real runs
fb_cache_dir = os.path.join(wd, 'cache')
check_fb_dir = False
else:
# Nothing have been set up yet, this is bad - find a place to write
# This should happen on read-only cluster only but still
wd = os.environ.get('OGGM_WORKDIR')
if wd is not None and os.path.isdir(wd):
fb_cache_dir = os.path.join(wd, 'cache')
else:
fb_cache_dir = os.path.join(cfg.CACHE_DIR, 'cache')
check_fb_dir = True
if not cache_dir:
# Defaults to working directory: it must be set!
if not cfg.PATHS['working_dir']:
raise InvalidParamsError("Need a valid PATHS['working_dir']!")
cache_dir = fb_cache_dir
cache_ro = False
fb_path = os.path.join(fb_cache_dir, cache_obj_name)
if not reset and os.path.isfile(fb_path):
return fb_path
cache_path = os.path.join(cache_dir, cache_obj_name)
if not reset and os.path.isfile(cache_path):
return cache_path
if cache_ro:
if check_fb_dir:
# Add a manual check that we are caching sample data download
if 'oggm-sample-data' not in fb_path:
raise InvalidParamsError('Attempting to download something '
'with invalid global settings.')
cache_path = fb_path
if not cfg.PARAMS['has_internet']:
raise NoInternetException("Download required, but "
"`has_internet` is False.")
mkdir(os.path.dirname(cache_path))
try:
cache_path = _call_dl_func(dl_func, cache_path)
except BaseException:
if os.path.exists(cache_path):
os.remove(cache_path)
raise
return cache_path
def _verified_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Verifies the size and hash of the downloaded file against the included
list of known static files.
Uses _cached_download_helper to perform the actual download.
"""
path = _cached_download_helper(cache_obj_name, dl_func, reset)
try:
dl_verify = cfg.PARAMS['dl_verify']
except KeyError:
dl_verify = True
if dl_verify and path and cache_obj_name not in cfg.DL_VERIFIED:
cache_section, cache_path = cache_obj_name.split('/', 1)
data = get_dl_verify_data(cache_section)
if cache_path not in data.index:
logger.info('No known hash for %s' % cache_obj_name)
cfg.DL_VERIFIED[cache_obj_name] = True
else:
# compute the hash
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
sha256 = sha256.digest()
size = os.path.getsize(path)
# check
data = data.loc[cache_path]
if data['size'] != size or bytes(data['sha256']) != sha256:
err = '%s failed to verify!\nis: %s %s\nexpected: %s %s' % (
path, size, sha256.hex(), data[0], data[1].hex())
raise DownloadVerificationFailedException(msg=err, path=path)
logger.info('%s verified successfully.' % path)
cfg.DL_VERIFIED[cache_obj_name] = True
return path
def _requests_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Implements the required features of urlretrieve on top of requests
"""
chunk_size = 128 * 1024
chunk_count = 0
with requests.get(url, stream=True, auth=auth, timeout=timeout) as r:
if r.status_code != 200:
raise HttpDownloadError(r.status_code, url)
r.raise_for_status()
size = r.headers.get('content-length') or -1
size = int(size)
if reporthook:
reporthook(chunk_count, chunk_size, size)
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if not chunk:
continue
f.write(chunk)
chunk_count += 1
if reporthook:
reporthook(chunk_count, chunk_size, size)
if chunk_count * chunk_size < size:
raise HttpContentTooShortError()
def _classic_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Thin wrapper around pythons urllib urlretrieve
"""
ourl = url
if auth:
u = urlparse(url)
if '@' not in u.netloc:
netloc = auth[0] + ':' + auth[1] + '@' + u.netloc
url = u._replace(netloc=netloc).geturl()
old_def_timeout = socket.getdefaulttimeout()
if timeout is not None:
socket.setdefaulttimeout(timeout)
try:
urllib.request.urlretrieve(url, path, reporthook)
except urllib.error.HTTPError as e:
raise HttpDownloadError(e.code, ourl)
except urllib.error.ContentTooShortError as e:
raise HttpContentTooShortError()
finally:
socket.setdefaulttimeout(old_def_timeout)
class ImplicitFTPTLS(ftplib.FTP_TLS):
""" FTP_TLS subclass that automatically wraps sockets in SSL to support
implicit FTPS.
Taken from https://stackoverflow.com/a/36049814
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sock = None
@property
def sock(self):
"""Return the socket."""
return self._sock
@sock.setter
def sock(self, value):
"""When modifying the socket, ensure that it is ssl wrapped."""
if value is not None and not isinstance(value, ssl.SSLSocket):
value = self.context.wrap_socket(value)
self._sock = value
def url_exists(url):
"""Checks if a given a URL exists or not."""
request = requests.get(url)
return request.status_code < 400
def _ftps_retrieve(url, path, reporthook, auth=None, timeout=None):
""" Wrapper around ftplib to download from FTPS server
"""
if not auth:
raise DownloadCredentialsMissingException('No authentication '
'credentials given!')
upar = urlparse(url)
# Decide if Implicit or Explicit FTPS is used based on the port in url
if upar.port == 990:
ftps = ImplicitFTPTLS()
elif upar.port == 21:
ftps = ftplib.FTP_TLS()
try:
# establish ssl connection
ftps.connect(host=upar.hostname, port=upar.port, timeout=timeout)
ftps.login(user=auth[0], passwd=auth[1])
ftps.prot_p()
logger.info('Established connection %s' % upar.hostname)
# meta for progress bar size
count = 0
total = ftps.size(upar.path)
bs = 12*1024
def _ftps_progress(data):
outfile.write(data)
nonlocal count
count += 1
reporthook(count, count*bs, total)
with open(path, 'wb') as outfile:
ftps.retrbinary('RETR ' + upar.path, _ftps_progress, blocksize=bs)
except (ftplib.error_perm, socket.timeout, socket.gaierror) as err:
raise FTPSDownloadError(err)
finally:
ftps.close()
def _get_url_cache_name(url):
"""Returns the cache name for any given url.
"""
res = urlparse(url)
return res.netloc.split(':', 1)[0] + res.path
def oggm_urlretrieve(url, cache_obj_name=None, reset=False,
reporthook=None, auth=None, timeout=None):
"""Wrapper around urlretrieve, to implement our caching logic.
Instead of accepting a destination path, it decided where to store the file
and returns the local path.
auth is expected to be either a tuple of ('username', 'password') or None.
"""
if cache_obj_name is None:
cache_obj_name = _get_url_cache_name(url)
def _dlf(cache_path):
logger.info("Downloading %s to %s..." % (url, cache_path))
try:
_requests_urlretrieve(url, cache_path, reporthook, auth, timeout)
except requests.exceptions.InvalidSchema:
if 'ftps://' in url:
_ftps_retrieve(url, cache_path, reporthook, auth, timeout)
else:
_classic_urlretrieve(url, cache_path, reporthook, auth,
timeout)
return cache_path
return _verified_download_helper(cache_obj_name, _dlf, reset)
def _progress_urlretrieve(url, cache_name=None, reset=False,
auth=None, timeout=None):
"""Downloads a file, returns its local path, and shows a progressbar."""
try:
from progressbar import DataTransferBar, UnknownLength
pbar = None
def _upd(count, size, total):
nonlocal pbar
if pbar is None:
pbar = DataTransferBar()
if not pbar.is_terminal:
pbar.min_poll_interval = 15
if pbar.max_value is None:
if total > 0:
pbar.start(total)
else:
pbar.start(UnknownLength)
pbar.update(min(count * size, total))
sys.stdout.flush()
res = oggm_urlretrieve(url, cache_obj_name=cache_name, reset=reset,
reporthook=_upd, auth=auth, timeout=timeout)
try:
pbar.finish()
except BaseException:
pass
return res
except (ImportError, ModuleNotFoundError):
return oggm_urlretrieve(url, cache_obj_name=cache_name,
reset=reset, auth=auth, timeout=timeout)
def aws_file_download(aws_path, cache_name=None, reset=False):
with get_lock():
return _aws_file_download_unlocked(aws_path, cache_name, reset)
def _aws_file_download_unlocked(aws_path, cache_name=None, reset=False):
"""Download a file from the AWS drive s3://astgtmv2/
**Note:** you need AWS credentials for this to work.
Parameters
----------
aws_path: path relative to s3://astgtmv2/
"""
while aws_path.startswith('/'):
aws_path = aws_path[1:]
if cache_name is not None:
cache_obj_name = cache_name
else:
cache_obj_name = 'astgtmv2/' + aws_path
def _dlf(cache_path):
raise NotImplementedError("Downloads from AWS are no longer supported")
return _verified_download_helper(cache_obj_name, _dlf, reset)
def file_downloader(www_path, retry_max=5, cache_name=None,
reset=False, auth=None, timeout=None):
"""A slightly better downloader: it tries more than once."""
local_path = None
retry_counter = 0
while retry_counter <= retry_max:
# Try to download
try:
retry_counter += 1
local_path = _progress_urlretrieve(www_path, cache_name=cache_name,
reset=reset, auth=auth,
timeout=timeout)
# if no error, exit
break
except HttpDownloadError as err:
# This works well for py3
if err.code == 404 or err.code == 300:
# Ok so this *should* be an ocean tile
return None
elif err.code >= 500 and err.code < 600:
logger.info("Downloading %s failed with HTTP error %s, "
"retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
else:
raise
except HttpContentTooShortError as err:
logger.info("Downloading %s failed with ContentTooShortError"
" error %s, retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
except DownloadVerificationFailedException as err:
if (cfg.PATHS['dl_cache_dir'] and
err.path.startswith(cfg.PATHS['dl_cache_dir']) and
cfg.PARAMS['dl_cache_readonly']):
if not cache_name:
cache_name = _get_url_cache_name(www_path)
cache_name = "GLOBAL_CACHE_INVALID/" + cache_name
retry_counter -= 1
logger.info("Global cache for %s is invalid!")
else:
try:
os.remove(err.path)
except FileNotFoundError:
pass
logger.info("Downloading %s failed with "
"DownloadVerificationFailedException\n %s\n"
"The file might have changed or is corrupted. "
"File deleted. Re-downloading... %s/%s" %
(www_path, err.msg, retry_counter, retry_max))
continue
except requests.ConnectionError as err:
if err.args[0].__class__.__name__ == 'MaxRetryError':
# if request tried often enough we don't have to do this
# this error does happen for not existing ASTERv3 files
return None
else:
# in other cases: try again
logger.info("Downloading %s failed with ConnectionError, "
"retrying in 10 seconds... %s/%s" %
(www_path, retry_counter, retry_max))
time.sleep(10)
continue
except FTPSDownloadError as err:
logger.info("Downloading %s failed with FTPSDownloadError"
" error: '%s', retrying in 10 seconds... %s/%s" %
(www_path, err.orgerr, retry_counter, retry_max))
time.sleep(10)
continue
# See if we managed (fail is allowed)
if not local_path or not os.path.exists(local_path):
logger.warning('Downloading %s failed.' % www_path)
return local_path
def locked_func(func):
"""To decorate a function that needs to be locked for multiprocessing"""
@wraps(func)
def wrapper(*args, **kwargs):
with get_lock():
return func(*args, **kwargs)
return wrapper
def file_extractor(file_path):
"""For archives with only one file inside extract the file to tmpdir."""
filename, file_extension = os.path.splitext(file_path)
# Second one for tar.gz files
f2, ex2 = os.path.splitext(filename)
if ex2 == '.tar':
filename, file_extension = f2, '.tar.gz'
bname = os.path.basename(file_path)
# This is to give a unique name to the tmp file
hid = hashlib.md5(file_path.encode()).hexdigest()[:7] + '_'
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# Check output extension
def _check_ext(f):
_, of_ext = os.path.splitext(f)
if of_ext not in ['.nc', '.tif']:
raise InvalidParamsError('Extracted file extension not recognized'
': {}'.format(of_ext))
return of_ext
if file_extension == '.zip':
with zipfile.ZipFile(file_path) as zf:
members = zf.namelist()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0]
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.read(members[0]))
elif file_extension == '.gz':
# Gzip files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with gzip.GzipFile(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension == '.bz2':
# bzip2 files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with bz2.open(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension in ['.tar.gz', '.tar']:
with tarfile.open(file_path) as zf:
members = zf.getmembers()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0].name
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.extractfile(members[0]).read())
else:
raise InvalidParamsError('Extension not recognized: '
'{}'.format(file_extension))
# Be sure we don't overfill the folder
cfg.get_lru_handler(tmpdir, ending=of_ext).append(o_path)
return o_path
def download_with_authentication(wwwfile, key):
""" Uses credentials from a local .netrc file to download files
This is function is currently used for TanDEM-X and ASTER
Parameters
----------
wwwfile : str
path to the file to download
key : str
the machine to to look at in the .netrc file
Returns
-------
"""
# Check the cache first. Use dummy download function to assure nothing is
# tried to be downloaded without credentials:
def _always_none(foo):
return None
cache_obj_name = _get_url_cache_name(wwwfile)
dest_file = _verified_download_helper(cache_obj_name, _always_none)
# Grab auth parameters
if not dest_file:
authfile = os.path.expanduser('~/.netrc')
if not os.path.isfile(authfile):
raise DownloadCredentialsMissingException(
(authfile, ' does not exist. Add necessary credentials for ',
key, ' with `oggm_netrc_credentials. You may have to ',
'register at the respective service first.'))
try:
netrc(authfile).authenticators(key)[0]
except TypeError:
raise DownloadCredentialsMissingException(
('Credentials for ', key, ' are not in ', authfile, '. Add ',
'credentials for with `oggm_netrc_credentials`.'))
dest_file = file_downloader(
wwwfile, auth=(netrc(authfile).authenticators(key)[0],
netrc(authfile).authenticators(key)[2]))
return dest_file
def download_oggm_files():
with get_lock():
return _download_oggm_files_unlocked()
def _download_oggm_files_unlocked():
"""Checks if the demo data is already on the cache and downloads it."""
zip_url = 'https://github.com/%s/archive/%s.zip' % \
(SAMPLE_DATA_GH_REPO, SAMPLE_DATA_COMMIT)
odir = os.path.join(cfg.CACHE_DIR)
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT)
# download only if necessary
if not os.path.exists(sdir):
ofile = file_downloader(zip_url)
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
assert os.path.isdir(sdir)
# list of files for output
out = dict()
for root, directories, filenames in os.walk(sdir):
for filename in filenames:
if filename in out:
# This was a stupid thing, and should not happen
# TODO: duplicates in sample data...
k = os.path.join(os.path.basename(root), filename)
assert k not in out
out[k] = os.path.join(root, filename)
else:
out[filename] = os.path.join(root, filename)
return out
def _download_srtm_file(zone):
with get_lock():
return _download_srtm_file_unlocked(zone)
def _download_srtm_file_unlocked(zone):
"""Checks if the srtm data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, 'srtm_' + zone + '.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
wwwfile = ('http://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/'
'TIFF/srtm_' + zone + '.zip')
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_nasadem_file(zone):
with get_lock():
return _download_nasadem_file_unlocked(zone)
def _download_nasadem_file_unlocked(zone):
"""Checks if the NASADEM data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/MEASURES/NASADEM_HGT.001/'
'2000.02.11/NASADEM_HGT_{}.zip'.format(zone))
demfile = '{}.hgt'.format(zone)
outpath = os.path.join(tmpdir, demfile)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extract(demfile, path=tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_tandem_file(zone):
with get_lock():
return _download_tandem_file_unlocked(zone)
def _download_tandem_file_unlocked(zone):
"""Checks if the tandem data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
bname = zone.split('/')[-1] + '_DEM.tif'
wwwfile = ('https://download.geoservice.dlr.de/TDM90/files/'
'{}.zip'.format(zone))
outpath = os.path.join(tmpdir, bname)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
dest_file = download_with_authentication(wwwfile, 'geoservice.dlr.de')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
elif not zipfile.is_zipfile(dest_file):
# If the TanDEM-X tile does not exist, a invalid file is created.
# See https://github.com/OGGM/oggm/issues/893 for more details
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
for fn in zf.namelist():
if 'DEM/' + bname in fn:
break
with open(outpath, 'wb') as fo:
fo.write(zf.read(fn))
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_dem3_viewpano(zone):
with get_lock():
return _download_dem3_viewpano_unlocked(zone)
def _download_dem3_viewpano_unlocked(zone):
"""Checks if the DEM3 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, zone + '.tif')
extract_dir = os.path.join(tmpdir, 'tmp_' + zone)
mkdir(extract_dir, reset=True)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# OK, so see if downloaded already
# some files have a newer version 'v2'
if zone in ['R33', 'R34', 'R35', 'R36', 'R37', 'R38', 'Q32', 'Q33', 'Q34',
'Q35', 'Q36', 'Q37', 'Q38', 'Q39', 'Q40', 'P31', 'P32', 'P33',
'P34', 'P35', 'P36', 'P37', 'P38', 'P39', 'P40']:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + 'v2.zip'
elif zone in DEM3REG.keys():
# We prepared these files as tif already
ifile = ('https://cluster.klima.uni-bremen.de/~oggm/dem/'
'DEM3_MERGED/{}.tif'.format(zone))
return file_downloader(ifile)
else:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + '.zip'
dfile = file_downloader(ifile)
# None means we tried hard but we couldn't find it
if not dfile:
return None
# ok we have to extract it
with zipfile.ZipFile(dfile) as zf:
zf.extractall(extract_dir)
# Serious issue: sometimes, if a southern hemisphere URL is queried for
# download and there is none, a NH zip file is downloaded.
# Example: http://viewfinderpanoramas.org/dem3/SN29.zip yields N29!
# BUT: There are southern hemisphere files that download properly. However,
# the unzipped folder has the file name of
# the northern hemisphere file. Some checks if correct file exists:
if len(zone) == 4 and zone.startswith('S'):
zonedir = os.path.join(extract_dir, zone[1:])
else:
zonedir = os.path.join(extract_dir, zone)
globlist = glob.glob(os.path.join(zonedir, '*.hgt'))
# take care of the special file naming cases
if zone in DEM3REG.keys():
globlist = glob.glob(os.path.join(extract_dir, '*', '*.hgt'))
if not globlist:
# Final resort
globlist = (findfiles(extract_dir, '.hgt') or
findfiles(extract_dir, '.HGT'))
if not globlist:
raise RuntimeError("We should have some files here, but we don't")
# merge the single HGT files (can be a bit ineffective, because not every
# single file might be exactly within extent...)
rfiles = [rasterio.open(s) for s in globlist]
dest, output_transform = merge_tool(rfiles)
profile = rfiles[0].profile
if 'affine' in profile:
profile.pop('affine')
profile['transform'] = output_transform
profile['height'] = dest.shape[1]
profile['width'] = dest.shape[2]
profile['driver'] = 'GTiff'
with rasterio.open(outpath, 'w', **profile) as dst:
dst.write(dest)
for rf in rfiles:
rf.close()
# delete original files to spare disk space
for s in globlist:
os.remove(s)
del_empty_dirs(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_aster_file(zone):
with get_lock():
return _download_aster_file_unlocked(zone)
def _download_aster_file_unlocked(zone):
"""Checks if the ASTER data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/ASTER_B/ASTT/ASTGTM.003/'
'2000.03.01/{}.zip'.format(zone))
outpath = os.path.join(tmpdir, zone + '_dem.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# download from NASA Earthdata with credentials
dest_file = download_with_authentication(wwwfile, 'urs.earthdata.nasa.gov')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_topo_file_from_cluster(fname):
with get_lock():
return _download_topo_file_from_cluster_unlocked(fname)
def _download_topo_file_from_cluster_unlocked(fname):
"""Checks if the special topo data is in the directory and if not,
download it from the cluster.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, fname)
url = 'https://cluster.klima.uni-bremen.de/data/dems/'
url += fname + '.zip'
dfile = file_downloader(url)
if not os.path.exists(outpath):
logger.info('Extracting ' + fname + '.zip to ' + outpath + '...')
with zipfile.ZipFile(dfile) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_copdem_file(cppfile, tilename):
with get_lock():
return _download_copdem_file_unlocked(cppfile, tilename)
def _download_copdem_file_unlocked(cppfile, tilename):
"""Checks if Copernicus DEM file is in the directory, if not download it.
cppfile : name of the tarfile to download
tilename : name of folder and tif file within the cppfile
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
fpath = '{0}_DEM.tif'.format(tilename)
demfile = os.path.join(tmpdir, fpath)
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftps://cdsdata.copernicus.eu:990/' +
'datasets/COP-DEM_GLO-90-DGED/2019_1/' +
cppfile)
dest_file = download_with_authentication(ftpfile,
'spacedata.copernicus.eu')
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
tiffile = os.path.join(tilename, 'DEM', fpath)
with tarfile.open(dest_file) as tf:
tmember = tf.getmember(tiffile)
# do not extract the full path of the file
tmember.name = os.path.basename(tf.getmember(tiffile).name)
tf.extract(tmember, tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
cfg.get_lru_handler(tmpdir).append(demfile)
return demfile
def _download_aw3d30_file(zone):
with get_lock():
return _download_aw3d30_file_unlocked(zone)
def _download_aw3d30_file_unlocked(fullzone):
"""Checks if the AW3D30 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
tile = fullzone.split('/')[1]
demfile = os.path.join(tmpdir, tile, tile + '_AVE_DSM.tif')
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftp://ftp.eorc.jaxa.jp/pub/ALOS/ext1/AW3D30/release_v1804/'
+ fullzone + '.tar.gz')
try:
dest_file = file_downloader(ftpfile, timeout=180)
except urllib.error.URLError:
# This error is raised if file is not available, could be water
return None
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
from oggm.utils import robust_tar_extract
dempath = os.path.dirname(demfile)
robust_tar_extract(dest_file, dempath)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
# this tarfile contains several files
for file in os.listdir(dempath):
cfg.get_lru_handler(tmpdir).append(os.path.join(dempath, file))
return demfile
def _download_mapzen_file(zone):
with get_lock():
return _download_mapzen_file_unlocked(zone)
def _download_mapzen_file_unlocked(zone):
"""Checks if the mapzen data is in the directory and if not, download it.
"""
bucket = 'elevation-tiles-prod'
prefix = 'geotiff'
url = 'http://s3.amazonaws.com/%s/%s/%s' % (bucket, prefix, zone)
# That's all
return file_downloader(url, timeout=180)
def get_prepro_gdir(rgi_version, rgi_id, border, prepro_level, base_url=None):
with get_lock():
return _get_prepro_gdir_unlocked(rgi_version, rgi_id, border,
prepro_level, base_url=base_url)
def get_prepro_base_url(base_url=None, rgi_version=None, border=None,
prepro_level=None):
"""Extended base url where to find the desired gdirs."""
if base_url is None:
if prepro_level <= 2:
base_url = GDIR_L1L2_URL
else:
base_url = GDIR_L3L5_URL
if not base_url.endswith('/'):
base_url += '/'
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
if border is None:
border = cfg.PARAMS['border']
url = base_url
url += 'RGI{}/'.format(rgi_version)
url += 'b_{:03d}/'.format(int(border))
url += 'L{:d}/'.format(prepro_level)
return url
def _get_prepro_gdir_unlocked(rgi_version, rgi_id, border, prepro_level,
base_url=None):
url = get_prepro_base_url(rgi_version=rgi_version, border=border,
prepro_level=prepro_level, base_url=base_url)
url += '{}/{}.tar' .format(rgi_id[:8], rgi_id[:11])
tar_base = file_downloader(url)
if tar_base is None:
raise RuntimeError('Could not find file at ' + url)
return tar_base
def get_geodetic_mb_dataframe(file_path=None):
"""Fetches the reference geodetic dataframe for calibration.
Currently that's the data from Hughonnet et al 2021, corrected for
outliers and with void filled. The data preparation script is
available at
https://nbviewer.jupyter.org/urls/cluster.klima.uni-bremen.de/~oggm/geodetic_ref_mb/convert.ipynb
Parameters
----------
file_path : str
in case you have your own file to parse (check the format first!)
Returns
-------
a DataFrame with the data.
"""
# fetch the file online or read custom file
if file_path is None:
base_url = 'https://cluster.klima.uni-bremen.de/~oggm/geodetic_ref_mb/'
file_name = 'hugonnet_2021_ds_rgi60_pergla_rates_10_20_worldwide_filled.hdf'
file_path = file_downloader(base_url + file_name)
# Did we open it yet?
if file_path in cfg.DATA:
return cfg.DATA[file_path]
# If not let's go
extension = os.path.splitext(file_path)[1]
if extension == '.csv':
df = pd.read_csv(file_path, index_col=0)
elif extension == '.hdf':
df = pd.read_hdf(file_path)
# Check for missing data (old files)
if len(df.loc[df['dmdtda'].isnull()]) > 0:
raise InvalidParamsError('The reference file you are using has missing '
'data and is probably outdated (sorry for '
'that). Delete the file at '
f'{file_path} and start again.')
cfg.DATA[file_path] = df
return df
def srtm_zone(lon_ex, lat_ex):
"""Returns a list of SRTM zones covering the desired extent.
"""
# SRTM are sorted in tiles of 5 degrees
srtm_x0 = -180.
srtm_y0 = 60.
srtm_dx = 5.
srtm_dy = -5.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# int() to avoid Deprec warning:
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
assert dy < 0
zx = np.ceil(dx / srtm_dx)
zy = np.ceil(dy / srtm_dy)
zones.append('{:02.0f}_{:02.0f}'.format(zx, zy))
return list(sorted(set(zones)))
def _tandem_path(lon_tile, lat_tile):
# OK we have a proper tile now
# First folder level is sorted from S to N
level_0 = 'S' if lat_tile < 0 else 'N'
level_0 += '{:02d}'.format(abs(lat_tile))
# Second folder level is sorted from W to E, but in 10 steps
level_1 = 'W' if lon_tile < 0 else 'E'
level_1 += '{:03d}'.format(divmod(abs(lon_tile), 10)[0] * 10)
# Level 2 is formating, but depends on lat
level_2 = 'W' if lon_tile < 0 else 'E'
if abs(lat_tile) <= 60:
level_2 += '{:03d}'.format(abs(lon_tile))
elif abs(lat_tile) <= 80:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 2)[0] * 2)
else:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 4)[0] * 4)
# Final path
out = (level_0 + '/' + level_1 + '/' +
'TDM1_DEM__30_{}{}'.format(level_0, level_2))
return out
def tandem_zone(lon_ex, lat_ex):
"""Returns a list of TanDEM-X zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
# For higher lats they are stored in steps of 2 and 4. My code below
# is probably giving more files than needed but better safe than sorry
lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
dtype=int)
zones = []
for lat in lat_tiles:
if abs(lat) < 60:
l0 = np.floor(lon_ex[0])
l1 = np.floor(lon_ex[1])
elif abs(lat) < 80:
l0 = divmod(lon_ex[0], 2)[0] * 2
l1 = divmod(lon_ex[1], 2)[0] * 2
elif abs(lat) < 90:
l0 = divmod(lon_ex[0], 4)[0] * 4
l1 = divmod(lon_ex[1], 4)[0] * 4
lon_tiles = np.arange(l0, l1+1, dtype=int)
for lon in lon_tiles:
zones.append(_tandem_path(lon, lat))
return list(sorted(set(zones)))
def _aw3d30_path(lon_tile, lat_tile):
# OK we have a proper tile now
# Folders are sorted with N E S W in 5 degree steps
# But in N and E the lower boundary is indicated
# e.g. N060 contains N060 - N064
# e.g. E000 contains E000 - E004
# but S and W indicate the upper boundary:
# e.g. S010 contains S006 - S010
# e.g. W095 contains W091 - W095
# get letters
ns = 'S' if lat_tile < 0 else 'N'
ew = 'W' if lon_tile < 0 else 'E'
# get lat/lon
lon = abs(5 * np.floor(lon_tile/5))
lat = abs(5 * np.floor(lat_tile/5))
folder = '%s%.3d%s%.3d' % (ns, lat, ew, lon)
filename = '%s%.3d%s%.3d' % (ns, abs(lat_tile), ew, abs(lon_tile))
# Final path
out = folder + '/' + filename
return out
def aw3d30_zone(lon_ex, lat_ex):
"""Returns a list of AW3D30 zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
lon_tiles = np.arange(np.floor(lon_ex[0]), np.ceil(lon_ex[1]+1e-9),
dtype=int)
lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
dtype=int)
zones = []
for lon in lon_tiles:
for lat in lat_tiles:
zones.append(_aw3d30_path(lon, lat))
return list(sorted(set(zones)))
def _extent_to_polygon(lon_ex, lat_ex, to_crs=None):
if lon_ex[0] == lon_ex[1] and lat_ex[0] == lat_ex[1]:
out = shpg.Point(lon_ex[0], lat_ex[0])
else:
x = [lon_ex[0], lon_ex[1], lon_ex[1], lon_ex[0], lon_ex[0]]
y = [lat_ex[0], lat_ex[0], lat_ex[1], lat_ex[1], lat_ex[0]]
out = shpg.Polygon(np.array((x, y)).T)
if to_crs is not None:
out = salem.transform_geometry(out, to_crs=to_crs)
return out
def arcticdem_zone(lon_ex, lat_ex):
"""Returns a list of Arctic-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('ArcticDEM_Tile_Index_Rel7_by_tile.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def rema_zone(lon_ex, lat_ex):
"""Returns a list of REMA-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('REMA_Tile_Index_Rel1.1.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def alaska_dem_zone(lon_ex, lat_ex):
"""Returns a list of Alaska-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('Alaska_albers_V3_tiles.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def copdem_zone(lon_ex, lat_ex):
"""Returns a list of Copernicus DEM tarfile and tilename tuples
"""
# path to the lookup shapefiles
gdf = gpd.read_file(get_demo_file('RGI60_COPDEM_lookup.shp'))
# intersect with lat lon extents
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
# COPDEM is global, if we miss all tiles it is worth an error
if len(gdf) == 0:
raise InvalidDEMError('Could not find any Copernicus DEM tile.')
flist = []
for _, g in gdf.iterrows():
cpp = g['CPP File']
eop = g['Eop Id']
eop = eop.split(':')[-2]
assert 'Copernicus' in eop
flist.append((cpp, eop))
return flist
def dem3_viewpano_zone(lon_ex, lat_ex):
"""Returns a list of DEM3 zones covering the desired extent.
http://viewfinderpanoramas.org/Coverage%20map%20viewfinderpanoramas_org3.htm
"""
for _f in DEM3REG.keys():
if (np.min(lon_ex) >= DEM3REG[_f][0]) and \
(np.max(lon_ex) <= DEM3REG[_f][1]) and \
(np.min(lat_ex) >= DEM3REG[_f][2]) and \
(np.max(lat_ex) <= DEM3REG[_f][3]):
# test some weird inset files in Antarctica
if (np.min(lon_ex) >= -91.) and (np.max(lon_ex) <= -90.) and \
(np.min(lat_ex) >= -72.) and (np.max(lat_ex) <= -68.):
return ['SR15']
elif (np.min(lon_ex) >= -47.) and (np.max(lon_ex) <= -43.) and \
(np.min(lat_ex) >= -61.) and (np.max(lat_ex) <= -60.):
return ['SP23']
elif (np.min(lon_ex) >= 162.) and (np.max(lon_ex) <= 165.) and \
(np.min(lat_ex) >= -68.) and (np.max(lat_ex) <= -66.):
return ['SQ58']
# test some rogue Greenland tiles as well
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 76.) and (np.max(lat_ex) <= 80.):
return ['T19']
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U19']
elif (np.min(lon_ex) >= -66.) and (np.max(lon_ex) <= -60.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U20']
elif (np.min(lon_ex) >= -60.) and (np.max(lon_ex) <= -54.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U21']
elif (np.min(lon_ex) >= -54.) and (np.max(lon_ex) <= -48.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U22']
elif (np.min(lon_ex) >= -25.) and (np.max(lon_ex) <= -13.) and \
(np.min(lat_ex) >= 63.) and (np.max(lat_ex) <= 67.):
return ['ISL']
else:
return [_f]
# if the tile doesn't have a special name, its name can be found like this:
# corrected SRTMs are sorted in tiles of 6 deg longitude and 4 deg latitude
srtm_x0 = -180.
srtm_y0 = 0.
srtm_dx = 6.
srtm_dy = 4.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# TODO: Fabien, find out what Johannes wanted with this +3
# +3 is just for the number to become still a bit larger
# int() to avoid Deprec warning
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dy) + 3))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dx) + 3))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
zx = np.ceil(dx / srtm_dx)
# convert number to letter
zy = chr(int(abs(dy / srtm_dy)) + ord('A'))
if lat >= 0:
zones.append('%s%02.0f' % (zy, zx))
else:
zones.append('S%s%02.0f' % (zy, zx))
return list(sorted(set(zones)))
def aster_zone(lon_ex, lat_ex):
"""Returns a list of ASTGTMV3 zones covering the desired extent.
ASTER v3 tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 'S' if lat < 0 else 'N'
for lon in lons:
# east or west?
ew = 'W' if lon < 0 else 'E'
filename = 'ASTGTMV003_{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat),
ew, abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
def nasadem_zone(lon_ex, lat_ex):
"""Returns a list of NASADEM zones covering the desired extent.
NASADEM tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 's' if lat < 0 else 'n'
for lon in lons:
# east or west?
ew = 'w' if lon < 0 else 'e'
filename = '{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat), ew,
abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
def mapzen_zone(lon_ex, lat_ex, dx_meter=None, zoom=None):
"""Returns a list of AWS mapzen zones covering the desired extent.
For mapzen one has to specify the level of detail (zoom) one wants. The
best way in OGGM is to specify dx_meter of the underlying map and OGGM
will decide which zoom level works best.
"""
if dx_meter is None and zoom is None:
raise InvalidParamsError('Need either zoom level or dx_meter.')
bottom, top = lat_ex
left, right = lon_ex
ybound = 85.0511
if bottom <= -ybound:
bottom = -ybound
if top <= -ybound:
top = -ybound
if bottom > ybound:
bottom = ybound
if top > ybound:
top = ybound
if right >= 180:
right = 179.999
if left >= 180:
left = 179.999
if dx_meter:
# Find out the zoom so that we are close to the desired accuracy
lat = np.max(np.abs([bottom, top]))
zoom = int(np.ceil(math.log2((math.cos(lat * math.pi / 180) *
2 * math.pi * WEB_EARTH_RADUIS) /
(WEB_N_PIX * dx_meter))))
# According to this we should just always stay above 10 (sorry)
# https://github.com/tilezen/joerd/blob/master/docs/data-sources.md
zoom = 10 if zoom < 10 else zoom
# Code from planetutils
size = 2 ** zoom
xt = lambda x: int((x + 180.0) / 360.0 * size)
yt = lambda y: int((1.0 - math.log(math.tan(math.radians(y)) +
(1 / math.cos(math.radians(y))))
/ math.pi) / 2.0 * size)
tiles = []
for x in range(xt(left), xt(right) + 1):
for y in range(yt(top), yt(bottom) + 1):
tiles.append('/'.join(map(str, [zoom, x, str(y) + '.tif'])))
return tiles
def get_demo_file(fname):
"""Returns the path to the desired OGGM-sample-file.
If Sample data is not cached it will be downloaded from
https://github.com/OGGM/oggm-sample-data
Parameters
----------
fname : str
Filename of the desired OGGM-sample-file
Returns
-------
str
Absolute path to the desired file.
"""
d = download_oggm_files()
if fname in d:
return d[fname]
else:
return None
def get_wgms_files():
"""Get the path to the default WGMS-RGI link file and the data dir.
Returns
-------
(file, dir) : paths to the files
"""
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'wgms')
datadir = os.path.join(sdir, 'mbdata')
assert os.path.exists(datadir)
outf = os.path.join(sdir, 'rgi_wgms_links_20200415.csv')
outf = pd.read_csv(outf, dtype={'RGI_REG': object})
return outf, datadir
def get_glathida_file():
"""Get the path to the default GlaThiDa-RGI link file.
Returns
-------
file : paths to the file
"""
# Roll our own
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'glathida')
outf = os.path.join(sdir, 'rgi_glathida_links.csv')
assert os.path.exists(outf)
return outf
def get_rgi_dir(version=None, reset=False):
"""Path to the RGI directory.
If the RGI files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI directory
"""
with get_lock():
return _get_rgi_dir_unlocked(version=version, reset=reset)
def _get_rgi_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
raise InvalidParamsError('The RGI data directory has to be'
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
rgi_dir = os.path.join(rgi_dir, 'RGIV' + version)
mkdir(rgi_dir, reset=reset)
if version == '50':
dfile = 'http://www.glims.org/RGI/rgi50_files/rgi50.zip'
elif version == '60':
dfile = 'http://www.glims.org/RGI/rgi60_files/00_rgi60.zip'
elif version == '61':
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/rgi_61.zip'
elif version == '62':
dfile = 'https://cluster.klima.uni-bremen.de/~oggm/rgi/rgi62.zip'
test_file = os.path.join(rgi_dir,
'*_rgi*{}_manifest.txt'.format(version))
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(rgi_dir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + rgi_dir)
return rgi_dir
def get_rgi_region_file(region, version=None, reset=False):
"""Path to the RGI region file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '01' to '19'
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI shapefile
"""
rgi_dir = get_rgi_dir(version=version, reset=reset)
f = list(glob.glob(rgi_dir + "/*/{}_*.shp".format(region)))
assert len(f) == 1
return f[0]
def get_rgi_glacier_entities(rgi_ids, version=None):
"""Get a list of glacier outlines selected from their RGI IDs.
Will download RGI data if not present.
Parameters
----------
rgi_ids : list of str
the glaciers you want the outlines for
version : str
the rgi version
Returns
-------
geopandas.GeoDataFrame
containing the desired RGI glacier outlines
"""
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
if version is None:
version = rgi_ids[0].split('-')[0][-2:]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_region_file(reg, version=version))
selection.append(sh.loc[sh.RGIId.isin(rgi_ids)])
# Make a new dataframe of those
selection = pd.concat(selection)
selection.crs = sh.crs # for geolocalisation
if len(selection) != len(rgi_ids):
raise RuntimeError('Could not find all RGI ids')
return selection
def get_rgi_intersects_dir(version=None, reset=False):
"""Path to the RGI directory containing the intersect files.
If the files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the intersects before redownloading them
Returns
-------
str
path to the directory
"""
with get_lock():
return _get_rgi_intersects_dir_unlocked(version=version, reset=reset)
def _get_rgi_intersects_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
raise InvalidParamsError('The RGI data directory has to be'
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
mkdir(rgi_dir)
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/'
dfile += 'RGI_V{}_Intersects.zip'.format(version)
if version == '62':
dfile = ('https://cluster.klima.uni-bremen.de/~oggm/rgi/'
'rgi62_Intersects.zip')
odir = os.path.join(rgi_dir, 'RGI_V' + version + '_Intersects')
if reset and os.path.exists(odir):
shutil.rmtree(odir)
# A lot of code for backwards compat (sigh...)
if version in ['50', '60']:
test_file = os.path.join(odir, 'Intersects_OGGM_Manifest.txt')
if not os.path.exists(test_file):
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
if not os.path.exists(test_file):
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
else:
test_file = os.path.join(odir,
'*ntersect*anifest.txt'.format(version))
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
return odir
def get_rgi_intersects_region_file(region=None, version=None, reset=False):
"""Path to the RGI regional intersect file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '00' to '19', with '00' being the global file (deprecated).
From RGI version '61' onwards, please use `get_rgi_intersects_entities`
with a list of glaciers instead of relying to the global file.
version : str
'5', '6', '61'... defaults the one specified in cfg.PARAMS
reset : bool
If True, deletes the intersect file before redownloading it
Returns
-------
str
path to the RGI intersects shapefile
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
rgi_dir = get_rgi_intersects_dir(version=version, reset=reset)
if region == '00':
if version in ['50', '60']:
version = 'AllRegs'
region = '*'
else:
raise InvalidParamsError("From RGI version 61 onwards, please use "
"get_rgi_intersects_entities() instead.")
f = list(glob.glob(os.path.join(rgi_dir, "*", '*intersects*' + region +
'_rgi*' + version + '*.shp')))
assert len(f) == 1
return f[0]
def get_rgi_intersects_entities(rgi_ids, version=None):
"""Get a list of glacier intersects selected from their RGI IDs.
Parameters
----------
rgi_ids: list of str
list of rgi_ids you want to look for intersections for
version: str
'5', '6', '61'... defaults the one specified in cfg.PARAMS
Returns
-------
geopandas.GeoDataFrame
with the selected intersects
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_intersects_region_file(reg,
version=version))
selection.append(sh.loc[sh.RGIId_1.isin(rgi_ids) |
sh.RGIId_2.isin(rgi_ids)])
# Make a new dataframe of those
selection = pd.concat(selection)
selection.crs = sh.crs # for geolocalisation
return selection
def is_dem_source_available(source, lon_ex, lat_ex):
"""Checks if a DEM source is available for your purpose.
This is only a very rough check! It doesn't mean that the data really is
available, but at least it's worth a try.
Parameters
----------
source : str, required
the source you want to check for
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
Returns
-------
True or False
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
def _in_grid(grid_json, lon, lat):
i, j = cfg.DATA['dem_grids'][grid_json].transform(lon, lat,
maskout=True)
return np.all(~ (i.mask | j.mask))
if source == 'GIMP':
return _in_grid('gimpdem_90m_v01.1.json', lon_ex, lat_ex)
elif source == 'ARCTICDEM':
return _in_grid('arcticdem_mosaic_100m_v3.0.json', lon_ex, lat_ex)
elif source == 'RAMP':
return _in_grid('AntarcticDEM_wgs84.json', lon_ex, lat_ex)
elif source == 'REMA':
return _in_grid('REMA_100m_dem.json', lon_ex, lat_ex)
elif source == 'ALASKA':
return _in_grid('Alaska_albers_V3.json', lon_ex, lat_ex)
elif source == 'TANDEM':
return True
elif source == 'AW3D30':
return np.min(lat_ex) > -60
elif source == 'MAPZEN':
return True
elif source == 'DEM3':
return True
elif source == 'ASTER':
return True
elif source == 'SRTM':
return np.max(np.abs(lat_ex)) < 60
elif source == 'COPDEM':
return True
elif source == 'NASADEM':
return (np.min(lat_ex) > -56) and (np.max(lat_ex) < 60)
elif source == 'USER':
return True
elif source is None:
return True
def default_dem_source(rgi_id):
"""Current default DEM source at a given location.
Parameters
----------
rgi_id : str
the RGI id
Returns
-------
the chosen DEM source
"""
rgi_reg = 'RGI{}'.format(rgi_id[6:8])
rgi_id = rgi_id[:14]
if cfg.DEM_SOURCE_TABLE.get(rgi_reg) is None:
fp = get_demo_file('rgi62_dem_frac.h5')
cfg.DEM_SOURCE_TABLE[rgi_reg] = | pd.read_hdf(fp, key=rgi_reg) | pandas.read_hdf |
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaModel
from shorttext.utils import standard_text_preprocessor_1
import pandas as pd
import os
dir = os.getcwd()
#Create test set corpus
test = pd.read_csv('test_set.csv')
pre = standard_text_preprocessor_1()
test['processed'] = test['response_text'].apply(pre)
test_corpus = test['processed'].apply(lambda x : x.split(' '))
dict_test = Dictionary(test_corpus)
bow_corpus_test = [dict_test.doc2bow(doc) for doc in test_corpus]
#Create training set corpus
train = | pd.read_csv('train_set.csv') | pandas.read_csv |
# Modified from
# https://github.com/bhattbhavesh91/cowin-vaccination-slot-availability
import datetime
import json
import numpy as np
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
# Faking chrome browser
browser_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
df_18 = pd.DataFrame()
df_45 = pd.DataFrame()
st.set_page_config(layout='wide', initial_sidebar_state='collapsed')
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_mapping():
df = pd.read_csv("./district_list.csv")
return df
def filter_column(df, col, value):
df_temp = deepcopy(df.loc[df[col] == value, :])
return df_temp
def filter_capacity(df, col, value):
df_temp = deepcopy(df.loc[df[col] > value, :])
return df_temp
dictfilt = lambda x, y: dict([ (i,x[i]) for i in x if i in set(y) ])
mapping_df = load_mapping()
mapping_dict = pd.Series(mapping_df["district id"].values,
index = mapping_df["district name"].values).to_dict()
rename_mapping = {
'date': 'Date',
'min_age_limit': 'Minimum Age Limit',
'available_capacity': 'Available Capacity',
'vaccine': 'Vaccine',
'pincode': 'Pincode',
'name': 'Hospital Name',
'state_name' : 'State',
'district_name' : 'District',
'block_name': 'Block Name',
'fee_type' : 'Fees'
}
st.markdown("<h1 style='text-align: center; color: white;'>CoWin Vaccine Availability</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: yellow;'>The CoWIN APIs are geo-fenced so sometimes you may not see an output! Please try after sometime</h3>", unsafe_allow_html=True)
unique_districts = list(mapping_df["district name"].unique())
unique_districts.sort()
left_column_1, right_column_1 = st.beta_columns(2)
with right_column_1:
numdays = st.slider('Select Date Range', 0, 100, 5)
with left_column_1:
dist_inp = st.multiselect('Select District', unique_districts) #Changed to Multi select
DIST_ID = dictfilt(mapping_dict,dist_inp).values()
base = datetime.datetime.today()
date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]
date_str = [x.strftime("%d-%m-%Y") for x in date_list]
final_df = None
for INP_DATE in date_str:
for distid in DIST_ID: # Added a loop for District
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id={}&date={}".format(distid, INP_DATE) #Changed to Non Public API
response = requests.get(URL, headers=browser_header)
if (response.ok) and ('centers' in json.loads(response.text)):
resp_json = json.loads(response.text)['centers']
if resp_json is not None:
df = pd.DataFrame(resp_json)
if len(df):
df = df.explode("sessions")
df['min_age_limit'] = df.sessions.apply(lambda x: x['min_age_limit'])
df['vaccine'] = df.sessions.apply(lambda x: x['vaccine'])
df['available_capacity'] = df.sessions.apply(lambda x: x['available_capacity'])
df['date'] = df.sessions.apply(lambda x: x['date'])
df = df[["date", "available_capacity", "vaccine", "min_age_limit", "pincode", "name", "state_name", "district_name", "block_name", "fee_type"]]
if final_df is not None:
final_df = pd.concat([final_df, df])
else:
final_df = deepcopy(df)
else:
st.error("No rows in the data Extracted from the API")
if len(DIST_ID):
if (final_df is not None) and (len(final_df)):
final_df.drop_duplicates(inplace=True)
final_df.rename(columns=rename_mapping, inplace=True)
center_column_2a, center_column_2b = st.beta_columns(2)
with center_column_2a:
option_18 = st.checkbox('18+')
with center_column_2b:
option_45 = st.checkbox('45+')
if option_18:
df_18 = filter_column(final_df, "Minimum Age Limit", 18)
if option_45:
df_45 = filter_column(final_df, "Minimum Age Limit", 45)
if (option_18) or (option_45):
final_df = | pd.concat([df_18,df_45]) | pandas.concat |
"""
@authors: <NAME> / <NAME>
goal: edf annotation reader
Modified: <NAME>, Stanford University, 2018
"""
import re
import numpy as np
import pandas as pd
import xmltodict
def read_edf_annotations(fname, annotation_format="edf/edf+"):
"""read_edf_annotations
Parameters:
-----------
fname : str
Path to file.
Returns:
--------
annot : DataFrame
The annotations
"""
with open(fname, 'r', encoding='utf-8',
errors='ignore') as annotions_file:
tal_str = annotions_file.read()
if "edf" in annotation_format:
if annotation_format == "edf/edf+":
exp = '(?P<onset>[+\-]\d+(?:\.\d*)?)' + \
'(?:\x15(?P<duration>\d+(?:\.\d*)?))?' + \
'(\x14(?P<description>[^\x00]*))?' + '(?:\x14\x00)'
elif annotation_format == "edf++":
exp = '(?P<onset>[+\-]\d+.\d+)' + \
'(?:(?:\x15(?P<duration>\d+.\d+)))' + \
'(?:\x14\x00|\x14(?P<description>.*?)\x14\x00)'
annot = [m.groupdict() for m in re.finditer(exp, tal_str)]
good_annot = pd.DataFrame(annot)
good_annot = good_annot.query('description != ""').copy()
good_annot.loc[:, 'duration'] = good_annot['duration'].astype(float)
good_annot.loc[:, 'onset'] = good_annot['onset'].astype(float)
elif annotation_format == "xml":
annot = xmltodict.parse(tal_str)
annot = annot['PSGAnnotation']["ScoredEvents"]["ScoredEvent"]
good_annot = pd.DataFrame(annot)
return good_annot
def resample_30s(annot):
"""resample_30s: to resample annot dataframe when durations are multiple
of 30s
Parameters:
-----------
annot : pandas dataframe
the dataframe of annotations
Returns:
--------
annot : pandas dataframe
the resampled dataframe of annotations
"""
annot["start"] = annot.Start.values.astype(np.float).astype(np.int)
df_end = annot.iloc[[-1]].copy()
df_end['start'] += df_end['Duration'].values.astype(np.float)
df_end.index += 1
annot = annot.append(df_end)
annot = annot.set_index('start')
annot.index = pd.to_timedelta(annot.index, unit='s')
annot = annot.resample('30s').ffill()
annot = annot.reset_index()
annot['duration'] = 30.
onset = np.zeros(annot.shape[0])
onset[1:] = annot["duration"].values[1:].cumsum()
annot["onset"] = onset
return annot.iloc[:-1] # Return without the last row (which we inserted in order to fill the last row correctly).
def parse_hypnogram(annot_f, annotation_format="edf++"):
"""parse_hypnogram: keep only annotations related to sleep stages
Parameters:
-----------
annot_f : string
The name of the annotation file
annotation_format : string, optional (default="edf++")
The annotation format
Returns:
--------
good_annot : pandas dataframe
dataframe of annotations related to sleep stages
"""
annot = read_edf_annotations(annot_f, annotation_format=annotation_format)
if annotation_format == "edf++":
# process annot for sleep stages
annot = annot[annot.description.str.startswith('Sleep')].reset_index()
annot["idx_stage"] = np.arange(annot.shape[0])
stages = | pd.DataFrame() | pandas.DataFrame |
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from sklearn.metrics import precision_recall_fscore_support
from statsmodels.stats.inter_rater import fleiss_kappa
__author__ = '<NAME>'
pd.set_option('max_colwidth', 999)
pd.set_option('display.max_rows', 999)
pd.set_option('display.max_columns', 999)
ALL_CATS = ('positive', 'negative', 'neutral', 'mixed')
TERNARY_CATS = ('positive', 'negative', 'neutral')
def load_dataset(*src_filenames, labels=None):
data = []
for filename in src_filenames:
with open(filename) as f:
for line in f:
d = json.loads(line)
if labels is None or d['gold_label'] in labels:
data.append(d)
return data
def get_label_distribution(*splits, dist_labels=False):
if dist_labels:
all_labels = []
for split in splits:
for d in split:
dist = d['label_distribution']
all_labels += [label for label, ids in dist.items()
for _ in range(len(ids))]
series = pd.Series(all_labels)
else:
df = pd.concat((pd.DataFrame(split) for split in splits))
series = df.gold_label
series = series.fillna("No Majority")
dist = series.value_counts(dropna=False)
dist['Total'] = dist.sum()
return dist
def get_label_model_relationship(*splits, model_colname='model_0_label'):
all_splits = sum(splits, [])
df = pd.DataFrame(all_splits)
return df.groupby(['gold_label', model_colname]).apply(len)
def get_adversarial_rate(*splits, model_colname='model_0_label', labels=None):
dist = get_label_model_relationship(*splits, model_colname=model_colname)
dist = dist.reset_index().rename(columns={0: 'examples'})
total = dist.examples.sum()
if labels is not None:
dist = dist[dist.gold_label.isin(labels)]
adversarial = dist[dist.gold_label != dist[model_colname]]
return adversarial.examples.sum(), total
def get_label_rating_relationship(*splits):
all_splits = sum(splits, [])
df = pd.DataFrame(all_splits)
return df.groupby(['gold_label', 'review_rating']).apply(len)
def get_dist_of_majority_dists(split):
data = []
for d in split:
if d['gold_label']:
dist = sorted([(len(v), k) for k, v in d['label_distribution'].items()])
c = dist[-1][0]
data.append(c)
return | pd.Series(data) | pandas.Series |
"""Functions for modeling the avalanche risk levels
"""
import sys
sys.path.append("/home/daniel/Schreibtisch/Projekte/avalanche-risk")
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import re
from eda.functions_eda import plot_correlations, plot_missing_values
from imblearn.over_sampling import SMOTE, SMOTENC
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.impute import KNNImputer
from sklearn.linear_model import (ElasticNet, Lasso, LinearRegression,
LogisticRegression, Ridge)
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import (classification_report, confusion_matrix,
mean_squared_error, r2_score)
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import (MinMaxScaler, PowerTransformer,
StandardScaler)
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
WEATHER_FILEPATH = "../data/lawinenwarndienst/weather_data/"
def preprocess_X_values(df, agg_func):
"""Preprocess features for modeling
Args:
df (DataFrame): DataFrame with timeSeries index and numerical features
agg_func (str): Function to aggregate time series data on a daily level
Returns:
DataFrame: Preprocessed features
"""
# Aggregate with agg_func per day
df = getattr(df.groupby(df.index.date), agg_func)()
df.index = pd.to_datetime(df.index, format="%Y-%m-%d")
# Impute
knn = KNNImputer(n_neighbors = 10)
# Scale
scale = StandardScaler()
pipeline = make_pipeline(knn, scale)
out = pd.DataFrame(pipeline.fit_transform(df), columns = df.columns, index = df.index)
return out
def get_shifted_features(df, min_shift, max_shift):
"""Get a time series DataFrame which is shifted between min_shift and max_shift days backwards
Args:
df (DataFrame): DataFrame with timeSeries index and numerical features
min_shift (int): Minimum number of days to shift
max_shift (int): Maximum number of days to shift
Returns:
DataFrame: DataFrame with shifted features
"""
out = pd.DataFrame(index = df.index)
for shift in range(min_shift, max_shift+1):
data = df.shift(shift)
data.columns = df.columns + f"-{shift}"
out = out.join(data).dropna()
return out
def preprocess(X_train, y_train, agg_func, min_shift, max_shift, include_y, smote, **kwargs):
"""Wrapper function to do all feature preprocessing (aggregate, impute, scale, shift, drop NAs, smote)
Args:
X_train (DataFrame): DataFrame with timeSeries index and numerical features
y_train (pd.Series): pd.Series with timeSeries index containing the target variable
agg_func (str): Function to aggregate time series data on a daily level
min_shift (int): Minimum number of days to shift
max_shift (int): Maximum number of days to shift
include_y (bool): Should the time shifted dependent variable be added as a predictor to the model?
smote (bool): Should SMOTE be performed on the model data? This can be useful in case of an imbalanced dataset.
Returns:
X: DataFrame with preprocessed features
y: pd.Series with target variable in the same date range
"""
# Impute, Scale
X_train_prep = preprocess_X_values(X_train, agg_func = agg_func)
if include_y:
X_train_prep = X_train_prep.join(y_train)
# Shift
X_train_shifted = get_shifted_features(X_train_prep, min_shift = min_shift, max_shift = max_shift) # this function also removes NAs
# Merge to align date index
train = X_train_shifted.join(y_train).dropna(subset = y_train.columns)
X = train.iloc[:,:-1]
y = train.iloc[:,-1]
# SMOTE (important: AFTER getting the lags)
if smote:
kwargs.setdefault("k_neighbors", 5)
assert all(y.value_counts() > kwargs["k_neighbors"]), "SMOTE will fail, because some levels of y have less occurences than 'k_neighbors', which is set to 5 as a standard. Specify a lower 'k_neighbors' via **kwargs or set smote to False (and execute it with a larger dataset)."
sm = SMOTE(random_state = 10, **kwargs)
X, y = sm.fit_resample(X, y)
return X, y
def upsample(X, y, **kwargs):
"""Function to upsample a dataset by means of SMOTENC. Useful for imbalanced data.
Args:
X (DataFrame): DataFrame containing the predicting variables
y (pd.Series): pd.Series containing the target variable
Returns:
X: DataFrame containing the upsampled predicting variables
y: pd.Series containing the upsampled target variable
"""
kwargs.setdefault("k_neighbors", 5)
assert all(y.value_counts() > kwargs["k_neighbors"]), "SMOTE will fail, because some levels of y have less occurences than 'k_neighbors', which is set to 5 as a standard. Specify a lower 'k_neighbors' via **kwargs or set smote to False (and execute it with a larger dataset)."
sm = SMOTENC(random_state = 10, **kwargs)
X, y = sm.fit_resample(X, y)
return X, y
def import_preprocess_region(region, metrics, agg_func, min_shift, max_shift, include_y, smote):
"""Wrapper function to import and preprocess data from a single avalanche warning region
Args:
region (str): The region to import and preprocess. One of ["allgaeu", "ammergau", "werdenfels", "voralpen", "chiemgau", "berchtesgaden"]
metrics (list): List of strings for the predictor variables which should be retained
agg_func (str): Function to aggregate time series data on a daily level
min_shift (int): Minimum number of days to shift
max_shift (int): Maximum number of days to shift
include_y (bool): Should the time shifted dependent variable be added as a predictor to the model?
smote (bool): Should SMOTE be performed on the model data? This can be useful in case of an imbalanced dataset.
Returns:
X_train: DataFrame containing preprocessed predictor variables for training set
y_train: pd.Series containing the target variable for training
X_test: DataFrame containing preprocessed predictor variables for test set
y_test: pd.Series containing the target variable for testing
"""
data = pickle.load(open(WEATHER_FILEPATH + f"pickles/{region}.p", "rb"))
# Filter the relevant metrics
data = data[metrics]
plot = plot_missing_values(data, aspect = 0.00004)
warning_levels = pickle.load(open("../data/lawinenwarndienst/warning_levels_preprocessed.p", "rb"))
warning_levels = warning_levels[(warning_levels.low_high == 1) & (warning_levels.Zone == region)][["Warnstufe"]].astype(int)
# Train-test split based on time
time_threshold = "2017-08-01"
X_train_raw = data.loc[data.index < time_threshold]
X_test_raw = data.loc[data.index >= time_threshold]
y_train_raw = warning_levels.loc[warning_levels.index < time_threshold]
y_train_raw = y_train_raw[~y_train_raw.index.duplicated()] # drop duplicates
y_test_raw = warning_levels.loc[warning_levels.index >= time_threshold]
y_test_raw = y_test_raw[~y_test_raw.index.duplicated()] # drop duplicates
# Test for NA-only columns in X_test
na_list = X_test_raw.isna().sum() == len(X_test_raw)
assert sum(na_list) == 0, f"X_test contains a feature with only NAs: {na_list.index[list(na_list).index(True)]}"
# Preprocessing
X_train, y_train = preprocess(X_train_raw, y_train_raw, agg_func = agg_func, min_shift = min_shift, max_shift = max_shift, include_y = include_y, smote = smote)
X_test, y_test = preprocess(X_test_raw, y_test_raw, agg_func = agg_func, min_shift = min_shift, max_shift = max_shift, include_y = include_y, smote = False) # no smote for test data
return X_train, y_train, X_test, y_test
def import_preprocess_multiple_regions(regions, metrics, agg_func, min_shift, max_shift, include_y, smote):
"""[summary]
Args:
regions (list): List of strings for the regions to be imported and preprocessed. One or several of ["allgaeu", "ammergau", "werdenfels", "voralpen", "chiemgau", "berchtesgaden"]
metrics (list): List of strings for the predictor variables which should be retained
agg_func (str): Function to aggregate time series data on a daily level
min_shift (int): Minimum number of days to shift
max_shift (int): Maximum number of days to shift
include_y (bool): Should the time shifted dependent variable be added as a predictor to the model?
smote (bool): Should SMOTE be performed on the model data? This can be useful in case of an imbalanced dataset.
Returns:
X_train: DataFrame containing preprocessed predictor variables for training set
y_train: pd.Series containing the target variable for training
X_test: DataFrame containing preprocessed predictor variables for test set
y_test: pd.Series containing the target variable for testing
"""
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = | pd.Series() | pandas.Series |
import collections
import csv
import datetime
import fuzzywuzzy.fuzz
import fuzzywuzzy.process
import itertools
import joblib
import libsbml
import lxml
import lxml.etree
import networkx
import numpy
import os
import operator
import pickle
import re
import simstring
import sys
########################################################################
########################################################################
# Globals
# gene_map
GENE_MAP = None
# simstring
SIMSTRING_DB = None
SBO_NODES = None
#SBO_NODES = convert_xml_to_sbonodes()
########################################################################
########################################################################
def now():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
########################################################################
########################################################################
def exists( x, elements, test = lambda x,y : x == y):
for y in elements:
if test( x, y):
return True
return False
########################################################################
########################################################################
# remove_prefixes
PREFIXES = [ "acetylated ", "activated ", "associated ", \
"bound ", \
"catabolized ", "catalyzed ", "converted ", \
"deacetylated ", "degradated ", "demethylated ", "dephosporylated ", "deubiquinated ", "dissociated ","deactivated ", \
"expressed ", \
"methylated ", \
"positively ",\
"negatively ", \
"regulated ",\
"phosphorylated ",
"regulated ",\
"transcribed ", "translated ", \
"ubiquitinated "]
def remove_prefixes( name):
global PREFIXES
new_name = name
for prefix in PREFIXES:
if prefix != None:
new_name = new_name.replace( prefix, "")
return new_name.strip()
########################################################################
########################################################################
def compute_all_is_a( node, nodes):
all_parents = set( node["is_a"])
for parent_id in node["is_a"]:
all_parents.update( compute_all_is_a( nodes[parent_id], nodes))
return all_parents
def convert_xml_to_sbonodes( file_name = "sbo.xml", output_file_name = "sbo.pickle"):
# load nodes
nodes = {}
sbo_xml = lxml.etree.fromstring( open( file_name, "rt").read())
for term in sbo_xml.xpath( "/*[local-name()='sbo']/*[local-name()='Term']"):
id = term.find( "{http://www.biomodels.net/sbo}id").text
name = term.find( "{http://www.biomodels.net/sbo}name").text
is_a = [];
if term.find( "{http://www.biomodels.net/sbo}is_a") is not None:
is_a = [el.text for el in term.findall( "{http://www.biomodels.net/sbo}is_a")]
nodes[id] = { "id" : id, "name" : name , "is_a" : is_a }
# compute all is_a for fast lookup
is_a_all = {}
for node in nodes.itervalues():
is_a_all[node["id"]] = compute_all_is_a( node, nodes)
for node in nodes.itervalues():
node["is_a"] = is_a_all[node["id"]]
if output_file_name is not None:
pickle.dump( nodes, open( output_file_name, "wb"))
return nodes;
def sbo_is_a( sbo_1, sbo_2):
"return true if sbo_1 is_a sbo_2 (if any of them is None, return true)"
global SBO_NODES
if sbo_1 == sbo_2 or sbo_1 == None or sbo_2 == None:
return True
elif sbo_1 in SBO_NODES:
return sbo_2 in SBO_NODES[sbo_1]["is_a"];
else:
return False
def sbo_is_a2( sbo_1, sbo_2):
"Return true if is a either direction"
return sbo_is_a( sbo_1, sbo_2) or sbo_is_a( sbo_2, sbo_1)
def sbo_name( sbo_1):
global SBO_NODES
return SBO_NODES[sbo_1]["name"]
def load_sbo( file_name = "sbo.pickle"):
global SBO_NODES
SBO_NODES = pickle.load( open( file_name, "rb"))
def sbo_export_graph():
global SBO_NODES
sbo_graph = networkx.DiGraph()
for node in SBO_NODES:
sbo_graph.add_node( node)
for node in SBO_NODES.values():
for parent in node["is_a"]:
sbo_graph.add_edge( node["id"], parent)
export_all_graph( sbo_graph, "sbo_graph")
def sbo_export_graph_nodes( nodes, file_prefix = "test"):
""" exports hierarchy for SBO nodes"""
global SBO_NODES
sbo_graph = networkx.DiGraph()
all_nodes = nodes + [ parent for n in nodes for parent in compute_all_is_a( n) ]
for node in all_nodes:
sbo_graph.add_node( node)
for node in all_nodes:
for parent in node["is_a"]:
sbo_graph.add_edge( node["id"], parent)
export_all_graph( sbo_graph, file_prefix)
def get_terms( miriam_urns):
""" takes a list of miriam encoded urn, e.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330']
and returns the strings ["GO:0016579", "SBO:0000330"] """
return [ i[11:]for i in miriam_urns]
def get_sbo_terms( miriam_urns):
""" takes a list of miriam encoded urn, e.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330']
and returns the strings ["SBO:0000330"] """
return [ i[11:]for i in miriam_urns if i.startswith( "urn:miriam:SBO:")]
def get_sbo_int( miriam_urns):
""" takes a list of miriam encoded urn, e.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330']
and returns the integers [330] """
return [ int( i[15:]) for i in miriam_urns if i.startswith( "urn:miriam:SBO:")]
########################################################################
########################################################################
ST_SBO_GO_MAP = { # degradation
'acetylation': 'SBO:0000215',
'activation': 'SBO:0000170',
'association': 'SBO:0000297',
'binding': 'SBO:0000297',
'catabolism': 'GO:0009056',
'catalysis': 'SBO:0000172',
'conversion': 'SBO:0000182',
'deacetylation': 'GO:0006476',
'degradation': 'SBO:0000179',
'demethylation': 'GO:0006482',
'dephosphorylation': 'SBO:0000330',
'deubiquitination': 'GO:0016579',
'dissociation': 'SBO:0000180',
'gene_expression': 'SBO:0000205',
'inactivation': 'SBO:0000169',
'localization': 'GO:0051179',
'methylation': 'SBO:0000214',
'negative_regulation': 'SBO:0000169',
'pathway': 'SBO:0000375',
'phosphorylation': 'SBO:0000216',
'positive_regulation': 'SBO:0000170',
'protein_catabolism': 'SBO:0000179',
'regulation': 'SBO:0000168',
'transcription': 'SBO:0000183',
'translation': 'SBO:0000184',
'transport': 'SBO:0000185',
'ubiquitination': 'SBO:0000224'}
SBO_GO_ST_MAP = { v : k for k, v in ST_SBO_GO_MAP.iteritems()}
def sbo_go_name( urn_miriam):
if urn_miriam.startswith( "urn:miriam:"):
urn_miriam = urn_miriam[11:]
if urn_miriam in SBO_GO_ST_MAP:
return SBO_GO_ST_MAP[urn_miriam]
elif urn_miriam.startswith( "SBO:"):
return sbo_name( urn_miriam)
else:
return urn_miriam
def sbo_go_name_known( urn_miriam):
if urn_miriam.startswith( "urn:miriam:"):
urn_miriam = urn_miriam[11:]
if urn_miriam in SBO_GO_ST_MAP:
return True
elif urn_miriam.startswith( "SBO:"):
return True
else:
return False
########################################################################
########################################################################
def clean_name( name):
return remove_prefixes( name.lower()).strip()
def clean_name2( name):
return re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( name.lower())).strip()
def names( graph):
return [graph.node[n].get("name") for n in graph.nodes() if graph.node[n].get("name")]
def names_clean( graph):
return [ remove_prefixes( graph.node[n].get("name").lower()) for n in graph.nodes() if graph.node[n].get("name")]
def names_clean2( graph):
return [ re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( graph.node[n].get("name").lower())) for n in graph.nodes() if graph.node[n].get("name")]
########################################################################
########################################################################
def sort_edge_signature( signature, graph):
""" takes (species122,reaction122,"product") and returns (reaction122,species122,"product") """
if signature[2] == "reactant" and graph.node[signature[0]]["type"] != "species":
return (signature[1],signature[0],signature[2])
elif signature[2] == "product" and graph.node[signature[1]]["type"] != "species":
return (signature[1],signature[0],signature[2])
elif signature[2] == "modifier" and graph.node[signature[0]]["type"] != "species":
return (signature[1],signature[0],signature[2])
else:
return signature
def edge_signatures( graph):
signatures = set([ sort_edge_signature( (edge[0], edge[1], edge[2]["type"]), graph) for edge in graph.edges( data = True)])
assert( len(signatures) == len( graph.edges()))
return signatures
########################################################################
########################################################################
def create_gene_map( chilibot = True, hugo = True, human_entrez = False):
lists = []
print( "create_gene_map")
print("Loading data")
if chilibot:
with open( "gene_list_chilibot.txt", "rt") as f:
txt = f.read()
for line in txt.strip().split("\n"):
line = line.strip(";")
synonyms = [ line.split( "|")[0].strip()] + line.split( "|")[1].split( ";")
lists.append( set( [s.lower() for s in synonyms]))
if hugo:
with open('gene_list_hugo.txt', 'rU') as f:
csv_list = csv.reader( f, delimiter = '\t')
for row in csv_list:
lists.append( set( [ s.lower() for s in filter( bool, row) if s != ""]))
if human_entrez:
with open('gene_list_human_entrez.txt', 'r') as f:
lines = f.read().split("\n")
lines.pop(0) # remove first line
for line in lines:
synonyms = [s.lower() for s in line.strip().split("\t")]
synonyms.pop(0)
lists.append( set(synonyms))
print("Merging lists")
dict_forward = {} # maps el : value
dict_backward = {} # maps val : list of elements
new_value_counter = 0
for idx, l in enumerate(lists):
if idx % 10000 == 0:
print( "Processed %i" % idx)
new_value_counter += 1
new_value = new_value_counter
# compute overlap_values - those values overlapping
overlap_values = set()
for e in l:
if e in dict_forward:
overlap_values.add( dict_forward[e])
elements = set(l) # initialize elements with known values
if overlap_values != set():
new_value = new_value_counter
new_value_counter += 1
# update elements with known values
for val in overlap_values:
elements.update( dict_backward[val])
# update dict_forward
for e in elements:
dict_forward[e] = new_value
# update dict_backward
for val in overlap_values:
del dict_backward[val]
dict_backward[new_value] = elements
else: # no overlap found, just add elements to dicts
for e in elements:
dict_forward[e] = new_value
dict_backward[new_value] = elements
lists = list(dict_backward.values())
print("Merging lists finished (%i total sets)" % len( lists))
print("Computing gene map")
gene_map = {}
for l in lists:
listt = [ re.sub('[^a-zA-Z0-9-]', ' ', e.lower()) for e in l if e != ""]
if listt != []:
val = listt[0]
for l in listt:
gene_map[l] = val
print("Computing gene map (%i total names/genes)" % len( gene_map))
print("Exporting gene map")
pickle.dump( gene_map, open( "gene_map.pickle", "wb"))
return gene_map
def create_simstring_txt( gene_map):
""" Creates gene_list.txt for usage in simstring db
use: simstring -b -d gene_list.simstring < gene_list.txt
afterwards to create simstring"""
print( "create_simstring_txt")
with open( "gene_list.txt", "wt") as f:
f.write( "\n".join( gene_map.keys() + list( set( gene_map.values()))))
def create_simstring_db():
""" Creates simstring database
use: simstring -b -d gene_list.simstring < gene_list.txt"""
import commands
print( "create_simstring_db")
ret = commands.getstatusoutput('simstring -b -d gene_list.simstring < gene_list.txt')
print( ret)
print( "create_simstring_db finished")
def create_gene_map_AND_simstring_db():
gene_map = create_gene_map()
# gene_map = pickle.load( open( "gene_map.pickle", "rb"))
create_simstring_txt( gene_map)
create_simstring_db()
#######################
def map_gene_fuzzywuzzy( name, threshold = 90):
global GENE_MAP
assert(GENE_MAP)
clean_name = clean_name2( name)
if GENE_MAP.get( clean_name):
return set( [GENE_MAP[clean_name]])
else:
results = set()
for k in GENE_MAP.keys():
if fuzzywuzzy.fuzz.ratio( clean_name, k) > threshold:
results.add( GENE_MAP[k])
if results != set():
return results
else:
return None
def map_gene_simstring( name):
"retrieves gene_map results by simstring matching and lookup"
global GENE_MAP, SIMSTRING_DB
assert( GENE_MAP and SIMSTRING_DB)
clean_name = clean_name2( name)
if GENE_MAP.get( clean_name):
return set( [GENE_MAP[clean_name]])
else:
results = SIMSTRING_DB.retrieve( clean_name)
if results:
return set( [GENE_MAP[r] for r in results])
else:
return None
def export_mapping( mapping, file_name):
with open( file_name, "wt") as f:
f.write( "\n".join( [ "{} : {}".format( k, ",".join( [str(v) for v in values])) for k, values in mapping.itervalues()]))
def compute_simstring_coverage( names, thresholds = [ i/10.0 for i in range(1, 10)], measure = simstring.cosine):
results = []
for t in thresholds:
db = simstring.reader( 'gene_list.simstring')
db.measure = measure
db.threshold = t
results.append( [ True for n in names if map_gene_simstring(n, db)].count( True) / float( len( names)))
return results
########################################################################
########################################################################
def export_graph( graph, graph_name, prog = "dot"):
agraph = networkx.to_agraph( graph)
## "neato"|"dot"|"twopi"|"circo"|"fdp"|"nop"
agraph.layout( prog = prog)
file_name = graph_name + "_" + prog + ".pdf"
agraph.draw( file_name)
print( "Exported {}".format( file_name))
def export_all_graph( graph, graph_name):
for prog in ["neato", "dot", "twopi", "circo", "fdp"]:
export_graph( graph, graph_name, prog = prog)
########################################################################
########################################################################
def load_sbml( file_name):
reader = libsbml.SBMLReader()
document = reader.readSBML( file_name)
print( "Loaded {} ({} errors)".format( file_name, document.getNumErrors()))
return document
def get_participants_species( species, prefix, model):
""" Takes an SBML species and returns its participants (mTOR)"""
annotation = species.getAnnotation()
if annotation == None:
return []
# retrieve path
annotation_path_names = [ 'RDF', 'Participants']
current_state = annotation
for name in annotation_path_names:
last_state = current_state
current_state = None
for i in xrange( last_state.getNumChildren()):
if last_state.getChild(i).getName() == name:
current_state = last_state.getChild(i)
if current_state == None:
break
# retrieve participants
participants = []
if current_state != None:
for idx in range( current_state.getNumChildren()):
child = current_state.getChild( idx)
if child.getName() != 'Participant':
sys.stderr.write( "\nERROR: unexpected participant xml name {}".format( prefix + species.getId()))
sys.stderr.flush()
elif child.getAttrValue("participant") == "":
sys.stderr.write( "\nERROR: unexpected participant attribute value {}".format( prefix + species.getId()))
sys.stderr.flush()
elif model.getSpecies( child.getAttrValue("participant")) == None:
sys.stderr.write( "\nERROR: participant {} does not exist in model (species: {})".format( child.getAttrValue("participant"), prefix + species.getId()))
sys.stderr.flush()
else:
participants.append( child.getAttrValue("participant"))
return participants
def create_graph( model, prefix = "", ignore_participant_graph = False,
skip_uris = ["urn:miriam:reactome", "urn:miriam:pubmed", "urn:miriam:ec"]):
graph = networkx.Graph();
# add species
for species in model.getListOfSpecies():
bqbiol_is = []
bqbiol_has_part = []
bqbiol_has_version = []
if species.getCVTerms() != None:
for term in species.getCVTerms():
uris = [ term.getResourceURI( idx) for idx in xrange( term.getNumResources()) if not any( term.getResourceURI( idx).startswith(s) for s in skip_uris)]
if term.getBiologicalQualifierType() in [libsbml.BQB_IS, libsbml.BQB_IS_HOMOLOG_TO]:
bqbiol_is.extend( uris)
elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_PART:
bqbiol_has_part.extend( uris)
elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_VERSION:
bqbiol_has_version.extend( uris)
sbo = species.getSBOTerm()
if sbo == -1:
sbo = None;
sbo_str = None;
else:
sbo_str = "SBO:{0:07d}".format( sbo)
annotation = {}
for prefix in PREFIXES:
annotation[ prefix.strip()] = species.getName().count( prefix)
if species.getCompartment() == "default":
compartment = None
compartment_id = None
else:
compartment = model.getCompartment( species.getCompartment()).getName().lower().strip()
compartment_id = species.getCompartment()
node_data = { "type" : "species",
"id" : prefix + species.getId(),
"name" : species.getName(),
"compartment" : compartment,
"compartment_id" : compartment_id,
"bqbiol_is" : tuple( sorted( set( bqbiol_is))),
"bqbiol_has_part" : tuple( sorted( set( bqbiol_has_part))),
"bqbiol_has_version" : tuple( sorted( set( bqbiol_has_version))),
"sbo" : sbo,
"sbo_str" : sbo_str,
"participants" : [],
"participant_ids" : [],
"annotation" : annotation};
graph.add_node( prefix + species.getId(), node_data)
# add species reactions
for reaction in model.getListOfReactions():
bqbiol_is = []
bqbiol_has_part = []
bqbiol_has_version = []
if reaction.getCVTerms() != None:
for term in reaction.getCVTerms():
uris = [ term.getResourceURI( idx) for idx in xrange( term.getNumResources()) if not any( term.getResourceURI( idx).startswith(s) for s in skip_uris)]
if term.getBiologicalQualifierType() in [libsbml.BQB_IS, libsbml.BQB_IS_HOMOLOG_TO]:
bqbiol_is.extend( uris)
elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_PART:
bqbiol_has_part.extend( uris)
elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_VERSION:
bqbiol_has_version.extend( uris)
sbo = reaction.getSBOTerm()
if sbo == -1:
sbo = None;
sbo_str = None;
else:
sbo_str = "SBO:{0:07d}".format( sbo)
bqbiol_is.append( "urn:miriam:SBO:{0:07d}".format( sbo))
graph.add_node( prefix + reaction.getId(),
{ "type" : "reaction",
"id" : prefix + reaction.getId(),
"local_id" : reaction.getId(),
"name" : reaction.getName(),
"compartment" : reaction.getCompartment(),
"bqbiol_is" : tuple( sorted( set( bqbiol_is))),
"bqbiol_has_part" : tuple( sorted( set( bqbiol_has_part))),
"bqbiol_has_version" : tuple( sorted( set( bqbiol_has_version))),
"sbo" : sbo,
"sbo_str" : sbo_str} )
# add edges
for i in xrange( model.getNumReactions()):
reaction = model.getReaction(i);
for r in xrange( reaction.getNumReactants()):
graph.add_edge( prefix + reaction.getId(), prefix + reaction.getReactant(r).getSpecies(), type = "reactant")
for p in xrange( reaction.getNumProducts()):
graph.add_edge( prefix + reaction.getId(), prefix + reaction.getProduct(p).getSpecies(), type = "product")
for m in xrange( reaction.getNumModifiers()):
graph.add_edge( prefix + reaction.getId(), prefix + reaction.getModifier(m).getSpecies(), type = "modifier")
if ignore_participant_graph:
return graph
else:
# participant graph
participant_graph = networkx.DiGraph()
graph_w_participant_edges = graph.copy()
# add participant links
for i in xrange( model.getNumSpecies()):
species = model.getSpecies(i);
graph_node = graph.node[ prefix + species.getId()]
for participant in get_participants_species( species, prefix, model):
# add participant graph edge
participant_graph.add_edge( prefix + species.getId(), prefix + participant, type = "participant")
graph_w_participant_edges.add_edge( prefix + species.getId(), prefix + participant, type = "participant")
# add participant node information to
graph_node["participant_ids"].append( prefix + participant)
graph_node["participants"].append( graph.node[prefix + participant])
graph_node["bqbiol_has_part"] = tuple( sorted( set( list( graph.node[prefix + participant]["bqbiol_has_part"]) + list( graph_node["bqbiol_has_part"]))))
return graph, participant_graph, graph_w_participant_edges
########################################################################
def bqbiol_is_map( graph):
"returns a dictionary mapping of uri to node ids {uri : set( node ids)}"
signature_map = {}
for i in graph.nodes():
node = graph.node[i]
if signature_map.get( node["bqbiol_is"]) == None:
signature_map[node["bqbiol_is"]] = [i]
else:
signature_map[node["bqbiol_is"]].append( i)
return signature_map
def get_all_bqbiol_is_uris( graph):
""" Returns all bqbiol_is uris from a graph """
unique_ids = set()
for n in graph.nodes( data = True):
if n[1].get("bqbiol_is") and n[1].get("bqbiol_is") != ():
unique_ids.update( n[1].get("bqbiol_is"))
return unique_ids
########################################################################
########################################################################
def find_nodes( graph, attribute, value):
return [ n[1] for n in graph.nodes( data = True ) if n[1].get( attribute) != None and n[1][attribute] == value]
########################################################################
########################################################################
def filter_graph_remove_species_wo_bqbiol_is( graph):
"Remove species without bqbiol_is"
graph_cpy = graph.copy()
remove_nodes = []
for node in graph_cpy.nodes( data = True):
# require nothing for reaction
n = node[1]
if n["type"] != "reaction" and n["bqbiol_is"] == ():
remove_nodes.append( node[0])
graph_cpy.remove_nodes_from( remove_nodes)
graph_cpy.name = graph.name + "-REMOVED-SPECIES-WO-BQBIOL-IS"
graph_cpy.file_name = None
return graph_cpy
def filter_graph_remove_isolated_nodes( graph):
"Remove nodes without connections"
graph_cpy = graph.copy()
graph_cpy.remove_nodes_from( networkx.isolates( graph))
graph_cpy.name = graph.name + "-NO-ISOLATED-NODES"
graph_cpy.file_name = None
return graph_cpy
def filter_graph_remove_isolated_participants( graph):
"""Remove nodes without connections that are participants -
keep isolated nodes that are not participatns"""
graph_cpy = graph.copy()
isolates = set( networkx.isolates( graph))
participants = set( [p for parts in [ [p ["id"] for p in n[1].get("participants")] for n in graph.nodes( data = True) if n[1].get("participants")] for p in parts ])
graph_cpy.remove_nodes_from( isolates.intersection( participants))
graph_cpy.name = graph.name + "-NO-ISOLATED-NODES"
graph_cpy.file_name = None
return graph_cpy
def filter_graph_remove_reactions_wo_sbo( graph):
"Remove reactions without bqbiol_is"
graph_cpy = graph.copy()
remove_nodes = []
for node in graph_cpy.nodes( data = True):
# require nothing for reaction
n = node[1]
if n["type"] != "species" and n["sbo"] == None:
remove_nodes.append( node[0])
graph_cpy.remove_nodes_from( remove_nodes)
graph_cpy.name = graph.name + "-REMOVED-REACTIONS-WO-SBO"
graph_cpy.file_name = None
return graph_cpy
def filter_reactions( graph):
"remove all nodes that are NOT a reaction"
graph_cpy = graph.copy()
non_reaction_ids = [ n[0] for n in graph_cpy.nodes( data = True) if n[1]["type"] != "reaction"]
graph_cpy.remove_nodes_from( non_reaction_ids)
graph_cpy.name = graph.name + "-REACTIONS"
graph_cpy.file_name = None
return graph_cpy
def filter_reactions_sbo( graph):
"Remove all nodes that are NOT reactions with SBO"
graph_cpy = graph.copy()
non_reaction_ids = [ n[0] for n in graph_cpy.nodes( data = True) if n[1]["type"] != "reaction" or n[1]["sbo"] == None]
graph_cpy.remove_nodes_from( non_reaction_ids)
graph_cpy.name = graph.name + "-SBO-REACTIONS"
graph_cpy.file_name = None
return graph_cpy
def filter_species( graph):
"Remove all nodes that are NOT species"
graph_cpy = graph.copy()
non_species_ids = [ n[0] for n in graph_cpy.nodes( data = True) if n[1]["type"] != "species"]
graph_cpy.remove_nodes_from( non_species_ids)
graph_cpy.name = graph.name + "-SPECIES"
graph_cpy.file_name = None
return graph_cpy
def filter_species_bqbiol_is( graph):
"Remove all nodes that are NOT species with bqbiol_is"
graph_cpy = graph.copy()
non_bqbiol_is_species_ids = [ n[0] for n in graph_cpy.nodes( data = True) if n[1]["type"] != "species" or n[1]["bqbiol_is"] == ()]
graph_cpy.remove_nodes_from( non_bqbiol_is_species_ids)
graph_cpy.name = graph.name + "-BQBIOL-IS-SPECIES"
graph_cpy.file_name = None
return graph_cpy
def filter_species_complex( graph):
"Removes all nodes that are not complex - don't have participants"
graph_cpy = graph.copy()
non_complexes = [n[0] for n in graph.nodes( data = True) if not n[1].get("participants")]
graph_cpy.remove_nodes_from( non_complexes)
graph_cpy.name = graph.name + "-COMPLEXES"
graph_cpy.file_name = None
return graph_cpy
def filter_species_complex2( graph):
"REmoves all nodes that are not complex - do not have sbo == 253"
graph_cpy = graph.copy()
non_complexes = [n[0] for n in graph.nodes( data = True) if not n[1].get("sbo") or n[1]["sbo"] != 253]
graph_cpy.remove_nodes_from( non_complexes)
graph_cpy.name = graph.name + "-COMPLEXES"
graph_cpy.file_name = None
return graph_cpy
########################################################################
########################################################################
def run_analysis( graph, export_file = None):
""" Collects some simple statistics about the graph """
import pandas
print("%s:%s: run_analysis" % (now(), graph.name))
species = filter_species( graph)
reactions = filter_reactions( graph)
edges = [n[2] for n in graph.edges( data = True)]
isolated_nodes = networkx.isolates( graph)
print("%s:%s: Computing statistics" % (now(), graph.name))
d = {"name" : graph.name,
"# nodes" : len( graph.nodes()),
"# species" : len( species.nodes()),
"# reactions" : len( reactions.nodes()),
"# edges" : len( edges),
"# edges reactant" : len( [ e for e in edges if e["type"] == "reactant"]),
"# edges product" : len( [ e for e in edges if e["type"] == "product"]),
"# edges modifier" : len( [ e for e in edges if e["type"] == "modifier"]),
"# compartments" : len(set( [species.node[s]["compartment_id"] for s in species.nodes() if species.node[s]["compartment_id"]])),
"# unique compartment names" : len(set( [species.node[s]["compartment"] for s in species.nodes() if species.node[s]["compartment"]])),
"# isolated nodes" : len(isolated_nodes),
"# isolated subgraphs" : len( list( networkx.connected_component_subgraphs( graph)))}
data = pandas.Series(d)
print("%s:%s: Results" % (now(), graph.name))
print( data)
if export_file:
print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
data.to_pickle( export_file)
print("%s:%s: Computing isolated nodes" % (now(), graph.name))
isolates = set( networkx.isolates( graph))
participants = set( [p for parts in [ [p ["id"] for p in n[1].get("participants")] for n in graph.nodes( data = True) if n[1].get("participants")] for p in parts ])
real_isolates = isolates.difference( participants) # we have to discount those that are participants in a complex
d["isolates # nodes"] = len( real_isolates)
d["isolates # species"] = len( [n for n in real_isolates if graph.node[n]["type"] == "species"])
d["isolates # reactions"] = len( [n for n in real_isolates if graph.node[n]["type"] == "reaction"])
print("%s:%s: Computing subgraphs" % (now(), graph.name))
# compute new graph with participant links
participant_edges = []
subgraphs = None
for n1 in graph.nodes(data=True):
if "participants" in n1[1] and n1[1]["participants"] != []:
participant_edges.extend( [(n1[1]["id"], n2["id"]) for n2 in n1[1]["participants"]])
if participant_edges != []:
graph = graph.copy()
[graph.add_edge( e[0], e[1], type = "participant") for e in participant_edges]
subgraphs = list( networkx.connected_component_subgraphs( graph))
elif subgraphs == None:
subgraphs = list( networkx.connected_component_subgraphs( graph))
nr_nodes = [ len( s.nodes()) for s in subgraphs]
nr_edges = [ len( s.edges()) for s in subgraphs]
d["subgraphs # subgraphs"] = len( subgraphs)
d["subgraphs # nodes min"] = min(nr_nodes)
d["subgraphs # nodes mean"] = numpy.mean( nr_nodes)
d["subgraphs # nodes median"] = numpy.median( nr_nodes)
d["subgraphs # nodes max"] = max( nr_nodes)
d["subgraphs nodes histogram"] = collections.Counter( nr_nodes)
d["subgraphs # edges min"] = min(nr_nodes)
d["subgraphs # edges mean"] = numpy.mean( nr_nodes)
d["subgraphs # edges median"] = numpy.median( nr_nodes)
d["subgraphs # edges max"] = max( nr_nodes)
d["subgraphs edges histogram"] = collections.Counter( nr_edges)
data = pandas.Series(d)
print("%s:%s: Results" % (now(), graph.name))
print( data)
if export_file:
print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
data.to_pickle( export_file)
return data
########################################################################
########################################################################
def run_analysis_signatures( graph, export_file = None, d = {}):
""" Collects some statistics about the graphs names, bqbiol is signatures etc
This takes a long time at this point. Use carefully"""
print("%s:%s: run_analysis_signatures" % (now(), graph.name))
import pandas
species = filter_species( graph)
reactions = filter_reactions( graph)
if not "name" in d.keys():
d["name"] = graph.name
## names
print("%s:%s: Computing name statistics" % (now(), graph.name))
species_names = [ species.node[n]["name"].lower() for n in species if species.node[n]["name"] != ""]
d["species % have name"] = 100. * len(species_names) / len( species.nodes())
d["species # unique names"] = len(set(species_names))
species_clean_names = set([ clean_name(species.node[n]["name"]) for n in species if species.node[n]["name"] != ""])
d["species # unique clean names"] = len(species_clean_names)
species_clean_names2 = set([ clean_name2(species.node[n]["name"]) for n in species if species.node[n]["name"] != ""])
d["species # unique clean names2"] = len(species_clean_names2)
similar_names = []
for name in species_clean_names2:
similar = filter( lambda n: fuzzywuzzy.fuzz.ratio( name, n) > 90, species_clean_names2)
similar_names.append( set( [name] + similar))
similar_names = merge( similar_names)
d["species # similar unique clean names2"] = len( similar_names)
print("%s:%s: Computing bqbiol_is statistics species" % (now(), graph.name))
species_bqbiolis = [ species.node[n]["bqbiol_is"] for n in species if species.node[n]["bqbiol_is"]]
species_bqbiolis_signature_unique = set( species_bqbiolis)
species_bqbiolis_terms = set( [ b for n in species for b in species.node[n]["bqbiol_is"]])
d["species % have bqbiol_is"] = 100. * len( species_bqbiolis) / float( len(species))
d["species # unique bqbiol_is signatures"] = len( species_bqbiolis_signature_unique)
d["species # unique bqbiol_is terms"] = len( species_bqbiolis_terms)
species_bqbiol_has_part = [ species.node[n]["bqbiol_has_part"] for n in species if species.node[n]["bqbiol_has_part"]]
species_bqbiol_has_part_signature_unique = set( species_bqbiol_has_part)
species_bqbiol_has_part_terms = set( [ b for n in species for b in species.node[n]["bqbiol_has_part"]])
d["species % have bqbiol_has_part"] = 100* len( species_bqbiol_has_part) / float( len(species))
d["species # unique bqbiol_has_part signatures"] = len( species_bqbiol_has_part_signature_unique)
d["species # unique bqbiol_has_part terms"] = len( species_bqbiol_has_part_terms)
print("%s:%s: Computing bqbiol_is statistics reactions" % (now(), graph.name))
reactions_uri = [ reactions.node[n]["bqbiol_is"] for n in reactions if reactions.node[n]["bqbiol_is"]]
reactions_uri_signature_unique = set( reactions_uri)
reactions_bqbiol_terms = [ b for n in reactions for b in reactions.node[n]["bqbiol_is"]]
reactions_bqbiol_terms_known = [ t for t in reactions_bqbiol_terms if sbo_go_name_known(t)]
reactions_bqbiol_terms_set = set( reactions_bqbiol_terms)
reactions_bqbiol_terms_known_set = set(reactions_bqbiol_terms_known)
unknown_terms = reactions_bqbiol_terms_set.difference( reactions_bqbiol_terms_known_set)
d["reactions % have bqbiol_is"] = 100* len( reactions_uri) / float( len(reactions))
d["reactions # unique bqbiol_is signatures"] = len( reactions_uri_signature_unique)
d["reactions # unique bqbiol_is terms"] = len(reactions_bqbiol_terms_set)
d["reactions # unique bqbiol_is terms known SBO/GO terms"] = len(reactions_bqbiol_terms_set)
d["reactions # unique bqbiol_is terms unknown SBO/GO terms"] = len( unknown_terms)
d["reactions bqbiol_is terms histogram"] = collections.Counter( reactions_bqbiol_terms_known)
data = pandas.Series(d)
print("%s:%s: Results" % (now(), graph.name))
print( data)
if export_file:
print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
data.to_pickle( export_file)
########################################################################
########################################################################
def run_analysis_isolated_nodes( graph):
print( "\n\nrun_analysis_isolated_nodes(%s)" % graph.name)
isolates = set( networkx.isolates( graph))
participants = set( [p for parts in [ [p ["id"] for p in n[1].get("participants")] for n in graph.nodes( data = True) if n[1].get("participants")] for p in parts ])
real_isolates = isolates.difference( participants) # we have to discount those that are participants in a complex
print( "{} isolated nodes (ignoring participant nodes) ({} isolated species, {} isolated reactions)".format(
len( real_isolates),
len( [n for n in real_isolates if graph.node[n]["type"] == "species"]),
len( [n for n in real_isolates if graph.node[n]["type"] == "reaction"])))
print( "{} isolated nodes (including isolated participant nodes) ({} isolated species, {} isolated reactions)".format(
len( isolates),
len( [n for n in isolates if graph.node[n]["type"] == "species"]),
len( [n for n in isolates if graph.node[n]["type"] == "reaction"])))
########################################################################
########################################################################
def run_analysis_subgraphs( graph, subgraphs = None):
""" Compute some statistics for subgraphs: min,max,median """
print( "run_analysis_subgraphs( %s)" % graph.name)
# compute new graph with participant links
participant_edges = []
for n1 in graph.nodes(data=True):
if "participants" in n1[1] and n1[1]["participants"] != []:
participant_edges.extend( [(n1[1]["id"], n2["id"]) for n2 in n1[1]["participants"]])
if participant_edges != []:
graph = graph.copy()
[graph.add_edge( e[0], e[1], type = "participant") for e in participant_edges]
subgraphs = list( networkx.connected_component_subgraphs( graph))
elif subgraphs == None:
subgraphs = list( networkx.connected_component_subgraphs( graph))
nr_nodes = [ len( s.nodes()) for s in subgraphs]
nr_edges = [ len( s.edges()) for s in subgraphs]
print( "{} # subgraphs".format( len( subgraphs)))
print( "{}/{}/{}/{} min/mean/median/max # nodes per subgraph".format( min(nr_nodes), numpy.mean( nr_nodes), numpy.median( nr_nodes), max( nr_nodes)))
print( "{}/{}/{}/{} min/mean/median/max # edges per subgraph".format( min(nr_edges), numpy.mean( nr_edges), numpy.median( nr_nodes), max( nr_edges)))
print()
print( "# nodes per subgraph statistics: {}".format( collections.Counter( nr_nodes)))
print( "# edges per subgraph statistics: {}".format( collections.Counter( nr_edges)))
subgraphs_no_isolates = [ s for s in subgraphs if len(s.nodes()) > 1]
nr_nodes_subgraphs_no_isolates = [ len( s.nodes()) for s in subgraphs_no_isolates]
nr_edges_subgraphs_no_isolates = [ len( s.edges()) for s in subgraphs_no_isolates]
print( "\n--\n")
print( "{} # subgraphs no isolated nodes".format( len( subgraphs_no_isolates)))
print( "{}/{}/{}/{} min/mean/median/max # nodes per subgraphs no isolated nodes".format( min( nr_nodes_subgraphs_no_isolates), numpy.mean( nr_nodes_subgraphs_no_isolates), numpy.median( nr_nodes_subgraphs_no_isolates), max( nr_nodes_subgraphs_no_isolates)))
print( "{}/{}/{}/{} min/mean/median/max # edges per subgraphs no isolated nodes".format( min( nr_edges_subgraphs_no_isolates), numpy.mean( nr_edges_subgraphs_no_isolates), numpy.median( nr_edges_subgraphs_no_isolates), max( nr_edges_subgraphs_no_isolates)))
print()
print( "# nodes per subgraph (no isolated nodes) statistics: {}".format( collections.Counter( nr_nodes_subgraphs_no_isolates)))
print( "# edges per subgraph (no isolated nodes) statistics: {}".format( collections.Counter( nr_edges_subgraphs_no_isolates)))
########################################################################
########################################################################
def run_analysis_complex_participants( graph, participant_graph):
node_dict = { n[0] : n[1] for n in graph.nodes(data = True) }
edges_dict = { n: [] for n in node_dict.keys()}
for e in graph.edges( data = True):
edges_dict[e[0]].append( (e[1],e[2]["type"]))
edges_dict[e[1]].append( (e[0],e[2]["type"]))
reaction_participants = set( [ p for e in graph.edges() for p in e])
# sbo complexes
complexes = [n[1] for n in graph.nodes() if n[1]["type"] == "species" and n[1]["sbo"] == 253]
complexes_ids = set( [ c["id"] for c in complexes])
assert( len( complexes) == len( complexes_ids))
print( "{} total # of complexes (sbo == 253)".format( len( complexes)))
# complexes based on Participant edge
complexes2 = set( [ e[0] for e in participant_graph.edges()])
complexes2_participant = set( [ e[1] for e in participant_graph.edges()]) # participants of complexes
print( "{} total # of complexes (complex in a complex relationship with some participant)".format( len( complexes2)))
print( "{} total # of unique participants".format( len( complexes2_participant)))
# complexes part of reaction
complexes_in_reaction = complexes_ids.intersection( reaction_participants)
complexes_not_in_reaction = complexes_ids.difference( reaction_participants)
print( "{}/{} of complexes are part of a reaction ({}/{} are not)".format(
len( complexes_in_reaction),
len( complexes_ids),
len( complexes_not_in_reaction),
len( complexes_ids)))
# participants part of reaction
complexes_participant_in_reaction = complexes2_participant.intersection( reaction_participants)
complexes_participant_not_in_reaction = complexes2_participant.difference( reaction_participants)
print( "{}/{} of participants are part of a reaction ({}/{} are not)".format(
len( complexes_participant_in_reaction),
len( complexes2_participant),
len( complexes_participant_not_in_reaction),
len( complexes2_participant)))
complexes_participants_in_other_complexes = complexes_ids.intersection( complexes2_participant)
print( "{} complexes participate in other complexes".format( len( complexes_participants_in_other_complexes)))
multiple_complex_edge_participant = [n for n, c in collections.Counter( [ e[1] for e in participant_graph.edges()]).items() if c > 1]
print( "{} participants participate in multiple complexes".format( len(multiple_complex_edge_participant)))
## some annotation information
complexes_wo_bqbiol_is = [ c for c in complexes_ids if graph.node[c]["bqbiol_is"] == ()]
print( "{}/{} complexes w/o bqbiol_is".format( len( complexes_wo_bqbiol_is), len( complexes_ids)))
participants_wo_bqbiol_is = [ p for p in complexes2_participant if graph.node[p]["bqbiol_is"] == ()]
print( "{}/{} participants w/o bqbiol_is".format( len( participants_wo_bqbiol_is), len( complexes2_participant)))
########################################################################
########################################################################
def precision_recall_f_score( tp, fp, fn):
if len( tp) == 0 and len( fp) == 0:
precision = 0
else:
precision = len( tp) / float( len( tp) + len( fp))
if len( tp) == 0 and len( fn) == 0:
recall = 0
else:
recall = len( tp) / float( len( tp) + len( fn))
if precision == 0 and recall == 0:
f_score = 0.0
else:
f_score = 2.0 * (precision * recall) / (precision + recall)
return precision, recall, f_score
########################################################################
########################################################################
def set_overlap( set_1, set_2, equal_fn):
r_1 = set()
r_2 = set()
for e1 in set_1:
e2s = filter( lambda e2: equal_fn( e1, e2), set_2)
if e2s:
r_2 = r_2.union( e2s)
r_1.add( e1)
return r_1, r_2
def list_overlap( list_1, list_2, equal_fn):
""" Returns indices of overlapping elements"""
indices_1 = set()
indices_2 = set()
for i_1, e1 in enumerate( list_1):
is_2 = [i for i, e2 in enumerate(list_2) if equal_fn( e1, e2)]
if is_2 != []:
indices_2.update( is_2)
indices_1.add( i_1)
return indices_1, indices_2
def list_intersect( list_1, list_2):
l_1 = list_1[:]
l_2 = list_2[:]
result = []
while len(l_1) > 0:
e1 = l_1.pop()
try:
idx = l_2.index( e1)
except:
idx = None
if idx != None:
l_2.remove( e1)
result.append( e1)
return result
assert( list_intersect([1,2,3],[4,5]) == [])
assert( list_intersect([1,2,3],[1,5]) == [1])
assert( list_intersect([1,2,3,1],[1,5]) == [1])
assert( list_intersect([1,2,3,1],[1,1]) == [1,1])
def list_difference( list_1, list_2):
l_1 = list_1[:]
for e2 in list_2:
try:
l_1.remove(e2)
except:
pass
return l_1
assert( list_difference([1,2,3,1],[5,6]) == [1,2,3,1])
assert( list_difference([1,2,3,1],[1,6]) == [2,3,1])
assert( list_difference([1,2,3,1],[1,1,6]) == [2,3])
def list_find( el, listt, equal_fn):
for el2 in listt:
if equal_fn( el, el2):
return el2
return None
def list_difference2( list_1, list_2, equal_fn):
"returns those elements of list_1 which are not in list_2 according to equal_fn"
result = []
for e in list_1:
if not list_find( e, list_2, equal_fn):
result.append( e)
return result
def list_reduce2( list_1, equal_fn):
result = []
elements_remaining = list_1[:]
while elements_remaining:
el = elements_remaining.pop()
result.append( el)
new_elements_remaining = []
for el2 in elements_remaining:
if not equal_fn( el, el2):
new_elements_remaining.append( el2)
elements_remaining = new_elements_remaining
return result
assert( list_reduce2([1,"1",2,"2"], lambda e1, e2: str( e1) == str( e2)) == ['2','1'])
def merge( sets):
"merges sets which are disjoint"
merged = 1
while merged:
merged = 0
results = []
while sets:
common, rest = sets[0], sets[1:]
sets = []
for x in rest:
if x.isdisjoint(common):
sets.append(x)
else:
merged = 1
common |= x
results.append(common)
sets = results
return sets
def analyse_set_overlap( set_1, set_2, equal_fn = operator.eq):
res_1, res_2 = set_overlap( set_1, set_2, equal_fn)
if len( set_2) == 0:
precision = 0
else:
precision = 100.0 * len( res_2) / float( len( set_2))
if len( set_1) == 0:
recall = 0
else:
recall = 100.0 * len( res_1) / float( len( set_1))
if precision == 0 and recall == 0:
f_score = 0.0
else:
f_score = 2.0 * (precision * recall) / (precision + recall)
return res_1, res_2, precision, recall, f_score
def analyse_list_overlap( list_1, list_2, equal_fn = operator.eq):
res_1, res_2 = list_overlap( list_1, list_2, equal_fn)
if len( list_2) == 0:
precision = 0
else:
precision = 100.0 * len( res_2) / float( len( list_2))
if len( list_1) == 0:
recall = 0
else:
recall = 100.0 * len( res_1) / float( len( list_1))
if precision == 0 and recall == 0:
f_score = 0.0
else:
f_score = 2.0 * (precision * recall) / (precision + recall)
return res_1, res_2, precision, recall, f_score
def tuple_eq_empty_not_eq( t_1, t_2):
""" those which are empty are in fact not equal"""
return len( t_1) > 0 and t_1 == t_2
def tuple_overlaps( t_1, t_2):
return len( set(t_1).intersection( t_2)) > 0
def tuple_overlaps_sbo_is_a( t_1, t_2):
if tuple_overlaps( t_1, t_2):
return True
else:
sbo_terms_1 = get_sbo_terms( t_1)
sbo_terms_2 = get_sbo_terms( t_2)
for s1 in sbo_terms_1:
for s2 in sbo_terms_2:
if sbo_is_a2( s1, s2):
return True
def name_approx_equal( n1, n2):
return fuzzywuzzy.fuzz.ratio( n1, n2) > 90
########################################################################
########################################################################
def nm_name_equal( n1, n2):
"Checks if name is the same"
return n1["name"].lower() == n2["name"].lower()
def nm_name_equal_w_participants( n1, n2):
"Checks if name and names of participants overlap"
names_1 = [n1["name"].lower()] + [ p["name"].lower() for p in n1["participants"]]
names_2 = [n2["name"].lower()] + [ p["name"].lower() for p in n2["participants"]]
return len( set( names_1).intersection( names_2)) > 0
def nm_name_clean_equal( n1, n2):
"Checks if clean name is the same"
return remove_prefixes( n1["name"].lower()) == remove_prefixes( n2["name"].lower())
def nm_name_clean_equal_w_participants( n1, n2):
"Checks if name and names of participants overlap"
clean_names_1 = [remove_prefixes( n1["name"].lower())] + [ remove_prefixes( p["name"].lower()) for p in n1["participants"]]
clean_names_2 = [remove_prefixes( n2["name"].lower())] + [ remove_prefixes( p["name"].lower()) for p in n2["participants"]]
return len( set( clean_names_1).intersection( clean_names_2)) > 0
def nm_name_clean2_equal( n1, n2):
"Checks if clean name is the same"
return clean_name2( n1["name"]) == clean_name2( n2["name"])
def nm_name_clean_approx( n1, n2):
return fuzzywuzzy.fuzz.ratio( clean_name2( n1["name"]), clean_name2( n2["name"])) > 90
def nm_name_clean_approx_w_participants( n1, n2):
clean_names_1 = [ re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( n1["name"].lower()))] + [ re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( p["name"].lower())) for p in n1["participants"]]
clean_names_2 = [ re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( n2["name"].lower()))] + [ re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( p["name"].lower())) for p in n2["participants"]]
for name_1 in clean_names_1:
if list_find( name_1, clean_names_2, lambda name_1, name_2: fuzzywuzzy.fuzz.ratio( name_1, name_2) > 90):
return True
return False
def nm_gene_id_intersect( n1, n2):
set_1 = map_gene_simstring( n1["name"])
set_2 = map_gene_simstring( n2["name"])
return set_1 and set_2 and len( set_1.intersection( set_2)) > 0
def nm_gene_id_intersect_w_participants( n1, n2):
sets_1 = filter( bool, [map_gene_simstring(n) for n in [ n1["name"]] + [ p["name"] for p in n1["participants"]]])
sets_2 = filter( bool, [map_gene_simstring(n) for n in [ n2["name"]] + [ p["name"] for p in n2["participants"]]])
for s1 in sets_1:
for s2 in sets_2:
if len( s1.intersection( s2)) > 0:
return True
return False
def nm_name_clean_approx_OR_gene_id_intersect( n1, n2):
return nm_name_clean_approx( n1, n2) or nm_gene_id_intersect( n1, n2)
def nm_name_clean_approx_OR_gene_id_intersect_w_participants( n1, n2):
return nm_name_clean_approx_w_participants( n1, n2) or nm_gene_id_intersect_w_participants( n1, n2)
def nm_bqbiol_is_equal( n1, n2):
"Checks if the bqbiol_is are the same"
return n1["bqbiol_is"] and n2["bqbiol_is"] and n1["bqbiol_is"] == n2["bqbiol_is"]
def nm_bqbiol_is_equal_w_participants( n1, n2):
"Checks if the bqbiol_is are the same - also checks participants"
sets_1 = filter( bool, [set(n1["bqbiol_is"])] + [ set(p["bqbiol_is"]) for p in n1["participants"]])
sets_2 = filter( bool, [n2["bqbiol_is"]] + [ p["bqbiol_is"] for p in n2["participants"]])
for s1 in sets_1:
for s2 in sets_2:
if len( s1.intersection( s2)) > 0:
return True
return False
def nm_bqbiol_is_overlaps( n1, n2):
"Checks if the bqbiol_is are the same"
return n1["bqbiol_is"] and n2["bqbiol_is"] and len( set( n1["bqbiol_is"]).intersection( set( n2["bqbiol_is"]))) > 0
def nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
"Checks if the bqbiol_is are the same"
if nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["bqbiol_is"] and n2["bqbiol_is"]:
sbo_terms_1 = get_sbo_terms( n1["bqbiol_is"])
sbo_terms_2 = get_sbo_terms( n2["bqbiol_is"])
for s1 in sbo_terms_1:
for s2 in sbo_terms_2:
if sbo_is_a2( s1, s2):
return True
return False
def nm_bqbiol_is_overlaps_w_participants( n1, n2):
"Checks if the bqbiol_is overlaps - also checks participants"
set_1 = set( n1["bqbiol_is"])
if n1.get("participants"):
[set_1.update( p["bqbiol_is"]) for p in n1["participants"]]
set_2 = set( n2["bqbiol_is"])
if n2.get("participants"):
[set_2.update( p["bqbiol_is"]) for p in n2["participants"]]
if len( set_1.intersection( set_2)) > 0:
return True
else:
return False
def nm_bqbiol_is_has_part_overlaps( n1, n2):
"Checks if the bqbiol_is and bqbiol_has_part overlaps"
uris_1 = set()
if n1["bqbiol_is"]:
uris_1.update( n1["bqbiol_is"])
if n1["bqbiol_has_part"]:
uris_1.update( n1["bqbiol_has_part"])
uris_2 = set()
if n2["bqbiol_is"]:
uris_2.update( n2["bqbiol_is"])
if n1["bqbiol_has_part"]:
uris_2.update( n2["bqbiol_has_part"])
return len( uris_1.intersection( uris_2)) > 0
def nm_sbo_equal( n1, n2):
"Only works on reactions"
return n1["sbo"] and n2["sbo"] and n1["sbo"] == n2["sbo"]
def nm_sbo_is_a( n1, n2):
"Only works on reactions"
return n1["sbo_str"] and n2["sbo_str"] and sbo_is_a2( n1["sbo_str"], n2["sbo_str"])
################### name_clean + various reactions matches
def nm_name_clean_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal( n1, n2):
return True
else:
return False
def nm_name_clean_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal_w_participants( n1, n2):
return True
else:
return False
def nm_name_clean_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal( n1, n2):
return True
else:
return False
def nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal_w_participants( n1, n2):
return True
else:
return False
def nm_name_clean_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal( n1, n2):
return True
else:
return False
def nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_equal_w_participants( n1, n2):
return True
else:
return False
################### name_clean_approx + various reactions matches
def nm_name_clean_approx_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx( n1, n2):
return True
else:
return False
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx_w_participants( n1, n2):
return True
else:
return False
def nm_name_clean_approx_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx( n1, n2):
return True
else:
return False
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx_w_participants( n1, n2):
return True
else:
return False
def nm_name_clean_approx_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx( n1, n2):
return True
else:
return False
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species" and nm_name_clean_approx_w_participants( n1, n2):
return True
else:
return False
################### name_clean_approx or bqbiol_is_equal various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2)):
return True
else:
return False
################### name_clean_approx or bqbiol_is_overlaps various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2)):
return True
else:
return False
################### name_clean_approx or bqbiol_is_overlaps various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_equal( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_equal( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_overlaps( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2)):
return True
else:
return False
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
if n1["type"] != n2["type"]:
return False
elif n1["type"] == "reaction" and nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
return True
elif n1["type"] == "species"and (nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2)):
return True
else:
return False
################### edge match exact
def edge_match_exact( e1, e2):
"only edges"
return e1["type"] == e2["type"]
########################################################################
########################################################################
# nodes overlap max
def compute_nodes_overlap_max( graph_1, graph_2, node_match):
""" computes a nodes in graph_2 matching with nodes in graph_1 according
to node_match
Returns - a dictionary of nodes """
nodes_2 = [ filter( lambda n2: node_match( graph_1.node[n1], graph_2.node[n2]), graph_2.nodes()) for n1 in graph_1.nodes()]
return { n1: n2 for n1, n2 in zip( graph_1.nodes(), nodes_2) if n2 }
def get_nodes_overlap_max_result_precision_recall_f_score( graph_1, graph_2, matches):
if len( graph_2) == 0:
precision = 0;
else:
precision = len( set( itertools.chain(*matches.values()))) / float( len( graph_2))
if len( graph_1) == 0:
recall = 0
else:
recall = len( matches.keys()) / float( len( graph_1))
if precision == 0 and recall == 0:
f_score = 0.0
else:
f_score = 2.0 * (precision * recall) / (precision + recall)
return 100.0 * precision, 100.0 * recall, 100.0 * f_score
def print_node_match_result( graph_1, graph_2, matches, node_match_name = "", export_matches = None):
# print results
precision, recall, f_score = get_nodes_overlap_max_result_precision_recall_f_score( graph_1, graph_2, matches)
print( "{}: {:.2f} & {:.2f} & {:.2f} node overlap (precision/recall/f-score)".format(
node_match_name, precision, recall, f_score))
# export text matches files
if export_matches:
with open( export_matches, "wt") as f:
clean_names_map = { clean_name2( graph_1.node[k]["name"]) : k for k in matches.keys()}
for n in sorted( clean_names_map.keys()):
k = clean_names_map[n]
if matches[k]:
f.write( "\n-------------------------------------------------------------\n")
f.write( n)
f.write( "\n--\n" )
names = set( [clean_name2( graph_2.node[v]["name"]) for v in matches[k]])
f.write( "\n".join(names))
def run_analysis_nodes_overlap_max( graph_1, graph_2, node_match,
export_results = False,
export_results_prefix = "results-nodes-overlap-max",
ignore_existing = False):
""" computes nodes overlap and prints statistics"""
export_file = "%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__)
if ignore_existing and os.path.exists( export_file):
print("%s:%s/%s:run_analysis_nodes_overlap_max:%s exists. using that one." % (now(),graph_1.name, graph_2.name, export_file))
data = pickle.load( open( export_file, "rb"))
graph_1, graph_2, matches = data[0], data[1], data[2]
else:
matches = compute_nodes_overlap_max( graph_1, graph_2, node_match)
print_node_match_result( graph_1, graph_2, matches, node_match_name = node_match.__name__)
if export_results and not( ignore_existing and os.path.exists( export_file)):
print("%s:%s/%s:run_analysis_nodes_overlap_max:Exporting %s" % (now(),graph_1.name, graph_2.name, export_file))
pickle.dump( [graph_1, graph_2, matches], open( export_file, "wb"))
def run_analyses_nodes_overlap_max( graph_1,
graph_2,
node_match_fns,
prefix = None,
n_jobs = None,
export_results = False,
export_results_prefix = "results-nodes-overlap-max"):
""" computes nodes overlaps according to multiple node_match_fns and prints statistics """
print( "-----------")
print( "run_analyses_nodes_overlap_max %s/%s n_jobs=%s -- %s" % (graph_1.name, graph_2.name, n_jobs, node_match_fns))
# compute the nodes of 2 that exist in 1 (ignoring edges)
if n_jobs:
with joblib.Parallel( n_jobs = n_jobs) as parallel:
parallel( joblib.delayed( run_analysis_nodes_overlap_max) ( graph_1, graph_2, fn, export_results = export_results, export_results_prefix = export_results_prefix)
for fn in node_match_fns)
else:
for nm in node_match_fns:
run_analysis_nodes_overlap_max( graph_1, graph_2, nm, export_results = export_results, export_results_prefix = export_results_prefix)
########################################################################
########################################################################
# subgraph overlap max
def match_subgraph_max( graph, subgraph, node_match, edge_match = edge_match_exact, file_name = None):
""" computes overlap for single subgraph"""
assert( subgraph or file_name)
if subgraph == None:
subgraph = pickle.load( open( file_name, "rb"))
graph_matcher = networkx.algorithms.isomorphism.GraphMatcher( graph,
subgraph,
node_match = node_match,
edge_match = edge_match)
result = list( graph_matcher.subgraph_isomorphisms_iter())
return result, subgraph
def match_graph_max( graph, file_name, node_match, edge_match = edge_match_exact):
""" computes overlap for graph loaded from a file"""
graph_2 = pickle.load( open( file_name, "rb"))
subgraphs = list( networkx.connected_component_subgraphs( graph_2))
graph_matchers = [networkx.algorithms.isomorphism.GraphMatcher( graph,
subgraph,
node_match = node_match,
edge_match = edge_match)
for subgraph in subgraphs]
results = [ (list( m.subgraph_isomorphisms_iter()), s) for m, s in zip( graph_matchers, subgraphs)]
return results
def match_subgraphs_max( graph,
subgraphs,
node_match,
edge_match = edge_match_exact,
n_jobs = None,
file_names = None):
""" computes overlap for subgraphs """
# compute the nodes of 2 that exist in 1 (ignoring edges)
assert( subgraphs or file_names)
if file_names: # use the files instead of subgraphs if possible
print( "Running match_subgraphs_max using file_names (individual graph files) n_jobs=%s" %(n_jobs))
if n_jobs:
with joblib.Parallel( n_jobs = n_jobs) as parallel:
results = parallel( joblib.delayed( match_graph_max) ( graph, file_name, node_match = node_match, edge_match = edge_match) for file_name in file_names)
else:
results = [ match_graph_max( graph, file_name, node_match = node_match, edge_match = edge_match) for file_name in file_names]
results = [r for result in results for r in result]
else:
print( "Running match_subgraphs_max using subgraphs n_jobs=%s" %(n_jobs))
if n_jobs:
with joblib.Parallel( n_jobs = n_jobs) as parallel:
results = parallel( joblib.delayed( match_subgraph_max) ( graph, subgraph, node_match, edge_match) for subgraph in subgraphs)
else:
results = [ match_subgraph_max( graph, subgraph, node_match = node_match, edge_match = edge_match) for subgraph in subgraphs]
results_matches = [r[0] for r in results]
results_subgraphs = [r[1] for r in results]
return results_matches, results_subgraphs
def subgraph_match_get_edges( subgraph, match, reverse_match, edge_signatures_1, edge_signatures_2):
""" Computes matching edges from match_subgraphs results """
m_edges = {}
for e in subgraph.edges( data = True):
# a bit of acrobatics to get around having to use digraph (which is buggy)
signature_1_1 = (reverse_match[e[0]], reverse_match[e[1]], e[2]["type"])
signature_1_2 = (reverse_match[e[1]], reverse_match[e[0]], e[2]["type"])
signature_2_1 = (e[0], e[1], e[2]["type"])
signature_2_2 = (e[1], e[0], e[2]["type"])
assert signature_1_1 in edge_signatures_1 or signature_1_2 in edge_signatures_1
assert not( signature_1_1 in edge_signatures_1 and signature_1_2 in edge_signatures_1)
assert signature_2_1 in edge_signatures_2 or signature_2_2 in edge_signatures_2
assert not( signature_2_1 in edge_signatures_2 and signature_2_2 in edge_signatures_2)
if signature_1_1 in edge_signatures_1:
signature_1 = signature_1_1
else:
signature_1 = signature_1_2
if signature_2_1 in edge_signatures_2:
signature_2 = signature_2_1
else:
signature_2 = signature_2_2
m_edges[signature_1] = signature_2
assert signature_1 in edge_signatures_1
assert signature_2 in edge_signatures_2
return m_edges
def compute_subgraphs_overlap_max( graph_1, graph_2,
node_match,
edge_match = edge_match_exact,
subgraphs_2 = None,
n_jobs = None,
export_results = False,
export_results_prefix = "results-subgraphs-overlap-max",
file_names = None,
ignore_existing = False):
""" compute the subgraphs in graph_1 isomorph to nodes in subgraphs of 2 """
if export_results:
export_file = "%s__%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__, edge_match.__name__)
if export_results and ignore_existing and os.path.exists( export_file):
print( "%s:%s/%s:compute_subgraphs_overlap_max:results exist %s, loading" % (now(), graph_1.name, graph_2.name, export_file))
data = pickle.load( open( export_file, "rb"))
graph_1, graph_2, results_subgraphs, results_matches = data[0], data[1], data[2], data[3]
return results_matches, results_subgraphs
if graph_2 and file_names == None and subgraphs_2 == None:
subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
# Run!
results_matches, results_subgraphs = match_subgraphs_max( graph_1, subgraphs_2, node_match = node_match, edge_match = edge_match, n_jobs = n_jobs, file_names = file_names)
# export data
if export_results:
pickle.dump( [graph_1, graph_2, results_subgraphs, results_matches],
open( "%s__%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__, edge_match.__name__), "wb"))
return results_matches, results_subgraphs
def get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches,
species_1 = None, species_2 = None, reactions_1 = None, reactions_2 = None):
""" takes results from matching and computes matches for nodes, edges, species, and reactions """
if species_1 == None:
species_1 = set(filter_species( graph_1).nodes())
if species_2 == None:
species_2 = set(filter_species( graph_2).nodes())
if reactions_1 == None:
reactions_1 = set( filter_reactions( graph_1).nodes())
if reactions_2 == None:
reactions_2 = set( filter_reactions( graph_2).nodes())
# collet results for analysis
matches_nodes_1 = set()
matches_nodes_2 = set()
matches_edges_1 = set()
matches_edges_2 = set()
edge_signatures_1 = edge_signatures( graph_1)
edge_signatures_2 = edge_signatures( graph_2)
for subgraph_2, matches in zip( results_subgraphs, results_matches):
for m in matches:
matches_nodes_1 = matches_nodes_1.union( m.keys())
matches_nodes_2 = matches_nodes_2.union( m.values())
reverse_m = { v: k for k, v in m.iteritems()}
m_edges = subgraph_match_get_edges( subgraph_2, m , reverse_m, edge_signatures_1, edge_signatures_2)
matches_edges_1 = matches_edges_1.union( m_edges.keys())
matches_edges_2 = matches_edges_2.union( m_edges.values())
species_1_matches = species_1.intersection( matches_nodes_1)
species_2_matches = species_2.intersection( matches_nodes_2)
reactions_1_matches = reactions_1.intersection( matches_nodes_1)
reactions_2_matches = reactions_2.intersection( matches_nodes_2)
return matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2, species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches
def get_subgraphs_overlap_max_results_precision_recall_f_score(graph_1, graph_2, results_subgraphs, results_matches,
species_1 = None, species_2 = None, reactions_1 = None, reactions_2 = None):
""" Returns precision recall for nodes, species, reactions, edges as a dict """
if species_1 == None:
species_1 = set(filter_species( graph_1).nodes())
if species_2 == None:
species_2 = set(filter_species( graph_2).nodes())
if reactions_1 == None:
reactions_1 = set( filter_reactions( graph_1).nodes())
if reactions_2 == None:
reactions_2 = set( filter_reactions( graph_2).nodes())
matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2, species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches = \
get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches, species_1, species_2, reactions_1, reactions_2)
result = {}
precision = 100. * len( matches_nodes_2) / float( len( graph_2.nodes()))
recall = 100. * len( matches_nodes_1) / float( len( graph_1.nodes()))
result["node precision"] = precision
result["node recall"] = recall
if precision + recall == 0:
result["node f-score"] = 0
else:
result["node f-score"] = 2.0 * (precision * recall) / (precision + recall)
precision = 100. * len( species_2_matches) / float( len( species_2))
recall = 100. * len( species_1_matches) / float( len( species_1))
result["species precision"] = precision
result["species recall"] = recall
if precision + recall == 0:
result["species f-score"] = 0
else:
result["species f-score"] = 2.0 * (precision * recall) / (precision + recall)
precision = 100. * len( reactions_2_matches) / float( len( reactions_2))
recall = 100. * len( reactions_1_matches) / float( len( reactions_1))
result["reaction precision"] = precision
result["reaction recall"] = recall
if precision + recall == 0:
result["reaction f-score"] = 0
else:
result["reaction f-score"] = 2.0 * (precision * recall) / (precision + recall)
precision = 100. * len( matches_edges_2) / float( len( graph_2.edges()))
recall = 100. * len( matches_edges_1) / float( len( graph_1.edges()))
result["edge precision"] = precision
result["edge recall"] = recall
if precision + recall == 0:
result["edge f-score"] = 0
else:
result["edge f-score"] = 2.0 * (precision * recall) / (precision + recall)
return result
def print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
species_1 = None, species_2 = None,
reactions_1 = None, reactions_2 = None):
if not species_1:
species_1 = set( filter_species( graph_1).nodes())
if not species_2:
species_2 = set( filter_species( graph_2).nodes())
if not reactions_1:
reactions_1 = set( filter_reactions( graph_1).nodes())
if not reactions_2:
reactions_2 = set( filter_reactions( graph_2).nodes())
## print results
print( "{} {}/{}".format( node_match.__name__, graph_1.name, graph_2.name))
precision = 100. * len( matches_nodes_2) / float( len( graph_2.nodes()))
recall = 100. * len( matches_nodes_1) / float( len( graph_1.nodes()))
f_score = 0.0
if precision + recall > 0:
f_score = 2. * precision * recall / (precision + recall)
print( "%.2f & %.2f & %.2f node" % (precision, recall, f_score))
precision = 100. * len( species_2_matches) / float( len( species_2))
recall = 100. * len( species_1_matches) / float( len( species_1))
f_score = 0.0
if precision + recall > 0:
f_score = 2. * precision * recall / (precision + recall)
print( "%.2f & %.2f & %.2f species" % (precision, recall, f_score))
precision = 100 * len( reactions_2_matches) / float( len( reactions_2))
recall = 100 * len( reactions_1_matches) / float( len( reactions_1))
f_score = 0.0
if precision + recall > 0:
f_score = 2. * precision * recall / (precision + recall)
print( "%.2f & %.2f & %.2f reaction" % (precision, recall, f_score))
precision = 100 * len( matches_edges_2) / float( len( graph_2.edges()))
recall = 100 * len( matches_edges_1) / float( len( graph_1.edges()))
f_score = 0.0
if precision + recall > 0:
f_score = 2. * precision * recall / (precision + recall)
print( "%.2f & %.2f & %.2f edge" % (precision, recall, f_score))
def print_analysis_subgraphs_overlap_results_from_file( graph_1_name,
graph_2_name,
node_match,
edge_match = edge_match_exact,
prefix = "results/results-subgraphs-overlap-max"):
# load file
[graph_1, graph_2, results_subgraphs, results_matches] \
= pickle.load( open( "%s__%s__%s__%s__%s.pickle" % ( prefix,graph_1_name, graph_2_name, node_match.__name__, edge_match.__name__), "rb"))
# process results
species_1 = set(filter_species( graph_1).nodes())
species_2 = set(filter_species( graph_2).nodes())
reactions_1 = set( filter_reactions( graph_1).nodes())
reactions_2 = set( filter_reactions( graph_2).nodes())
matches_nodes_1, matches_nodes_2, \
matches_edges_1, matches_edges_2, \
species_1_matches, species_2_matches, \
reactions_1_matches, reactions_2_matches = \
get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches, \
species_1 = species_1, species_2 = species_2, reactions_1 = reactions_1, reactions_2 = reactions_2)
# print results
print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
species_1, species_2, reactions_1, reactions_2)
def run_analysis_subgraphs_overlap( graph_1,
graph_2,
node_match,
edge_match = edge_match_exact,
subgraphs_2 = None,
species_1 = None,
species_2 = None,
reactions_1 = None,
reactions_2 = None,
n_jobs = None,
export_results = False,
export_results_prefix = "results-subgraphs-overlap-max",
file_names = None,
print_results = True,
ignore_existing = False):
""" runs analysis for subgraphs """
print( "-----------")
print( "%s: run_analysis_subgraphs_overlap %s/%s -- %s" % (now(), graph_1.name, graph_2.name, node_match.__name__))
if subgraphs_2 == None:
subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
if not species_1:
species_1 = set( filter_species( graph_1).nodes())
if not species_2:
species_2 = set( filter_species( graph_2).nodes())
if not reactions_1:
reactions_1 = set( filter_reactions( graph_1).nodes())
if not reactions_2:
reactions_2 = set( filter_reactions( graph_2).nodes())
results_matches, results_subgraphs \
= compute_subgraphs_overlap_max( graph_1, graph_2,
node_match = node_match,
edge_match = edge_match_exact,
subgraphs_2 = subgraphs_2,
n_jobs = n_jobs,
export_results = export_results,
export_results_prefix = export_results_prefix,
file_names = file_names,
ignore_existing = ignore_existing)
if print_results:
# process results
matches_nodes_1, matches_nodes_2, \
matches_edges_1, matches_edges_2, \
species_1_matches, species_2_matches, \
reactions_1_matches, reactions_2_matches = \
get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches, \
species_1 = species_1, species_2 = species_2, reactions_1 = reactions_1, reactions_2 = reactions_2)
# print results
print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
species_1, species_2, reactions_1, reactions_2)
return results_matches, results_subgraphs
def run_analyses_subgraphs_overlap( graph_1,
graph_2,
node_match_fns,
subgraphs_2 = None,
export_results = False,
export_results_prefix = "results-subgraphs-overlap-max",
print_results = True):
""" runs analysis for subgraphs for multiple node_match_fns"""
print( "-----------")
print( "run_analyses_subgraphs_overlap {}/{} -- {}".format( graph_1.name, graph_2.name, node_match_fns))
if subgraphs_2 == None:
subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
species_1 = set( filter_species( graph_1).nodes())
species_2 = set( filter_species( graph_2).nodes())
reactions_1 = set( filter_reactions( graph_1).nodes())
reactions_2 = set( filter_reactions( graph_2).nodes())
for node_match in node_match_fns:
print("\n---")
run_analysis_subgraphs_overlap( graph_1, graph_2, node_match, edge_match = edge_match_exact,
subgraphs_2 = subgraphs_2,
species_1 = species_1,
species_2 = species_2,
reactions_1 = reactions_1,
reactions_2 = reactions_2,
export_results = export_results,
export_results_prefix = export_results_prefix)
########################################################################
########################################################################
## side-by-side graphviz
def _graphviz_label( n, graph, n_id_map = {}, id_prefix = "", participant = False, show_identifier = False):
if graph.node[n].get("participants"):
n_id_map[n] = id_prefix + n
label = graph.node[n]["name"]
if show_identifier:
label += "\n" + n
label = "<table>%s%s</table>" % ("<tr><td port=\"%s\"><b>%s</b></td></tr>" % (n, label),
"".join([ _graphviz_label( p, graph, n_id_map, id_prefix = id_prefix, participant = True) for p in graph.node[n]["participant_ids"]]))
if participant:
return "<tr><td>%s</td></tr>" % label
else:
return "<%s>" % label
elif graph.node[n]["type"] == "species":
n_id_map[n] = id_prefix + n
label = graph.node[n]["name"]
if show_identifier:
label += "\n" + n
if participant:
return "<tr><td port=\"%s\">%s</td></tr>" % (n, label)
else:
return label
else:
n_id_map[n] = n
label = ", ".join( sbo_go_name(b) for b in graph.node[n]["bqbiol_is"])
if show_identifier:
label += "\n" + n
return label
def _graphviz_add_node( n, graph, graphviz_graph, n_id_map = {}, label = None, show_identifier = False, **kwargs):
""" adds a node top level (should not be participant of a complex) """
if label == None and graph.node[n].get("participants"):
label = _graphviz_label( n, graph, n_id_map, id_prefix = n + ":", show_identifier = show_identifier)
else:
label = _graphviz_label( n, graph, n_id_map, show_identifier = show_identifier)
if graph.node[n].get("participants"): # has participants
graphviz_graph.node( n, label = label, shape = "none", **kwargs)
elif graph.node[n]["type"] == "species":
graphviz_graph.node( n, label = label, shape = "rectangle", **kwargs)
else:
graphviz_graph.node( n, label = label, shape = "ellipse", **kwargs)
def _graphviz_add_edge( e, graph, graphviz_graph, n_id_map = {}, **kwargs):
""" adds an edge to the graphviz graph """
if (e[2] == "product" and not graph.node[e[0]]["type"] == "reaction") \
or (e[2] != "product" and not graph.node[e[1]]["type"] == "reaction"):
e = (e[1],e[0],e[2])
e0 = e[0]
e1 = e[1]
if e0 in n_id_map:
e0 = n_id_map[e0]
if e1 in n_id_map:
e1 = n_id_map[e1]
if e[2] == "modifier":
graphviz_graph.edge( e0, e1, arrowhead = "diamond", **kwargs)
else:
graphviz_graph.edge( e0, e1, **kwargs)
def graphviz_graph( graph, file_name = "test.dot", view = True, show_identifier = False):
""" renders a graph using dot"""
import graphviz
n_id_map = {}
participant_complex_map = { id : n for n in graph.nodes() if graph.node[n].get("participant_ids") for id in graph.node[n].get("participant_ids")}
top_nodes = set( graph.nodes()).difference( participant_complex_map.keys())
graphviz_graph = graphviz.Digraph()
[_graphviz_add_node( n, graph, graphviz_graph, n_id_map, show_identifier = show_identifier) for n in top_nodes];
[_graphviz_add_edge( (e[0], e[1], e[2]["type"]), graph, graphviz_graph, n_id_map) for e in graph.edges( data = True)];
graphviz_graph.render( file_name, view = view)
def get_top_complex( n, graph, participant_complex_map = {}):
if participant_complex_map == {}:
participant_complex_map = { id : n for n in graph.nodes() if graph.node[n].get("participant_ids") for id in graph.node[n].get("participant_ids")}
if not (n in participant_complex_map):
return n
else:
return get_top_complex( participant_complex_map[n], graph, participant_complex_map)
def graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges, file_name = "test.dot", view = True, include_context_graph_1 = True, show_identifier = False):
""" Creates a graph visualization visualizing a match (left, right) """
import graphviz
g = graphviz.Digraph()
s1 = graphviz.Digraph( "cluster_1")
s1.body.append( "\tlabel=\"%s\"" % graph_1.name)
participant_complex_map_1 = { id : n for n in graph_1.nodes() if graph_1.node[n].get("participant_ids") for id in graph_1.node[n].get("participant_ids")}
top_complexes_1 = [ get_top_complex( n, graph_1, participant_complex_map_1) for n in m_nodes.keys()]
n_id_map_1 = {}
[_graphviz_add_node( n, graph_1, s1, n_id_map_1, show_identifier = show_identifier) for n in top_complexes_1]
[_graphviz_add_edge( e, graph_1, s1, n_id_map_1) for e in m_edges.keys()]
if include_context_graph_1:
context_edges = set([ sort_edge_signature((edge[0], edge[1], edge[2]["type"]), graph_1) for edge in graph_1.edges( m_nodes.keys(), data = True)]).difference( m_edges.keys())
context_nodes = set( [ c for e in context_edges for c in e[:2]]).difference( top_complexes_1)
# add nodes
[_graphviz_add_node( get_top_complex( n, graph_1, participant_complex_map_1), graph_1, s1, n_id_map_1, color = "grey", show_identifier = show_identifier) for n in context_nodes]
# add edges
[_graphviz_add_edge( e, graph_1, s1, n_id_map_1, color = "grey") for e in context_edges]
g.subgraph( s1)
s2 = graphviz.Digraph( "cluster_2")
s2.body.append( "\tlabel=\"%s\"" % graph_2.name)
participant_complex_map_2 = { id : n for n in graph_2.nodes() if graph_2.node[n].get("participant_ids") for id in graph_2.node[n].get("participant_ids")}
top_complexes_2 = [ get_top_complex( n, graph_2, participant_complex_map_2) for n in m_nodes.values()]
n_id_map_2 = {}
[_graphviz_add_node( n, graph_2, s2, n_id_map_2, show_identifier = show_identifier) for n in top_complexes_2]
[_graphviz_add_edge( e, graph_2, s2, n_id_map_2) for e in m_edges.values()]
g.subgraph( s2)
for n1, n2 in m_nodes.iteritems():
g.edge( n_id_map_1[n1], n_id_map_2[n2], dir = "none", style = "dotted", constraint = "false")
g.render( file_name, view = view)
#graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges)
#graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges, include_context_graph_1 = False)
########################################################################
########################################################################
## graphviz color overlap
def _graphviz_label2( n, graph, n_id_map = {}, id_prefix = "", matched_nodes = set(), participant = False, **kwargs):
""" Generates colored labels condition on whether a node matched """
# choose color
color = kwargs["color"]
fontcolor = kwargs["fontcolor"]
fillcolor = kwargs["fillcolor"]
show_identifier = kwargs["show_identifier"]
if n in matched_nodes:
color = kwargs["matched_color"]
fontcolor = kwargs["matched_fontcolor"]
fillcolor = kwargs["matched_fillcolor"]
# handle different node types (with or without participants etc)
if graph.node[n].get("participants"):
n_id_map[n] = id_prefix + n
label = "<table>%s%s</table>" \
% ("<tr><td port=\"%s\" color=\"%s\" bgcolor=\"%s\"><font color=\"%s\"><b>%s</b></font></td></tr>" % (n, color, fillcolor, fontcolor, graph.node[n]["name"]),
"".join([ _graphviz_label2( p, graph, n_id_map, id_prefix = id_prefix, participant = True, **kwargs) for p in graph.node[n]["participant_ids"]]))
if participant:
return "<tr><td>%s</td></tr>" % label
else:
return "<%s>" % label
elif graph.node[n]["type"] == "species":
n_id_map[n] = id_prefix + n
if participant:
return "<tr><td port=\"%s\" color=\"%s\" bgcolor=\"%s\"><font color=\"%s\">%s</font></td></tr>" % (n, color, fillcolor, fontcolor, graph.node[n]["name"])
else:
label = graph.node[n]["name"]
if show_identifier:
label += "\n" + n
return label
else:
n_id_map[n] = n
label = ", ".join( sbo_go_name(b) for b in graph.node[n]["bqbiol_is"])
if show_identifier:
label += "\n" + n
return label
def _graphviz_add_node2( n, graph, graphviz_graph, n_id_map = {}, matched_nodes = set(),
**kwargs):
# choose color
color = kwargs["color"]
fontcolor = kwargs["fontcolor"]
fillcolor = kwargs["fillcolor"]
if n in matched_nodes:
color = kwargs["matched_color"]
fontcolor = kwargs["matched_fontcolor"]
fillcolor = kwargs["matched_fillcolor"]
# compute label
if graph.node[n].get("participants"):
label = _graphviz_label2( n, graph, n_id_map, id_prefix = n + ":", matched_nodes = matched_nodes, **kwargs)
else:
label = _graphviz_label2( n, graph, n_id_map, matched_nodes = matched_nodes, **kwargs)
if graph.node[n].get("participants"): # has participants
graphviz_graph.node( n, label = label, color = color, fontcolor = fontcolor, fillcolor = fillcolor, shape = "none")
elif graph.node[n]["type"] == "species": # simple species
graphviz_graph.node( n, label = label, shape = "rectangle", color = color, fontcolor = fontcolor, fillcolor = fillcolor, style = "filled")
else: # simple reaction
graphviz_graph.node( n, label = label, shape = "ellipse", color = color, fontcolor = fontcolor, fillcolor = fillcolor, style = "filled")
def graphviz_comparison_graph2( graph_1,
matched_nodes = set(),
matched_edges = set(),
file_name = "test.dot",
view = True,
mode = "only_match", # can be only_match, context, all
matched_color = "red",
matched_fontcolor = "red",
matched_fillcolor = "white",
fontcolor = "grey",
color = "grey",
fillcolor = "white",
show_identifier = False):
""" Visualization of matched nodes and edges using different color (single graph)"""
import graphviz
g = graphviz.Digraph()
participant_complex_map_1 = { id : n for n in graph_1.nodes() if graph_1.node[n].get("participant_ids") for id in graph_1.node[n].get("participant_ids")}
top_complexes_1 = [ get_top_complex( n, graph_1, participant_complex_map_1) for n in matched_nodes]
n_id_map_1 = {}
for n in top_complexes_1:
_graphviz_add_node2( n, graph_1, g, n_id_map_1, matched_nodes,
matched_color = matched_color,
matched_fontcolor = matched_fontcolor,
matched_fillcolor = matched_fillcolor,
fontcolor = fontcolor,
color = color,
fillcolor = fillcolor,
show_identifier = show_identifier)
[_graphviz_add_edge( e, graph_1, g, n_id_map_1, color = matched_color) for e in matched_edges]
if mode == "context":
context_edges = set([ sort_edge_signature((edge[0], edge[1], edge[2]["type"]), graph_1) for edge in graph_1.edges( matched_nodes, data = True)]).difference( matched_edges)
context_nodes_complexes = set([get_top_complex( n, graph_1, participant_complex_map_1) for n in set( [ c for e in context_edges for c in e[:2]]).difference( matched_nodes)]).difference(top_complexes_1)
# add context nodes
[_graphviz_add_node2( n, graph_1, g, n_id_map_1, color = color, fontcolor = fontcolor, fillcolor = fillcolor, show_identifier = show_identifier) for n in context_nodes_complexes]
# add context edges
[_graphviz_add_edge( e, graph_1, g, color = color) for e in set(context_edges).difference( matched_edges)]
elif mode == "all":
all_top_complexes = set( [ get_top_complex( n, graph_1, participant_complex_map_1) for n in set(graph_1.nodes()).difference( matched_nodes)])
all_edges = set([ (edge[0], edge[1], edge[2]["type"]) for edge in graph_1.edges( data = True)]).difference( matched_edges)
# add context nodes
[_graphviz_add_node2( n, graph_1, g, n_id_map_1, color = color, fontcolor = fontcolor, fillcolor = fillcolor, show_identifier = show_identifier) for n in all_top_complexes]
# add context edges
[_graphviz_add_edge( e, graph_1, g, color = color) for e in all_edges]
g.render( file_name, view = view)
#graphviz_comparison_graph2( graph_1, set(m_nodes.keys()), set(m_edges.keys()))
#graphviz_comparison_graph2( graph_1, set(m_nodes.keys()), set(m_edges.keys()), mode = "all")
########################################################################
########################################################################
def subgraph_overlap_graphviz( file_name = "TARGET__NLP-ANN__nm_name_clean_approx_OR_gene_id_intersect_AND_sbo_is_a__edge_match_exact--MAX.pickle"):
""" Creates a single overlap graph from subgraph match results (color based) """
import pickle
[graph_1, graph_2, subgraphs_2, matches_list] = pickle.load( open( file_name, "rb"))
edge_signatures_1 = edge_signatures( graph_1)
edge_signatures_2 = edge_signatures( graph_2)
all_matched_nodes_1 = set()
all_matched_edges_1 = set()
for subgraph_2, matches in zip( subgraphs_2, matches_list):
for m in matches:
all_matched_nodes_1.update( m.keys())
reverse_m = { v: k for k, v in m.iteritems()}
m_edges = subgraph_match_get_edges( subgraph_2, m , reverse_m, edge_signatures_1, edge_signatures_2)
all_matched_edges_1.update( m_edges.keys())
graphviz_comparison_graph2( graph_1, all_matched_nodes_1, all_matched_edges_1, mode = "all", file_name = file_name + ".dot")
def subgraph_overlaps_graphviz( input_file = "results/results-subgraphs-overlap-max__TARGET__NLP-ANN__nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a__edge_match_exact.pickle",
output_file_prefix = "results-subgraphs-overlap-max__TARGET__NLP-ANN__nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a__edge_match_exact",
include_context_graph_1 = True,
ignore_isolated_nodes = True,
graph_1 = None,
graph_2 = None,
show_identifier = False,
reactions_1 = None,
reactions_2 = None,
graph_1_reaction_txt_mapping = None,
graph_2_reaction_txt_mapping = None):
""" Creates many overlap graph from subgraph match results (comparison graph left/right) """
import pickle
[graph_1_f, graph_2_f, subgraphs_2, matches_list] = pickle.load( open( input_file, "rb"))
if graph_1 == None:
graph_1 = graph_1_f
if graph_2 == None:
graph_2 = graph_2_f
if reactions_1 == None:
reactions_1 = set( filter_reactions( graph_1))
if reactions_2 == None:
reactions_2 = set( filter_reactions( graph_2))
edge_signatures_1 = edge_signatures( graph_1)
edge_signatures_2 = edge_signatures( graph_2)
for i, subgraph_2, matches in zip( range(len(subgraphs_2)), subgraphs_2, matches_list):
print( "Processing %i of %i" % (i, len(subgraphs_2)))
if ignore_isolated_nodes and len(subgraph_2.nodes()) < 2:
print( "Ignoring %i of %i" % (i, len(subgraphs_2)))
else:
for j, m_nodes in enumerate( matches):
print( "Processing matches %i of %i" % (j, len(matches)))
reverse_m_nodes = { v: k for k, v in m_nodes.iteritems()}
m_edges = subgraph_match_get_edges( subgraph_2, m_nodes, reverse_m_nodes, edge_signatures_1, edge_signatures_2)
output_file = "%s-%i-%i.dot" % ( output_file_prefix, i, j)
print( "Exporting %s" % output_file)
graphviz_comparison_graph( graph_1,
graph_2,
m_nodes,
m_edges,
file_name = output_file,
view = False,
show_identifier = show_identifier,
include_context_graph_1 = include_context_graph_1)
if graph_1_reaction_txt_mapping:
output_file = "%s-%i-%i-target.txt" % ( output_file_prefix, i, j)
print( "Exporting %s" % output_file)
m_reactions_1 = reactions_1.intersection( m_nodes.keys())
open( output_file, "wt").write( "\n".join( [graph_1_reaction_txt_mapping[r] for r in m_reactions_1 if r in graph_1_reaction_txt_mapping]))
if graph_2_reaction_txt_mapping:
output_file = "%s-%i-%i-nlp.txt" % ( output_file_prefix, i, j)
print( "Exporting %s" % output_file)
m_reactions_2 = reactions_2.intersection( m_nodes.values())
open( output_file, "wt").write( "\n".join( [graph_2_reaction_txt_mapping[r] for r in m_reactions_2 if r in graph_2_reaction_txt_mapping]))
########################################################################
########################################################################
## overlap SBML
def _sbml_color_all( root, color_lines = "90000000", color_bounds = "00000000"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
for line in root.xpath("//cd:line", namespaces = namespaces):
line.set( "color", color_lines)
for paint in root.xpath("//cd:paint", namespaces = namespaces):
paint.set( "color", color_bounds)
def _sbml_color_reaction( root, reaction_id, color = "ffff0000", width = "1.0"):
""" colors the reaction links to reactant and product"""
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
lines = root.xpath("//sbml:reaction[@id='%s']/sbml:annotation/cd:line" % reaction_id, namespaces = namespaces)
assert( len(lines) == 1)
lines[0].set( "color", color)
lines[0].set( "width", width)
def _sbml_color_reaction_modifier( root, reaction_id, modifier_id, color = "ffff0000", width = "1.0"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
lines = root.xpath("//sbml:reaction[@id='%s']/sbml:annotation/cd:listOfModification/cd:modification[@aliases='%s']/cd:line" % (reaction_id, modifier_id), namespaces = namespaces)
if len(lines) == 1:
lines[0].set( "color", color)
lines[0].set( "width", width)
else:
print( "_sbml_color_reaction_modifier:Ignoring %s/%s" % (reaction_id, modifier_id))
def _sbml_color_species( root, species_id, color = "ffff0000"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
paints = root.xpath( "//cd:speciesAlias[@id='%s']//cd:paint" % species_id, namespaces = namespaces) \
or root.xpath( "//cd:complexSpeciesAlias[@id='%s']//cd:paint" % species_id, namespaces = namespaces)
assert( len(paints) > 0)
[ p.set( "color", color) for p in paints]
def subgraph_overlaps_sbml( graph_1,
matches_nodes_1 = set(),
matches_edges_1 = set(),
inn = 'mTORPathway-celldesigner.xml',
out = 'mTORPathway-celldesigner-color.xml',
background_color_bounds = "00000000",
background_color_lines = "000000",
matched_color = "FF00FF00",
matched_line_width = "2.0"):
""" Visualization of matched species and reactions using different color (single graph)"""
print( "sbml_color_matched:Loading %s" % inn)
tree = lxml.etree.parse( inn);
root = tree.getroot()
print( "subgraph_overlaps_sbml:Coloring background")
_sbml_color_all( root, color_bounds = background_color_bounds, color_lines = background_color_lines)
# color species
print( "subgraph_overlaps_sbml:Coloring matched species")
for n in set( filter_species( graph_1).nodes()).intersection( matches_nodes_1):
_sbml_color_species( root, n, color = matched_color)
print( "subgraph_overlaps_sbml:Coloring matched reactions")
matched_reactions = set( filter_reactions( graph_1).nodes()).intersection( matches_nodes_1)
modifier_edges = filter( lambda e: e[2] == "modifier", matches_edges_1)
matched_modifiers = { r : [e[0] for e in modifier_edges if e[1] == r] for r in matched_reactions}
for r in matched_reactions:
_sbml_color_reaction( root, r, color = matched_color, width = matched_line_width)
for m in matched_modifiers[r]:
_sbml_color_reaction_modifier( root, r, m, color = matched_color, width = matched_line_width)
print( "subgraph_overlaps_sbml:Outputting %s" % out)
tree.write( out, encoding='utf-8', xml_declaration = True)
########################################################################
########################################################################
# initialize
def initialize():
global SBO_NODES, GENE_MAP, SIMSTRING_DB
print( "Initializing networkx_analysis.py")
print( "Loading SBO")
SBO_NODES = pickle.load( open( "sbo.pickle", "rb"))
print( "Loading GENE_MAP")
GENE_MAP = pickle.load( open( "gene_map.pickle", "rb"))
print( "Loading SIMSTRING_DB")
SIMSTRING_DB = simstring.reader( 'gene_list.simstring')
SIMSTRING_DB.measure = simstring.cosine
SIMSTRING_DB.threshold = 0.9
def load_pathway( name,
input_file,
output_file,
output_file_participant_graph = None,
output_file_w_participant_edges = None,
ending = ".xml",
pickle_graph = True,
prefix = ""):
# load data
print( "Loading %s" % (input_file))
sbml = load_sbml( input_file)
model = sbml.getModel();
if model == None:
print( "Error loading %s" % (input_file))
return;
graph, participant_graph, graph_w_participant_edges = create_graph( model, prefix = prefix)
graph.name = name
graph.source_file_name = input_file
graph.file_name = output_file
if pickle_graph == True:
print( "Saving networkx as " + output_file)
pickle_output_file = output_file
networkx.write_gpickle( graph, pickle_output_file)
if output_file_participant_graph:
print( "Saving participant_graph networkx as " + output_file_participant_graph)
networkx.write_gpickle( participant_graph, output_file_participant_graph)
if output_file_w_participant_edges:
print( "Saving graph with participant edges networkx as " + output_file_w_participant_edges)
networkx.write_gpickle( graph_w_participant_edges, output_file_w_participant_edges)
return graph, participant_graph, graph_w_participant_edges
########################################################################
########################################################################
# PROCESSING SIGNATURES
def run_analysis_bqbiol_is_signatures( bqbiol_is_1, bqbiol_is_2,
name_1 = "name_1", name_2 = "name_2", type = "species",
equal_fns = [operator.eq]):
bqbiol_is_terms_set_1 = set( [b for t in bqbiol_is_1 for b in t])
bqbiol_is_set_1 = set(bqbiol_is_1)
bqbiol_is_terms_set_2 = set( [b for t in bqbiol_is_2 for b in t])
bqbiol_is_set_2 = set(bqbiol_is_2)
data = []
res_1, res_2, precision, recall, f_score = analyse_set_overlap( bqbiol_is_terms_set_1, bqbiol_is_terms_set_2)
print("%s:%s/%s:%s unique bqbiol_is terms equal: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type,
precision, recall, f_score))
data.append({ "graph_1" : name_1, "graph_2" : name_2, "unique" : True, "type" : type,
"reduction" : "bqbiol_is terms", "eq" : "eq",
"precision" : precision, "recall" : recall, "f-score" : f_score})
for eq_fun in equal_fns:
res_1, res_2, precision, recall, f_score = analyse_set_overlap( bqbiol_is_set_1, bqbiol_is_set_2, eq_fun)
print("%s:%s/%s:%s unique bqbiol_is signatures %s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type, eq_fun.__name__,
precision, recall, f_score))
data.append({ "graph_1" : name_1, "graph_2" : name_2, "unique" : True, "type" : type,
"reduction" : "bqbiol_is signatures", "eq" : eq_fun.__name__,
"precision" : precision, "recall" : recall, "f-score" : f_score})
res_1, res_2, precision, recall, f_score = analyse_list_overlap( bqbiol_is_1, bqbiol_is_2, eq_fun)
print("%s:%s/%s:%s bqbiol_is signatures %s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type, eq_fun.__name__,
precision, recall, f_score))
data.append({ "graph_1" : name_1, "graph_2" : name_2, "unique" : False, "type" : type,
"reduction" : "bqbiol_is signatures", "eq" : eq_fun.__name__,
"precision" : precision, "recall" : recall, "f-score" : f_score})
return data
def run_analysis_species_signatures( graph_1, graph_2, species_1 = None, species_2 = None):
import pandas
print("%s:%s/%s:run_analysis_species_signatures" % (now(), graph_1.name, graph_2.name))
if species_1 == None:
print("%s:%s/%s:run_analysis_species_signatures:filtering species graph_1" % (now(), graph_1.name, graph_2.name))
species_1 = filter_species( graph_1)
if species_2 == None:
print("%s:%s/%s:run_analysis_species_signatures:filtering species graph_2" % (now(), graph_1.name, graph_2.name))
species_2 = filter_species( graph_2)
data = []
print("%s:%s/%s:run_analysis_species_signatures:names" % (now(), graph_1.name, graph_2.name))
for reduction_fun, equality_fn in zip( [clean_name, clean_name2, clean_name2], [operator.eq, operator.eq, name_approx_equal]):
source_target = ([ reduction_fun( graph_1.node[n]["name"]) for n in species_1],
[ reduction_fun( graph_2.node[n]["name"]) for n in species_2])
res_1, res_2, precision, recall, f_score = analyse_set_overlap( set(source_target[0]), set(source_target[1]), equality_fn)
print("%s:%s/%s: species unique overlap %s/%s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), graph_1.name, graph_2.name, reduction_fun.__name__, equality_fn.__name__,
precision, recall, f_score))
data.append({ "graph_1" : graph_1.name, "graph_2" : graph_2.name, "unique" : True, "type" : "species",
"reduction" : reduction_fun.__name__, "eq" : equality_fn.__name__,
"precision" : precision, "recall" : recall, "f-score" : f_score})
res_1, res_2, precision, recall, f_score = analyse_list_overlap( source_target[0], source_target[1], equality_fn)
print("%s:%s/%s: species overlap %s/%s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), graph_1.name, graph_2.name, reduction_fun.__name__, equality_fn.__name__,
precision, recall, f_score))
data.append({ "graph_1" : graph_1.name, "graph_2" : graph_2.name, "unique" : False, "type" : "species",
"reduction" : reduction_fun.__name__, "eq" : equality_fn.__name__,
"precision" : precision, "recall" : recall, "f-score" : f_score})
# BQBIOL_IS
print("%s:%s/%s:run_analysis_species_signatures:running bqbiol_is" % (now(), graph_1.name, graph_2.name))
data.extend( run_analysis_bqbiol_is_signatures( bqbiol_is_1 = [ graph_1.node[n]["bqbiol_is"] for n in species_1],
bqbiol_is_2 = [ graph_2.node[n]["bqbiol_is"] for n in species_2],
name_1 = graph_1.name, name_2 = graph_2.name, type = "species",
equal_fns = [ tuple_eq_empty_not_eq, tuple_overlaps]))
data_p = | pandas.DataFrame(data) | pandas.DataFrame |
"""
calcimpy
Input impedance calculation program for air column ( wind instruments ).
"""
import argparse
import sys
import os.path
import numpy as np
import pandas as pd
import xmensur as xmn
import imped
__version__ = '1.1.0'
def main():
parser = argparse.ArgumentParser(description='calcimpy : input impedance calculation for air column')
parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('-m', '--minfreq', default='0.0', help='minimum frequency to calculate, default 0 Hz.')
parser.add_argument('-M', '--maxfreq', default='2000.0', help='maximum frequency to calculate, default 2000 Hz.')
parser.add_argument('-s', '--stepfreq', default='2.5', help='step frequency for calculation, default 2.5 Hz.')
parser.add_argument('-t', '--temperature', default='24.0', help='air temperature, default 24 celsius.')
parser.add_argument('-R', '--radiation', choices=['PIPE', 'BAFFLE', 'NONE'], default='PIPE', help='type of calculation of radiation, default PIPE.')
parser.add_argument('-o', '--output', default='', help='output filename, stdout is used when "-"')
parser.add_argument('filepath')
args = parser.parse_args()
path = args.filepath
if path:
# read mensur file here
mentop = xmn.read_mensur_file(path)
# set calculation conditions
imped.set_params(temperature=float(args.temperature), minfreq=float(args.minfreq),
maxfreq=float(args.maxfreq), stepfreq=float(args.stepfreq), rad=args.radiation)
nn = (imped._Mf - imped._mf)/imped._sf + 1
ff = np.linspace(imped._mf, imped._Mf, nn, endpoint=True)
wff = np.pi*2*ff
# set file output
if args.output == '-':
fout = sys.stdout
elif args.output == '':
# default *.imp
rt, ext = os.path.splitext(path)
fout = open(rt + '.imp', 'w')
else:
fout = open(args.output, 'w')
s = mentop.df*mentop.df*np.pi/4 # section area
zz = [s * imped.input_impedance(frq, mentop) for frq in wff]
zr = np.real(zz)
zi = np.imag(zz)
mg = [0 if z == 0 else 20*np.log10(np.abs(z)) for z in zz]
dt = | pd.DataFrame() | pandas.DataFrame |
from collections import defaultdict
import glob
import os
import pickle
import re
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from common.utils import METHOD_NAME, get_latest_folder, load_compressed_pickle, mysavefig
from games.maze.maze_game import MazeGame
from games.maze.maze_level import MazeLevel
from metrics.rl.tabular.rl_agent_metric import RLAgentMetric
from metrics.rl.tabular.rl_difficulty_metric import RLDifficultyMetric
from novelty_neat.maze.neat_maze_level_generation import GenerateMazeLevelsUsingTiling
import seaborn as sns
def pretty_key(k):
if 'time' in k:
return ' '.join(map(str.title, k.split("_"))) + " (s)"
# Thanks :) https://stackoverflow.com/a/37697078
splitted = re.sub('([A-Z][a-z]+)', r' \1', re.sub('([A-Z]+)', r' \1', k)).split()
return ' '.join(splitted)
def analyse_104_with_line_graph():
"""
This plots a line graph of experiment 104, as well as using some data from experiment 107.
The x-axis will be level size and the y-axis the metrics, specifically time and maybe some others.
"""
data = load_compressed_pickle(get_latest_folder('../results/experiments/104b/runs/*/data.pbz2'))
def get_mean_standard_for_one_point_in_directga(width, mean_dic, std_dic):
path = f'../results/experiments/experiment_107_a/Maze/DirectGA/2021-10-25_20-03-03/{width}/*/*/*/*/*/*/*.p'
li = glob.glob(path)
print(len(li), path)
assert len(li) == 5
metrics = defaultdict(lambda: [])
all_levels = []
for p in li:
with open(p, 'rb') as f:
d = pickle.load(f)
for key in d['eval_results_single']:
metrics[key].append(d['eval_results_single'][key])
for key in ['generation_time']:
# DON'T divide by 100 here, as this was for 1 level. The experiment.py already normalised it.
metrics[key].append(d[key])
all_levels.append(d['levels'][0])
dir = f'results/maze/104/line_graph/levels_direct_ga'
for i, l in enumerate(all_levels):
os.makedirs(dir, exist_ok=True)
plt.figure(figsize=(20, 20))
plt.imshow(1 - l.map, cmap='gray', vmin=0, vmax=1)
plt.axis('off')
mysavefig(os.path.join(dir, f'{width}-{i}.png'), pad_inches=0.1, bbox_inches='tight')
plt.close()
print("Direct ", metrics.keys())
for key in metrics:
metrics[key] = np.array(metrics[key])
mean_dic[key].append(np.mean(metrics[key]))
std_dic[key].append(np.std(metrics[key]))
D = data['data']
# D[14] = data['original']
fs = data['files']
og_metrics = defaultdict(lambda: 0)
for T in data['original']:
things = T['eval_results_single']
for key in things:
og_metrics[key] += np.mean(things[key])
for key in og_metrics:
og_metrics[key] /= len(fs)
all_metrics = {
# 14: og_metrics
}
all_values_mean = defaultdict(lambda : [])
all_values_std = defaultdict(lambda : [])
all_values_mean_direct_ga = defaultdict(lambda : [])
all_values_std_direct_ga = defaultdict(lambda : [])
directga_widths = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
for w in directga_widths:
get_mean_standard_for_one_point_in_directga(w, all_values_mean_direct_ga, all_values_std_direct_ga)
widths = []
the_keys_to_use = sorted(D.keys())
for width in the_keys_to_use:
levels_to_plot = []
metrics = defaultdict(lambda: [])
widths.append(width)
for d in D[width]:
levels_to_plot.append(d['levels'][0])
for key in d['eval_results_single']:
metrics[key].append(d['eval_results_single'][key])
for key in ['generation_time']:
if width != 14:
# for 14, it was measured properly.
# the values in here were for all levels, so we norm it to one level.
metrics[key].append(d[key] / 100)
else:
metrics[key].append(d[key])
for key in metrics:
metrics[key] = np.array(metrics[key])
all_values_mean[key].append(np.mean(metrics[key]))
all_values_std[key].append(np.std(metrics[key]))
dir = 'results/maze/104/line_graph/levels'
os.makedirs(dir, exist_ok=True)
for i, l in enumerate(levels_to_plot):
l: MazeLevel
plt.figure(figsize=(20, 20))
l.show(True)
plt.axis('off')
mysavefig(os.path.join(dir, f'{width}-{i}.png'), pad_inches=0.1, bbox_inches='tight')
plt.close()
metrics_to_plot = [
'generation_time',
'SolvabilityMetric',
'CompressionDistanceMetric',
'AStarDiversityMetric',
'AStarDifficultyMetric',
'AStarEditDistanceDiversityMetric'
]
print("KEYS: ", all_values_mean.keys())
sns.set_theme()
for key in metrics_to_plot:
all_values_mean[key] = np.array(all_values_mean[key])
all_values_std[key] = np.array(all_values_std[key])
all_values_mean_direct_ga[key] = np.array(all_values_mean_direct_ga[key])
all_values_std_direct_ga[key] = np.array(all_values_std_direct_ga[key])
plt.figure()
try:
plt.plot(widths, all_values_mean[key], label=METHOD_NAME)
plt.fill_between(widths, all_values_mean[key] - all_values_std[key], all_values_mean[key] + all_values_std[key], alpha=0.5)
except Exception as e:
print("ERROR", e)
if len(all_values_mean_direct_ga[key]) == 0:
print(f"KEY = {key} does not have data for DirectGA")
else:
plt.plot(directga_widths, all_values_mean_direct_ga[key], label='DirectGA+')
plt.fill_between(directga_widths, all_values_mean_direct_ga[key] - all_values_std_direct_ga[key], all_values_mean_direct_ga[key] + all_values_std_direct_ga[key], alpha=0.5)
plt.xlabel("Level Width = Height")
pkey = pretty_key(key).replace("Metric", '').strip()
plt.ylabel(pkey)
plt.title(f"Comparing {pkey} vs Level Size. Higher is better.")
if 'time' in key.lower():
plt.title(f"Comparing {pkey} vs Level Size. Lower is better.")
plt.scatter([14, 20], [40000, 70000], marker='x', color='red', label='PCGRL (Turtle)')
plt.yscale('log')
plt.tight_layout()
# plt.show()
plt.legend()
mysavefig(f'results/maze/104/line_graph/{key}.png', bbox_inches='tight', pad_inches=0.05)
df = | pd.DataFrame(all_metrics) | pandas.DataFrame |
import tempfile
import unittest
import numpy as np
import pandas as pd
from airflow import DAG
from datetime import datetime
from mock import MagicMock, patch
import dd.api.workflow.dataset
from dd import DB
from dd.api.workflow.actions import Action
from dd.api.workflow.sql import SQLOperator
dd.api.workflow.dataset.is_ipython = lambda: True
dd.api.workflow.actions.is_ipython = lambda: True
from dd.api.contexts.distributed import AirflowContext
from dd.api.workflow.dataset import Dataset, DatasetLoad, DatasetTransformation
class TestDataset(unittest.TestCase):
def setUp(self):
self.workflow = MagicMock(spec_set=DAG("test_workflow", start_date=datetime.now()))
self.workflow.dag_id = "mock"
self.db = MagicMock()
self.db.query.result_value = None
def test_creating_dataset_should_add_task_to_workflow(self):
# Given
workflow = self.workflow
db = self.db
# When
_ = AirflowContext(workflow, db).create_dataset("table")
# Assert
workflow.add_task.assert_called_once()
def test_apply_method_should_run(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 7], [6, 7]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
self.assertEqual(arg1, 1)
self.assertEqual(arg2, 2)
self.assertEqual(arg3, 3)
odf = indf.applymap(lambda t: t + arg1 + arg2 + arg3)
self.assertTrue(odf.equals(expected_result1))
# When a valid execution
new_action = dataset.apply(my_apply_function, 1, 2, 3)
# Assert
self.assertFalse(new_action.executed)
new_action.execute()
def test_apply_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
pass
# When
new_action = dataset.apply(my_apply_function, 1, 2)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_action = dataset.apply(my_apply_function)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
def test_transform_method_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.transform(lambda x: x)
# Assert
self.assertIsNot(new_dataset, dataset)
self.assertIsInstance(new_dataset, Dataset)
def test_transform_method_should_handle_optional_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
dataset2 = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
# With a function with only args
def my_transform_function(indf, df2, arg1=0):
return indf.applymap(lambda t: t + arg1)
# When
new_dataset = dataset.transform(my_transform_function,
arg1=1,
output_table="mytable",
datasets=[dataset2],
write_options=dict(if_exists="replace"))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
self.assertTrue(new_dataset.output_table == "mytable")
def test_transform_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 4], [3, 4]])
# With a function with only args
def my_transform_function(indf, arg1, arg2, arg3):
return indf.applymap(lambda t: t + arg1 + arg2 + arg3)
# When
new_dataset = dataset.transform(my_transform_function, 1, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function, 1, 1, 1)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
def test_transform_method_should_handle_args_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
# With a function with arg and kwargs
def mytransfun(indf, myarg1, mynamedarg1=1):
return indf.applymap(lambda t: t + myarg1 - mynamedarg1)
# When
new_dataset = dataset.transform(mytransfun, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
# When
new_dataset = dataset.transform(mytransfun, 2, mynamedarg1=0)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_apply_function_to_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
dataset2 = context.create_dataset("table")
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[0.0, 1], [0.0, 1]])
# When
new_dataset = dataset.transform(lambda x: x.applymap(lambda t: t + 1))
new_dataset2 = dataset2.transform(lambda df: df.fillna(0))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertIsNone(new_dataset2.dataframe)
self.assertFalse(new_dataset.executed)
self.assertFalse(new_dataset2.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
new_dataset2.execute()
new_dataset2.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_be_able_to_process_multiple_datasets(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset1 = context.create_dataset("table")
dataset2 = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
new_dataset = dataset1.transform(mock_function, datasets=[dataset2])
# When
new_dataset.execute()
new_dataset.collect()
# Check
args, kwargs = mock_function.call_args
self.assertTrue(args[0], dataset1)
self.assertTrue(args[1], dataset2)
def test_collect_should_return_dataframe_attribute_when_non_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
initial_dataframe = pd.DataFrame([[0.0, 1], [0.0, 1]])
dataset.dataframe = initial_dataframe
# When
dataframe = dataset.collect()
# Assert
self.assertIsInstance(dataframe, pd.DataFrame)
self.assertTrue(dataframe.equals(initial_dataframe))
def test_collect_should_call_db_retrieve_table_when_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
output_table = "output_table"
dataset.output_table = output_table
# When
dataset.collect()
# Assert
self.db.retrieve_table.assert_called_once_with(output_table)
def test_split_train_test_should_return_two_datasets(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
train, test = dataset.split_train_test()
# Assert
self.assertIsInstance(train, Dataset)
self.assertIsInstance(test, Dataset)
def test_join_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset_left = context.create_dataset("table")
dataset_right = context.create_dataset("table")
# When
join = dataset_left.join(dataset_right)
# Check
self.assertIsInstance(join, Dataset)
def test_execute_should_call_operator_execute_once(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute()
# Check
dataset.operator.execute.assert_called_once()
def test_execute_with_force_should_call_operator_execute_twice(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute(force=True)
# Check
self.assertEqual(dataset.operator.execute.call_count, 2)
def test_execute_when_operator_is_DDOperator_should_return_resulted_dataframe_from_operator_get_result(self):
# Given
dataset = Dataset(MagicMock(), 'output')
dataset.executed = False
dataset.operator = MagicMock()
dataset.operator.execute = lambda: 'output_table'
dataset.operator.get_result = lambda: 'Dataframe'
dataset.operator.set_upstream = None
# When
result = dataset.execute()
# Check
self.assertEqual(result, 'Dataframe')
def test_transform_with_if_exists_should_append_to_existing_table(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
new_dataset = dataset.transform(lambda x: x,
write_options=dict(if_exists="append"))
# When
new_dataset.execute()
# Check
self.assertIn("if_exists", self.db.import_dataframe.call_args[1])
self.assertEqual(self.db.import_dataframe.call_args[1]["if_exists"],
"append")
def test_select_columns_should_create_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"])
# Check
self.assertIsInstance(new_dataset, Dataset)
self.assertIsNot(new_dataset, dataset)
def test_default_is_cached_should_match_context_auto_persistence(self):
# Given
persisted_context = MagicMock()
persisted_context.auto_persistence = True
unpersisted_context = MagicMock()
unpersisted_context.auto_persistence = False
# When
persisted_dataset = Dataset(persisted_context, "foo")
unpersisted_dataset = Dataset(unpersisted_context, "bar")
# Check
self.assertTrue(persisted_dataset.is_cached)
self.assertFalse(unpersisted_dataset.is_cached)
def test_is_cached_attribute_may_be_set_by_cache_method(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
dataset.cache()
# Check
self.assertTrue(dataset.is_cached)
# Then when
dataset.cache(boolean=False)
# Check
self.assertFalse(dataset.is_cached)
def test_memory_usage_returns_integer(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
usage = dataset.memory_usage
# Check
self.assertIsInstance(usage, int)
def test_providing_output_table_in_select_columns_must_set_output_table(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"],
output_table="myoutput.table")
# Check
self.assertEqual(new_dataset.output_table, "myoutput.table")
def test_sql_query_should_return_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.sql.query("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(new_dataset, Dataset)
def test_sql_query_should_call_db_query(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
qw = dataset.sql.query("SELECT * FROM foo.bar")
qw.execute() # In airflow context we force execution
qw.head()
# Check
self.db.query.assert_called_once_with("SELECT * FROM foo.bar")
def test_sql_execute_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
action = dataset.sql.execute("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(action, Action)
def test_sql_execute_should_call_db_execute(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
action = dataset.sql.execute("SELECT * FROM foo.bar")
# When
action.execute(force=True)
# Check
self.db.execute.assert_called_once_with("SELECT * FROM foo.bar")
def test_apply_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
# When
result = dataset.apply(mock_function)
# Check
self.assertIsInstance(result, Action)
def test_sql_should_be_SQLOperator(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIsInstance(result, SQLOperator)
def test_sql_should_have_same_context(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIs(result.context, dataset.context)
def test_multitransform_method_should_allow_multiple_output_datasets(self):
# Given
with tempfile.NamedTemporaryFile() as tmp:
workflow = DAG("test_workflow", start_date=datetime.now())
db = DB(dbtype='sqlite', filename=tmp.name)
ctx = AirflowContext(workflow, db)
# given
df = pd.DataFrame([[np.nan, 2], [1, 2]])
df.columns = map(lambda x: "num_" + str(x), df.columns)
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
expected_result2.columns = map(lambda x: "num_" + str(x), expected_result2.columns)
db.import_dataframe(df, "test_num", index=False)
dataset = ctx.table("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
# then
self.assertIsNone(new_df1.dataframe)
self.assertFalse(new_df1.executed)
self.assertIsNone(new_df2.dataframe)
self.assertFalse(new_df2.executed)
# finally
new_df1.execute()
# same result
odf1 = new_df1.collect()
odf2 = new_df2.collect()
pd.testing.assert_frame_equal(odf1, df)
pd.testing.assert_frame_equal(odf2, expected_result2)
def test_multitransform_should_handle_column_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then columns must be equal
new_df1_cols = list(new_df1.columns)
new_df2_cols = list(new_df2.columns)
self.assertEqual(new_df1_cols, new_df2_cols)
def test_multitransform_should_handle_shape_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then shapes must be equal
new_df1_sh = new_df1.shape
new_df2_sh = new_df2.shape
self.assertEqual(new_df1_sh, new_df2_sh)
def test_multitransform_should_handle_memory_usage_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then memory usage must be equal
mu1 = new_df1.memory_usage
mu2 = new_df2.memory_usage
self.assertEqual(mu1, mu2)
def test_multitransform_should_handle_head_method(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then head must be equal
pd.testing.assert_frame_equal(new_df1.head(2), df.head(2))
pd.testing.assert_frame_equal(new_df2.head(2), df.head(2) + 1)
def test_multitransform_should_handle_sql_operator(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
result = ctx.db.read_sql("select * from odf1")
# then dataframe must be equal
pd.testing.assert_frame_equal(result, df)
def test_multitransform_should_handle_join_method(self):
# Given
ctx = self._get_airflow_context()
df = | pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]) | pandas.DataFrame |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# 从TDX磁盘空间读取数据
import os
import re
from datetime import time
import pandas as pd
from pandas import DataFrame
from pytdx.reader import TdxDailyBarReader, TdxExHqDailyBarReader, TdxLCMinBarReader, BlockReader, GbbqReader
from czsc.Data.code_classify import sse_code_classify, szse_code_classify
from czsc.Setting import TDX_DIR
from czsc.Utils import util_log_info
from czsc.Data.frequency import parse_frequency_str
from czsc.Data.resample import resample_from_daily_data
from czsc.Utils.trade_date import util_get_next_day
_SH_DIR = '{}{}{}'.format(TDX_DIR, os.sep, 'vipdoc\\sh')
_SZ_DIR = '{}{}{}'.format(TDX_DIR, os.sep, 'vipdoc\\sz')
_DS_DIR = '{}{}{}'.format(TDX_DIR, os.sep, 'vipdoc\\ds')
def _get_sh_sz_list():
"""
读取上海深圳交易所行情目录的文件列表,并对市场,品种和代码分类
sh000015.day 期货 ('28', 'AP2003')
'sse' # 上海证券交易所 sh 6位数字代码
前两位 "60" A股
"90" B股
"00", "88", "99" 指数
"50", "51" 基金
"01", "10", "11", "12", "13", "14" 债券,和深圳有重合
110 可转债 对应股票代码 600
111 601
113 可转债 对应股票代码 603 沪市中小板
118 可转债 科创板
'szse' # 深圳证券交易所 sz 6位数字代码
前两位 "00", "30" A股
"20"
"39" 指数
"15", "16" 基金
"10", "11", "12", "13", "14" 债券,和深圳有重合
123 可转债 对应股票代码 300
128 可转债 对应股票代码 002
127 可转债 对应股票代码 000
pattern = "^(?P<tdx_code>[shz]{2})#(?P<code>\d{6})\.day"
"""
sh_dir = '{}{}{}'.format(_SH_DIR, os.sep, 'lday')
sh_list = os.listdir(sh_dir)
pattern = "^(?P<tdx_code>sh)(?P<code>\d{6})\.day"
data = [re.match(pattern, x) for x in sh_list]
try:
sh_df = pd.DataFrame([x.groupdict() for x in data])
except:
util_log_info("{} can't be analyzed by pattern ({}) }".format(_SH_DIR, pattern))
return None
sh_df['exchange'] = 'sse'
sh_df['instrument'] = sh_df.code.apply(sse_code_classify)
sz_dir = '{}{}{}'.format(_SZ_DIR, os.sep, 'lday')
sz_list = os.listdir(sz_dir)
pattern = "^(?P<tdx_code>sz)(?P<code>\d{6})\.day"
data = [re.match(pattern, x) for x in sz_list]
try:
sz_df = pd.DataFrame([x.groupdict() for x in data])
except:
util_log_info("{} can't be analyzed by pattern ({}) }".format(_SZ_DIR, pattern))
return None
sz_df['exchange'] = 'szse'
sz_df['instrument'] = sz_df.code.apply(szse_code_classify)
sz_df['filename'] = sz_list
sz_df['last_modified'] = sz_df['filename'].apply(lambda x: os.path.getmtime(os.path.join(sz_dir, x)))
sh_df['filename'] = sh_list
sh_df['last_modified'] = sh_df['filename'].apply(lambda x: os.path.getmtime(os.path.join(sh_dir, x)))
return pd.concat([sh_df, sz_df])
def _get_ds_list():
"""
读取扩展行情目录的文件列表,并对市场,品种和代码分类
47#TS2009.day 期货 ('28', 'AP2003')
7#IO760795.day 期权 ('7', 'IO760795')
5#V 7C0D49.day 期权 中间有空格,特殊处理
102#980001.day 102 国证指数
pattern = "^(?P<tdx_code>\d{1,3})#(?P<code>.+)\.day"
"""
DS_CODE_TO_TYPE = {
'4': {'exchange': 'czce', 'instrument': 'option'},
'5': {'exchange': 'dce', 'instrument': 'option'},
'6': {'exchange': 'shfe', 'instrument': 'option'},
'7': {'exchange': 'cffex', 'instrument': 'option'},
'8': {'exchange': 'sse', 'instrument': 'option'},
'9': {'exchange': 'szse', 'instrument': 'option'},
'27': {'exchange': 'hkse', 'instrument': 'index'}, # 香港指数
'28': {'exchange': 'czce', 'instrument': 'future'},
'29': {'exchange': 'dce', 'instrument': 'future'},
'30': {'exchange': 'shfe', 'instrument': 'future'},
'31': {'exchange': 'hkse', 'instrument': 'stock'}, # 香港主板
'33': {'exchange': 'sse szse', 'instrument': 'OEF'}, # 开放式基金
'34': {'exchange': 'sse szse', 'instrument': 'MMF'}, # 货币型基金
'44': {'exchange': 'neeq', 'instrument': 'stock'}, # 股转系统
'47': {'exchange': 'cffex', 'instrument': 'future'},
'48': {'exchange': 'hkse', 'instrument': 'stock'}, # 香港创业板
'49': {'exchange': 'hkse', 'instrument': 'TF'}, # 香港信托基金
'62': {'exchange': 'csindex', 'instrument': 'index'}, # 中证指数
'71': {'exchange': 'hkconnect', 'instrument': 'stock'}, # 港股通品种
'102': {'exchange': 'sse szse', 'instrument': 'index'},
}
ds_dir = '{}{}{}'.format(_DS_DIR, os.sep, 'lday')
ds_list = os.listdir(ds_dir)
pattern = "^(?P<tdx_code>\d{1,3})#(?P<code>.+)\.day"
data = [re.match(pattern, x) for x in ds_list]
try: # 注释条码用来显示pattern不能识别的文件名
# for i, x in enumerate(Data):
# if not x:
# util_log_info('{}'.format(ds_list[i]))
ds_df = pd.DataFrame([x.groupdict() for x in data])
except:
util_log_info("{} can't be analyzed by pattern ({})".format(_DS_DIR, pattern))
return None
ds_df['exchange'] = ds_df.tdx_code.apply(lambda x: DS_CODE_TO_TYPE[x]['exchange'] if x in DS_CODE_TO_TYPE else None)
ds_df['instrument'] = ds_df.tdx_code.apply(
lambda x: DS_CODE_TO_TYPE[x]['instrument'] if x in DS_CODE_TO_TYPE else None)
ds_df['filename'] = ds_list
ds_df['last_modified'] = ds_df['filename'].apply(lambda x: os.path.getmtime(os.path.join(ds_dir, x)))
return ds_df
def get_security_list():
securities: DataFrame = pd.concat([_get_sh_sz_list(), _get_ds_list()])
securities['last_modified'] = securities['last_modified'].apply(lambda x: pd.to_datetime(x, unit='s')) # 日期正确,小时不对
return securities.set_index('code')
SECURITY_DATAFRAME = get_security_list()
def _get_tdx_code_from_security_dataframe(code, exchange):
try:
recorder = SECURITY_DATAFRAME.loc[code]
except:
util_log_info("Can't get tdx_code from {}".format(code))
return
if isinstance(recorder, pd.Series):
return recorder['tdx_code']
try:
return recorder.loc[recorder['exchange'] == exchange].loc[code, 'tdx_code']
except:
util_log_info('Not only one {} in the list , please provide exchange or instrument'.format(code))
return recorder.tdx_code[0]
def _generate_path(code, freq, tdx_code):
# code = code.upper()
# standard_freq = standard_freq.lower()
ext = {
'D': '.day',
'5min': '.lc5',
'1min': '.lc1',
}
dir = {
'D': 'lday',
'5min': 'fzline',
'1min': '.minline',
}
try:
if tdx_code == 'sz':
dir_name = '{}{}{}'.format(_SZ_DIR, os.sep, dir[freq])
filename = tdx_code + code + ext[freq]
elif tdx_code == 'sh':
dir_name = '{}{}{}'.format(_SH_DIR, os.sep, dir[freq])
filename = tdx_code + code + ext[freq]
else:
dir_name = '{}{}{}'.format(_DS_DIR, os.sep, dir[freq])
filename = tdx_code + '#' + code + ext[freq]
except KeyError:
util_log_info('Not supported Frequency {}!'.format(freq))
return
file_path = os.path.join(dir_name, filename)
return file_path
def get_bar(code, start=None, end=None, freq='day', exchange=None):
"""
股票成交量 volume 单位是100股
"""
code = code.upper()
standard_freq = parse_frequency_str(freq)
try:
tdx_code = _get_tdx_code_from_security_dataframe(code, exchange)
except:
util_log_info("Can't get tdx_code from {}".format(code))
return
if standard_freq in ['D', 'w', 'M', 'Q', 'Y']:
file_path = _generate_path(code, 'D', tdx_code)
elif standard_freq in ['1min', '5min', '30min', '60min']:
file_path = _generate_path(code, '5min', tdx_code)
elif standard_freq in ['1min']:
file_path = _generate_path(code, '1min', tdx_code)
else:
util_log_info('Not supported frequency {}'.format(freq))
return
if not os.path.exists(file_path):
util_log_info('=={}== {} file is not exists!'.format(code, file_path))
return
# 统一freq的数据结构
if tdx_code in ['sh', 'sz']:
if standard_freq in ['D', 'w', 'M', 'Q', 'Y']:
reader = TdxDailyBarReader()
df = reader.get_df(file_path)
elif standard_freq in ['1min', '5min', '30min', '60min']:
reader = TdxLCMinBarReader()
df = reader.get_df(file_path)
else:
util_log_info('Not supported frequency {}'.format(freq))
return
else:
if standard_freq in ['D', 'w', 'M', 'Q', 'Y']:
reader = TdxExHqDailyBarReader()
df = reader.get_df(file_path)
elif standard_freq in ['1min', '5min', '30min', '60min']:
reader = TdxLCMinBarReader()
df = reader.get_df(file_path)
else:
util_log_info('Not supported frequency {}'.format(freq))
return
if len(df) < 1:
return
recorder = SECURITY_DATAFRAME.loc[code]
if isinstance(recorder, pd.DataFrame):
instrument = recorder.loc[recorder['tdx_code'] == tdx_code].loc[code, 'instrument']
exchange = recorder.loc[recorder['tdx_code'] == tdx_code].loc[code, 'exchange']
else:
instrument = recorder['instrument']
exchange = recorder['exchange']
if instrument in ['future', 'option']:
df.rename(columns={'amount': "position", "jiesuan": "settle"}, inplace=True)
if start:
start = pd.to_datetime(start)
df = df[df.index >= start]
if end:
end = pd.to_datetime(end)
if standard_freq in ['1min', '5min', '30min', '60min']:
if time(0, 0) == end.time():
end = pd.to_datetime(util_get_next_day(end))
df = df[df.index <= end]
df['date'] = df.index
df = df.assign(code=code, exchange=exchange)
if standard_freq in ['w', 'M', 'Q', 'Y']:
df = resample_from_daily_data(df, standard_freq)
return df
def get_index_block():
"""
返回股票对应的指数
block_zs.dat 对应通达信指数板块
block_gn.dat 对应通达信概念板块
block_fg.dat 对应通达信风格板块 融资融券 已高送转 近期弱势
index 为 code
columns 为指数,如果为指数成份股 则为2
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\block_zs.dat')
return BlockReader().get_df(filename).pivot(index='code', columns='blockname', values='block_type')
def get_concept_block():
"""
返回股票对应的指数
block_zs.dat 对应通达信指数板块
block_gn.dat 对应通达信概念板块
block_fg.dat 对应通达信风格板块 融资融券 已高送转 近期弱势
index 为 code
columns 为指数,如果为指数成份股 则为2
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\block_gn.dat')
return BlockReader().get_df(filename).pivot(index='code', columns='blockname', values='block_type')
def get_style_block():
"""
返回股票对应的指数
block_zs.dat 对应通达信指数板块
block_gn.dat 对应通达信概念板块
block_fg.dat 对应通达信风格板块 融资融券 已高送转 近期弱势
index 为 code
columns 为指数,如果为指数成份股 则为2
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\block_fg.dat')
return BlockReader().get_df(filename).pivot(index='code', columns='blockname', values='block_type')
def get_convertible_info():
"""
D:\Trade\TDX\cjzq_tdx\T0002\hq_cache\speckzzdata.txt
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\speckzzdata.txt')
columns = [
'exchange', 'code', 'stock_code', 'convert_price', 'current_interest', 'list_amount', 'call_price',
'redeem_price',
'convert_start', 'due_price', 'convert_end', 'convert_code', 'current_amount', 'list_date', 'convert_ratio(%)'
]
df = | pd.read_csv(filename, names=columns) | pandas.read_csv |
"""
Common routines to work with raw MS data from metabolomics experiments.
Functions
---------
detect_features(path_list) : Perform feature detection on several samples.
feature_correspondence(feature_data) : Match features across different samples
using a combination of clustering algorithms.
"""
import pandas as pd
import numpy as np
from .fileio import MSData
from .container import DataContainer
from .lcms import Roi
from . import validation
from pathlib import Path
from sklearn.cluster import DBSCAN
from sklearn import mixture
from scipy.optimize import linear_sum_assignment
from typing import Optional, Tuple, List, Dict, Union
from IPython.display import clear_output
__all__ = ["detect_features", "feature_correspondence", "make_data_container"]
def detect_features(path: Union[Path, List[str]], separation: str = "uplc",
instrument: str = "qtof", roi_params: Optional[dict] = None,
smoothing_strength: Optional[float] = 1.0,
noise_params: Optional[dict] = None,
baseline_params: Optional[dict] = None,
find_peaks_params: Optional[dict] = None,
descriptors: Optional[dict] = None,
filters: Optional[dict] = None,
verbose: bool = True
) -> Tuple[Dict[str, List[Roi]], pd.DataFrame]:
"""
Perform feature detection on LC-MS centroid samples.
Parameters
----------
path: Path or List[str]
Path can be a list of strings of absolute path representations to mzML
files in centroid mode or a Path object. Path objects can be used in
two ways: It can point to a mzML file or to a directory. in the second
case all mzML files inside the directory will be analyzed.
separation: {"uplc", "hplc"}
Analytical platform used for separation. Used to set default the values
of `detect_peak_params`, `roi_params` and `filter_params`.
instrument: {"qtof". "orbitrap"}
MS instrument used for data acquisition. Used to set default value
of `roi_params`.
roi_params: dict, optional
parameters to pass to :py:meth:`tidyms.MSData.make_roi`
smoothing_strength: positive number, optional
Width of a gaussian window used to smooth the ROI. If None, no
smoothing is applied.
find_peaks_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.detect_peaks`
descriptors : dict, optional
descriptors to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
filters : dict, optional
filters to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
noise_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_noise`
baseline_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_baseline`
descriptors : dict, optional
pass custom descriptors to :py:func:`tidyms.peaks.get_peak_descriptors`
filters : dict, optional
pass custom filters to :py:func:`tidyms.peaks.get_peak_descriptors`
verbose: bool
Returns
-------
roi_dict: dict
dictionary of sample names to a list of ROI.
feature_table: DataFrame
A Pandas DataFrame where each row is a feature detected in a sample and
each column is a feature descriptor. By default the following
descriptors are computed:
mz
weighted average of the m/z in the peak region.
mz std
standard deviation of the m/z in the peak region.
rt
weighted average of the retention time in the peak region.
width
Chromatographic peak width.
height
Height of the chromatographic peak minus the baseline.
area
Area of the chromatographic peak. minus the baseline area.
sample
The sample name where the feature was detected.
Also, two additional columns have information to search each feature
in its correspondent Roi:
roi_index :
index in the list of ROI where the feature was detected.
peak_index :
index of the peaks attribute of each ROI associated to the feature.
Notes
-----
Features are detected as follows:
1. Default parameters are set based on the values of the parameters
`instrument` and `separation`.
2. Regions of interest (ROI) are detected in each sample. See the
documentation of :py:meth:`tidyms.fileio.MSData.make_roi` for a detailed
description of how ROI are created from raw data.
3. Features (chromatographic peaks) are detected on each ROI. See
:py:meth:`tidyms.lcms.Chromatogram.find_peaks` for a detailed
description of how peaks are detected and how descriptors are computed.
See Also
--------
fileio.MSData.make_roi : Finds ROIs in a mzML sample.
lcms.ROI.find_peaks : Detect peaks and compute peak estimators for a ROI.
"""
# parameter validation
# validation.validate_detect_peaks_params(detect_peak_params)
validation.validate_descriptors(descriptors)
validation.validate_filters(filters)
if roi_params is None:
roi_params = dict()
path_list = _get_path_list(path)
roi_dict = dict()
ft_table_list = list()
n_samples = len(path_list)
for k, sample_path in enumerate(path_list):
sample_name = sample_path.stem
sample_path_str = str(sample_path)
ms_data = MSData(sample_path_str, ms_mode="centroid",
instrument=instrument, separation=separation)
k_roi = ms_data.make_roi(**roi_params)
if verbose:
clear_output(wait=True)
msg = "Processing sample {} ({}/{})."
msg = msg.format(sample_name, k + 1, n_samples)
print(msg)
print("Searching features in {} ROI...".format(len(k_roi)), end=" ")
k_table = _build_feature_table(k_roi,
smoothing_strength=smoothing_strength,
descriptors=descriptors,
filters=filters,
noise_params=noise_params,
baseline_params=baseline_params,
find_peaks_params=find_peaks_params)
if verbose:
msg = "Found {} features".format(k_table.shape[0])
print(msg)
k_table["sample"] = sample_name
roi_dict[sample_name] = k_roi
ft_table_list.append(k_table)
feature_table = | pd.concat(ft_table_list) | pandas.concat |
import utility_funcs as uf
import ProjectOverlayDataProcess as data
import pandas as pd
import numpy as np
import code
number_of_groups=5
def import_data(only_relevant_groups=True):
if only_relevant_groups:
members = data.get_group_membership()
relevantgroups = data.import_dataframe("relevantgroups")
cosine_similarities = data.import_dataframe("cosine_similarities")
members = members.loc[members.group_guid.isin(relevantgroups.guid),:]
return members, cosine_similarities
else:
return data.get_group_membership(), data.import_dataframe("cosine_similarities")
def calculate_group_similarities(df, groupbycol, nestcol, newcolname):
newdf = uf.nest_for_json(df, groupbycol=groupbycol,
nestcol=nestcol,
newcolname=newcolname)
length = len(newdf)
similarity_matrix = np.zeros((length, length))
for i in range(length):
for j in range(length):
similarity_matrix[i,j] = uf.list_similarites(newdf[newcolname][i],newdf[newcolname][j])
similarity_df = | pd.DataFrame(similarity_matrix) | pandas.DataFrame |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import sys
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
logger = logging.getLogger('causalml')
def smd(feature, treatment):
"""Calculate the standard mean difference (SMD) of a feature between the
treatment and control groups.
The definition is available at
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3144483/#s11title
Args:
feature (pandas.Series): a column of a feature to calculate SMD for
treatment (pandas.Series): a column that indicate whether a row is in
the treatment group or not
Returns:
(float): The SMD of the feature
"""
t = feature[treatment == 1]
c = feature[treatment == 0]
return (t.mean() - c.mean()) / np.sqrt(.5 * (t.var() + c.var()))
def create_table_one(data, treatment_col, features):
"""Report balance in input features between the treatment and control groups.
References:
R's tableone at CRAN: https://github.com/kaz-yos/tableone
Python's tableone at PyPi: https://github.com/tompollard/tableone
Args:
data (pandas.DataFrame): total or matched sample data
treatment_col (str): the column name for the treatment
features (list of str): the column names of features
Returns:
(pandas.DataFrame): A table with the means and standard deviations in
the treatment and control groups, and the SMD between two groups
for the features.
"""
t1 = pd.pivot_table(data[features + [treatment_col]],
columns=treatment_col,
aggfunc=[lambda x: '{:.2f} ({:.2f})'.format(x.mean(),
x.std())])
t1.columns = t1.columns.droplevel(level=0)
t1['SMD'] = data[features].apply(
lambda x: smd(x, data[treatment_col])
).round(4)
n_row = pd.pivot_table(data[[features[0], treatment_col]],
columns=treatment_col,
aggfunc=['count'])
n_row.columns = n_row.columns.droplevel(level=0)
n_row['SMD'] = ''
n_row.index = ['n']
t1 = | pd.concat([n_row, t1], axis=0) | pandas.concat |
from sklearn.feature_extraction import DictVectorizer
import pandas as pd
import numpy as np
class LinearModel(object):
@staticmethod
def validate_options(opts):
if opts['loss'] == 'quantile':
raise NotImplementedError("Loss function 'quantile' is not implemented yet")
# if opts['opt'] == 'adagrad':
# raise NotImplementedError("optimizer 'adagrad' is not implemented yet")
# elif opts['opt'] == 'adadelta':
# raise NotImplementedError("optimizer 'adadelta' is not implemented yet")
# elif opts['opt'] == 'adam':
# raise NotImplementedError("optimizer 'adam' is not implemented yet")
if opts['penalty'] == 'rda':
raise NotImplementedError("regularization 'rda' is not implemented yet")
if opts['learning_rate'] == 'simple':
raise NotImplementedError("learning rate 'simple' is not implemented yet")
@staticmethod
def load(conn, table, feature_column='feature', weight_column='weight', bias_feature=None):
df = conn.fetch_table(table)
intercept = np.array([0.]) # (1,)
coef = np.array([[]]) # (1, n_feature)
vocabulary = {}
feature_names = []
j = 0
for i, row in df.iterrows():
feature, weight = row[feature_column], row[weight_column]
if feature == bias_feature:
intercept[0] = float(weight)
continue
coef = np.append(coef, [[weight]], axis=1)
vocabulary[feature] = j
j += 1
feature_names.append(feature)
vectorizer = DictVectorizer(separator='#')
vectorizer.vocabulary_ = vocabulary
vectorizer.feature_names_ = feature_names
return coef, intercept, vectorizer
def store(self, conn, table, vocabulary, feature_column='feature', weight_column='weight', bias_feature=None):
df = self._to_frame(vocabulary, feature_column, weight_column, bias_feature)
conn.import_frame(df, table)
def _to_frame(self, vocabulary, feature_column, weight_column, bias_feature):
data = []
for feature, index in vocabulary.items():
data.append((feature, self.coef_[0, index]))
if bias_feature is not None:
data.append((bias_feature, self.intercept_[0]))
return | pd.DataFrame.from_records(data, columns=[feature_column, weight_column]) | pandas.DataFrame.from_records |
import requests
import json
import traceback
import sqlite3
import server.app.decode_fbs as decode_fbs
import scanpy as sc
import anndata as ad
import pandas as pd
import numpy as np
import diffxpy.api as de
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
from matplotlib import rcParams
import plotly.graph_objects as go
import plotly.io as plotIO
import base64
import math
from io import BytesIO
import sys
import time
import os
import re
import glob
import subprocess
strExePath = os.path.dirname(os.path.abspath(__file__))
import pprint
ppr = pprint.PrettyPrinter(depth=6)
import server.compute.diffexp_generic as diffDefault
import pickle
from pyarrow import feather
sys.setrecursionlimit(10000)
sc.settings.verbosity = 2
rcParams.update({'figure.autolayout': True})
api_version = "/api/v0.2"
import threading
jobLock = threading.Lock()
def getLock(lock):
while not lock.acquire():
time.sleep(1.0)
def freeLock(lock):
lock.release()
def route(data,appConfig):
#ppr.pprint("current working dir:%s"%os.getcwd())
data = initialization(data,appConfig)
#ppr.pprint(data)
try:
getLock(jobLock)
taskRes = distributeTask(data["method"])(data)
freeLock(jobLock)
return taskRes
except Exception as e:
freeLock(jobLock)
return 'ERROR @server: '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#return distributeTask(data["method"])(data)
import server.app.app as app
def initialization(data,appConfig):
# obtain the server host information
data = json.loads(str(data,encoding='utf-8'))
# update the environment information
data.update(VIPenv)
# updatting the hosting data information
if appConfig.is_multi_dataset():
data["url_dataroot"]=appConfig.server_config.multi_dataset__dataroot['d']['base_url']
data['h5ad']=os.path.join(appConfig.server_config.multi_dataset__dataroot['d']['dataroot'], data["dataset"])
else:
data["url_dataroot"]=None
data["dataset"]=None
data['h5ad']=appConfig.server_config.single_dataset__datapath
# setting the plotting options
if 'figOpt' in data.keys():
setFigureOpt(data['figOpt'])
# get the var (gene) and obv index
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
data['obs_index'] = scD.get_schema()["annotations"]["obs"]["index"]
data['var_index'] = scD.get_schema()["annotations"]["var"]["index"]
return data
def setFigureOpt(opt):
sc.set_figure_params(dpi_save=int(opt['dpi']),fontsize= float(opt['fontsize']),vector_friendly=(opt['vectorFriendly'] == 'Yes'),transparent=(opt['transparent'] == 'Yes'),color_map=opt['colorMap'])
rcParams.update({'savefig.format':opt['img']})
def getObs(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = [data['obs_index']]+data['grp']
dAnno = list(scD.get_obs_keys())
anno = []
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
tmp = scD.data.obs.loc[selC,sel].astype('str')
tmp.index = cNames
anno += [tmp]
sel = list(set(selAnno)-set(dAnno))
if len(sel)>0:
annotations = scD.dataset_config.user_annotations
if annotations:
labels = annotations.read_labels(scD)
tmp = labels.loc[list(scD.data.obs.loc[selC,data['obs_index']]),sel]
tmp.index = cNames
anno += [tmp]
obs = pd.concat(anno,axis=1)
#ppr.pprint(obs)
## update the annotation Abbreviation
combUpdate = cleanAbbr(data)
if 'abb' in data.keys():
for i in data['grp']:
obs[i] = obs[i].map(data['abb'][i])
return combUpdate, obs
def getObsNum(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
obs = pd.DataFrame()
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = data['grpNum']
dAnno = list(scD.get_obs_keys())
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
obs = scD.data.obs.loc[selC,sel]
obs.index = cNames
return obs
def getVar(data):
## obtain the gene annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
gInfo = scD.data.var
gInfo.index = list(gInfo[data['var_index']])
gInfo = gInfo.drop([data['var_index']],axis=1)
return gInfo
def collapseGeneSet(data,expr,gNames,cNames,fSparse):
Y = expr
if 'geneGrpColl' in data.keys() and not data['geneGrpColl']=='No' and 'geneGrp' in data.keys() and len(data['geneGrp'])>0:
data['grpLoc'] = []
data['grpID'] = []
if fSparse:
Y = pd.DataFrame.sparse.from_spmatrix(Y,columns=gNames,index=cNames)
for aN in data['geneGrp'].keys():
if data['geneGrpColl']=='mean':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].mean(axis=1).rename(aN)],axis=1,sort=False)
if data['geneGrpColl']=='median':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].median(axis=1).rename(aN)],axis=1,sort=False)
for gene in data['geneGrp'][aN]:
if gene in data['genes']:
data['genes'].remove(gene)
data['genes'] += [aN]
gNames = list(Y.columns)
return Y,gNames
def createData(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## onbtain the expression matrix
gNames = []
expr = []
fSparse = False
X = []
if 'genes' in data.keys():
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
if not type(scD.data.X) is np.ndarray:
fSparse = True
if len(data['genes'])>0:
fullG = list(scD.data.var[data['var_index']])
selG = sorted([fullG.index(i) for i in data['genes']]) #when data loaded backed, incremental is required
X = scD.data.X[:,selG]
gNames = [fullG[i] for i in selG] #data['genes']
else:
X = scD.data.X
gNames = list(scD.data.var[data['var_index']])
if 'figOpt' in data.keys() and data['figOpt']['scale'] == 'Yes':
X = sc.pp.scale(X,zero_center=(data['figOpt']['scaleZero'] == 'Yes'),max_value=(float(data['figOpt']['scaleMax']) if data['figOpt']['clipValue']=='Yes' else None))
X = X[selC]
if fSparse:
expr = X
else:
expr = pd.DataFrame(X,columns=gNames,index=cNames)
expr,gNames = collapseGeneSet(data,expr,gNames,cNames,fSparse)
#ppr.pprint("finished expression ...")
## obtain the embedding
embed = {}
if 'layout' in data.keys():
layout = data['layout']
if isinstance(layout,str):
layout = [layout]
if len(layout)>0:
for one in layout:
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
embed['X_%s'%one] = pd.DataFrame(scD.data.obsm['X_%s'%one][selC][:,[0,1]],columns=['%s1'%one,'%s2'%one],index=cNames)
#ppr.pprint("finished layout ...")
## obtain the category annotation
combUpdate, obs = getObs(data)
## create a custom annotation category and remove cells which are not in the selected annotation
if combUpdate and len(data['grp'])>1:
newGrp = 'Custom_combine'
combineGrp = list(data['combine'].keys());
obs[newGrp] = obs[combineGrp[0]]
for i in combineGrp:
if not i==combineGrp[0]:
obs[newGrp] += ":"+obs[i]
selC = ~obs[newGrp].str.contains("Other").to_numpy()
expr = expr[selC]
for i in embed.keys():
embed[i] = embed[i][selC]
obs = obs[selC].astype('category')
obs[newGrp].cat.set_categories(data['combineOrder'],inplace=True)
data['grp'] = [newGrp]
obs = obs.astype('category')
## empty selection
if expr.shape[0]==0 or expr.shape[1]==0:
return []
#ppr.pprint("finished obv ...")
return sc.AnnData(expr,obs,var=pd.DataFrame([],index=gNames),obsm={layout:embed[layout].to_numpy() for layout in embed.keys()})
def cleanAbbr(data):
updated = False
if 'abb' in data.keys() and 'combine' in data.keys():
if len(data['combine'])>0:
updated = True
for cate in data['abb'].keys():
if cate in data['combine'].keys():
for anName in data['abb'][cate].keys():
if not anName in data['combine'][cate]:
data['abb'][cate][anName] = "Other";
else:
if not data['abb'][cate][anName]==anName:
data['combineOrder'] = [one.replace(anName,data['abb'][cate][anName]) for one in data['combineOrder']]
else:
data['abb'][cate] = {key:"Other" for key in data['abb'][cate].keys()}
return updated
def errorTask(data):
raise ValueError('Error task!')
def distributeTask(aTask):
return {
'SGV':SGV,
'SGVcompare':SGVcompare,
'PGV':PGV,
'VIOdata':VIOdata,
'HEATplot':pHeatmap,
'HEATdata':HeatData,
'GD':GD,
'DEG':DEG,
'DOT':DOT,
'EMBED':EMBED,
'TRAK':TRACK,
'DUAL':DUAL,
'MARK': MARK,
'MINX':MINX,
'DENS':DENS,
'DENS2D':DENS2D,
'SANK':SANK,
'STACBAR':STACBAR,
'HELLO':HELLO,
'CLI':CLI,
'preDEGname':getPreDEGname,
'preDEGvolcano':getPreDEGvolcano,
'preDEGmulti':getPreDEGbubble,
'mergeMeta': mergeMeta,
'isMeta': isMeta,
'testVIPready':testVIPready,
'Description':getDesp,
'GSEAgs':getGSEA,
'SPATIAL':SPATIAL,
'saveTest':saveTest,
'getBWinfo':getBWinfo,
'plotBW':plotBW
}.get(aTask,errorTask)
def HELLO(data):
return 'Hi, connected.'
def iostreamFig(fig):
#getLock(iosLock)
figD = BytesIO()
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
fig.savefig(figD,bbox_inches="tight")
#ppr.pprint(sys.getsizeof(figD))
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
#ppr.pprint("saved Fig")
#freeLock(iosLock)
if 'matplotlib' in str(type(fig)):
plt.close(fig)#'all'
return imgD
def Msg(msg):
fig = plt.figure(figsize=(5,2))
plt.text(0,0.5,msg)
ax = plt.gca()
ax.axis('off')
return iostreamFig(fig)
def SPATIAL(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
#ppr.pprint(vars(scD.data.uns["spatial"]))
spatial=scD.data.uns["spatial"]
if (data['embedding'] == "get_spatial_list"):
return json.dumps({'list':list(spatial)})
library_id=list(spatial)[0]
if (data['embedding'] in list(spatial)):
library_id=data['embedding']
height, width, depth = spatial[library_id]["images"][data['resolution']].shape
embedding = 'X_'+data['embedding']
spatialxy = scD.data.obsm[embedding]
tissue_scalef = spatial[library_id]['scalefactors']['tissue_' + data['resolution'] + '_scalef']
i = data['spots']['spoti_i']
x = 0
y = 1
# from original embedding to (0,1) coordinate system (cellxgene embedding)
scalex = (data['spots']['spot0_x'] - data['spots']['spoti_x']) / (spatialxy[0][x] - spatialxy[i][x])
scaley = (data['spots']['spot0_y'] - data['spots']['spoti_y']) / (spatialxy[0][y] - spatialxy[i][y])
# image is in (-1,0,1) coordinate system, so multiplied by 2
translatex = (spatialxy[i][x]*scalex - data['spots']['spoti_x']) * 2
translatey = (spatialxy[i][y]*scaley - data['spots']['spoti_y']) * 2
scale = 1/tissue_scalef * scalex * 2
# Addtional translate in Y due to flipping of the image if needed
ppr.pprint(scalex)
ppr.pprint(scaley)
ppr.pprint(translatex)
ppr.pprint(translatey)
# from (-1,0,1) (image layer) to (0,1) coordinate system (cellxgene embedding). Overlapping (0,0) origins of both.
translatex = -(1+translatex)
if (translatey > -0.1):
flip = True
translatey = -(1+translatey) + height*scale
else:
flip = False
translatey = -(1+translatey)
returnD = [{'translatex':translatex,'translatey':translatey,'scale':scale}]
dpi=100
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
if (flip):
ax.imshow(np.flipud(spatial[library_id]["images"][data['resolution']]), interpolation='nearest')
else:
ax.imshow(spatial[library_id]["images"][data['resolution']], interpolation='nearest')
figD = BytesIO()
plt.savefig(figD, dpi=dpi)
ppr.pprint(sys.getsizeof(figD))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
plt.close(fig)
return json.dumps([returnD, imgD])
def MINX(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
minV = min(scD.data.X[0])
return '%.1f'%minV
def geneFiltering(adata,cutoff,opt):
## 1. remove cells if the max expression of all genes is lower than the cutoff
if opt==1:
#sT = time.time()
#ix = adata.to_df().apply(lambda x: max(x)>float(cutoff),axis=1)
#ppr.pprint(time.time()-sT)
#sT=time.time()
df = adata.to_df()
ix = df[df>float(cutoff)].count(axis=1)>0
#ppr.pprint(time.time()-sT)
#sT = time.time()
#ix = pd.DataFrame((adata.X>float(cutoff)).sum(1)>0,index=list(adata.obs.index)).iloc[:,0]
#ppr.pprint(time.time()-sT)
adata = adata[ix,]
## 2. Set all expression level smaller than the cutoff to be NaN not for plotting without removing any cells
elif opt==2:
def cutoff(x):
return x if x>float(cutoff) else None
X = adata.to_df()
X=X.applymap(cutoff)
adata = sc.AnnData(X,adata.obs)
return adata
def SGV(data):
# figure width and heights depends on number of unique categories
# characters of category names, gene number
#ppr.pprint("SGV: creating data ...")
adata = createData(data)
#ppr.pprint("SGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = len(a)/4+1
h = ncharA/6+2.5
ro = math.acos(10/max([15,ncharA]))/math.pi*180
##
fig = plt.figure(figsize=[w,h])
sc.pl.violin(adata,data['genes'],groupby=data['grp'][0],ax=fig.gca(),show=False)
fig.autofmt_xdate(bottom=0.2,rotation=ro,ha='right')
return iostreamFig(fig)
def SGVcompare(data):
adata = createData(data)
#adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
# plot in R
strF = ('%s/SGV%f.csv' % (data["CLItmp"],time.time()))
X=pd.concat([adata.to_df(),adata.obs[data['grp']]],axis=1,sort=False)
X[X.iloc[:,0]>=float(data['cellCutoff'])].to_csv(strF,index=False)
strCMD = " ".join(["%s/Rscript"%data['Rpath'],strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']])
#ppr.pprint(strCMD)
res = subprocess.run([strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
return img
def VIOdata(data):
adata = createData(data)
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
return pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
def unique(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def updateGene(data):
grpID = []
grpLoc=[]
allG = []
if 'geneGrp' in data.keys():
for aN in data['geneGrp'].keys():
grpLoc += [(len(allG),len(allG)+len(data['geneGrp'][aN])-1)]
allG += data['geneGrp'][aN]
grpID += [aN]
data['genes'] = unique(allG+data['genes'])
data['grpLoc'] = grpLoc
data['grpID'] = grpID
def PGV(data):
# figure width and heights depends on number of unique categories
# characters of category names, gene number #pecam1 pdpn
updateGene(data)
#ppr.pprint("PGV: creating data ...")
adata = createData(data)
#ppr.pprint("PGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if adata.shape[0]==0 or adata.shape[1]==0:
return Msg('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = max([3,ncharA/8])+len(data['genes'])/2+1.5
h = len(a)+0.5
swapAx = False
##
if data['by']=='Columns':
a = w
w = h
h = a
swapAx = True
if 'split_show' in data['figOpt']['scanpybranch']: #.dev140+ge9cbc5f
vp = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],return_fig=True,figsize=(w,h),swap_axes=swapAx,var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
vp.add_totals().style(yticklabels=True, cmap=data['color']).show()
#vp.add_totals().show()
fig = vp#plt.gcf()
else:
fig = plt.figure(figsize=[w,h])
axes = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],show=False,ax=fig.gca(),swap_axes=swapAx,
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
return iostreamFig(fig)
def pHeatmap(data):
# figure width is depends on the number of categories was choose to show
# and the character length of each category term
# if the number of element in a category is smaller than 10, "Set1" or "Set3" is choosen
# if the number of element in a category is between 10 and 20, default is choosen
# if the number of element in a category is larger than 20, husl is choosen
#Xsep = createData(data,True)
#adata = sc.AnnData(Xsep['expr'],Xsep['obs'])
#sT = time.time()
adata = createData(data)
data['grp'] += data['addGrp']
#Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
#ppr.pprint('HEAT data reading cost %f seconds' % (time.time()-sT) )
#sT = time.time()
exprOrder = True
if data['order']!="Expression":
exprOrder = False;
adata = adata[adata.obs.sort_values(data['order']).index,]
#s = adata.obs[data['order']]
#ix = sorted(range(len(s)), key=lambda k: s[k])
#adata = adata[ix,]
colCounter = 0
colName =['Set1','Set3']
grpCol = list()
grpLegend = list()
grpWd = list()
grpLen = list()
h = 8
w = len(data['genes'])/3+0.3
for gID in data['grp']:
grp = adata.obs[gID]
Ugrp = grp.unique()
if len(Ugrp)<10:
lut = dict(zip(Ugrp,sns.color_palette(colName[colCounter%2],len(Ugrp)).as_hex()))
colCounter += 1
elif len(Ugrp)<20:
lut = dict(zip(Ugrp,sns.color_palette(n_colors=len(Ugrp)).as_hex()))
else:
lut = dict(zip(Ugrp,sns.color_palette("husl",len(Ugrp)).as_hex()))
grpCol.append(grp.map(lut))
grpLegend.append([mpatches.Patch(color=v,label=k) for k,v in lut.items()])
grpWd.append(max([len(x) for x in Ugrp]))#0.02*fW*max([len(x) for x in Ugrp])
grpLen.append(len(Ugrp)+2)
w += 2
Zscore=None
heatCol=data['color']
heatCenter=None
colTitle="Expression"
if data['norm']=='zscore':
Zscore=1
#heatCol="vlag"
heatCenter=0
colTitle="Z-score"
#ppr.pprint('HEAT data preparing cost %f seconds' % (time.time()-sT) )
#sT = time.time()
try:
g = sns.clustermap(adata.to_df(),
method="ward",row_cluster=exprOrder,z_score=Zscore,cmap=heatCol,center=heatCenter,
row_colors=pd.concat(grpCol,axis=1).astype('str'),yticklabels=False,xticklabels=True,
figsize=(w,h),colors_ratio=0.05,
cbar_pos=(.3, .95, .55, .02),
cbar_kws={"orientation": "horizontal","label": colTitle,"shrink": 0.5})
except Exception as e:
return 'ERROR: Z score calculation failed for 0 standard diviation. '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#ppr.pprint('HEAT plotting cost %f seconds' % (time.time()-sT) )
#sT = time.time()
g.ax_col_dendrogram.set_visible(False)
#g.ax_row_dendrogram.set_visible(False)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
grpW = [1.02]
grpH = [1.2]
cumulaN = 0
cumulaMax = 0
characterW=1/40 # a character is 1/40 of heatmap width
characterH=1/40 # a character is 1/40 of heatmap height
for i in sorted(range(len(grpLen)),key=lambda k:grpLen[k]):#range(5):#
cumulaN += grpLen[i]
if cumulaN>(10+1/characterH):
grpW.append(grpW[-1]+cumulaMax)
grpH = [1.2]
cumulaN =0
cumulaMax=0
leg = g.ax_heatmap.legend(handles=grpLegend[i],frameon=True,title=data['grp'][i],loc="upper left",
bbox_to_anchor=(grpW[-1],grpH[-1]),fontsize=5)#grpW[i],0.5,0.3
#leg = g.ax_heatmap.legend(handles=grpLegend[0],frameon=True,title=data['grp'][0],loc="upper left",
# bbox_to_anchor=(1.02,1-i*0.25),fontsize=5)#grpW[i],0.5,0.
cumulaMax = max([cumulaMax,grpWd[i]*characterW])
grpH.append(grpH[-1]-grpLen[i]*characterH)
leg.get_title().set_fontsize(6)#min(grpSize)+2
g.ax_heatmap.add_artist(leg)
#ppr.pprint('HEAT post plotting cost %f seconds' % (time.time()-sT) )
return iostreamFig(g)#json.dumps([iostreamFig(g),Xdata])#)#
def HeatData(data):
adata = createData(data)
Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
return Xdata
def GD(data):
adata = None;
for one in data['cells'].keys():
#sT = time.time()
oneD = data.copy()
oneD.update({'cells':data['cells'][one],
'genes':[],
'grp':[]})
D = createData(oneD)
#ppr.pprint("one grp aquire data cost %f seconds" % (time.time()-sT))
D.obs['cellGrp'] = one
if adata is None:
adata = D
else:
#sT =time.time()
adata = adata.concatenate(D)
#ppr.pprint("Concatenate data cost %f seconds" % (time.time()-sT))
if adata is None:
return Msg("No cells were satisfied the condition!")
##
adata.obs.astype('category')
cutOff = 'geneN_cutoff'+data['cutoff']
#sT = time.time()
#adata.obs[cutOff] = adata.to_df().apply(lambda x: sum(x>float(data['cutoff'])),axis=1)
#ppr.pprint(time.time()-sT)
#sT = time.time()
#df = adata.to_df()
#adata.obs[cutOff] = df[df>float(data['cutoff'])].count(axis=1)
#ppr.pprint(time.time()-sT)
sT = time.time()
adata.obs[cutOff] = (adata.X >float(data['cutoff'])).sum(1)
ppr.pprint(time.time()-sT)
##
w = 3
if len(data['cells'])>1:
w += 3
fig = plt.figure(figsize=[w,4])
sc.pl.violin(adata,cutOff,groupby='cellGrp',ax=fig.gca(),show=False,rotation=0,size=2)
return iostreamFig(fig)
def getGSEA(data):
strGSEA = '%s/gsea/'%strExePath
return json.dumps(sorted([os.path.basename(i).replace(".symbols.gmt","") for i in glob.glob(strGSEA+"*.symbols.gmt")]))
def DEG(data):
adata = None;
genes = data['genes']
data['genes'] = []
comGrp = 'cellGrp'
if 'combine' in data.keys():
if data['DEmethod']=='default':
combUpdate, obs = getObs(data)
if combUpdate and len(data['grp'])>1:
obs[comGrp] = obs[data['grp'][0]]
for i in data['grp']:
if i!=data['grp'][0]:
obs[comGrp] += ":"+obs[i]
mask = [obs[comGrp].isin([data['comGrp'][i]]) for i in [0,1]]
else:
data['figOpt']['scale'] = 'No'
adata = createData(data)
comGrp = data['grp'][0]
adata = adata[adata.obs[comGrp].isin(data['comGrp'])]
else:
mask = [pd.Series(range(data['cellN'])).isin(data['cells'][one].values()) for one in data['comGrp']]
for one in data['comGrp']:
oneD = data.copy()
oneD['cells'] = data['cells'][one]
oneD['genes'] = []
oneD['grp'] = []
oneD['figOpt']['scale']='No'
#oneD = {'cells':data['cells'][one],
# 'genes':[],
# 'grp':[],
# 'figOpt':{'scale':'No'},
# 'url':data['url']}
D = createData(oneD)
D.obs[comGrp] = one
if adata is None:
adata = D
else:
adata = adata.concatenate(D)
if data['DEmethod']=='default':
if sum(mask[0]==True)<10 or sum(mask[1]==True)<10:
raise ValueError('Less than 10 cells in a group!')
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
res = diffDefault.diffexp_ttest(scD,mask[0].to_numpy(),mask[1].to_numpy(),scD.data.shape[1])# shape[cells as rows, genes as columns]
gNames = list(scD.data.var[data['var_index']])
deg = pd.DataFrame(res,columns=['gID','log2fc','pval','qval'])
gName = pd.Series([gNames[i] for i in deg['gID']],name='gene')
deg = pd.concat([deg,gName],axis=1).loc[:,['gene','log2fc','pval','qval']]
else:
if not 'AnnData' in str(type(adata)):
raise ValueError('No data extracted by user selection')
adata.obs.astype('category')
nm = None
if data['DEmethod']=='wald':
nm = 'nb'
if data['DEmethod']=='wald':
res = de.test.wald(adata,formula_loc="~1+"+comGrp,factor_loc_totest=comGrp)
elif data['DEmethod']=='t-test':
res = de.test.t_test(adata,grouping=comGrp)
elif data['DEmethod']=='rank':
res = de.test.rank_test(adata,grouping=comGrp)
else:
raise ValueError('Unknown DE methods:'+data['DEmethod'])
#res = de.test.two_sample(adata,comGrp,test=data['DEmethod'],noise_model=nm)
deg = res.summary()
deg = deg.sort_values(by=['qval']).loc[:,['gene','log2fc','pval','qval']]
deg['log2fc'] = -1 * deg['log2fc']
## plot in R
#strF = ('/tmp/DEG%f.csv' % time.time())
strF = ('%s/DEG%f.csv' % (data["CLItmp"],time.time()))
deg.to_csv(strF,index=False)
#ppr.pprint([strExePath+'/volcano.R',strF,'"%s"'%';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])
res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in volcano.R: "+res.stderr.decode('utf-8'))
img = res.stdout.decode('utf-8')
# GSEA
GSEAimg=""
GSEAtable=pd.DataFrame()
if data['gsea']['enable']:
res = subprocess.run([strExePath+'/fgsea.R',
strF,
'%s/gsea/%s.symbols.gmt'%(strExePath,data['gsea']['gs']),
str(data['gsea']['gsMin']),
str(data['gsea']['gsMax']),
str(data['gsea']['padj']),
data['gsea']['up'],
data['gsea']['dn'],
str(data['gsea']['collapse']),
data['figOpt']['img'],
str(data['figOpt']['fontsize']),
str(data['figOpt']['dpi']),
data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in fgsea.R: "+res.stderr.decode('utf-8'))
GSEAimg = res.stdout.decode('utf-8')
GSEAtable = pd.read_csv(strF)
GSEAtable['leadingEdge'] = GSEAtable['leadingEdge'].apply(lambda x:'|'.join(x.split('|')[:10]))
os.remove(strF)
#####
gInfo = getVar(data)
deg.index = deg['gene']
deg = pd.concat([deg,gInfo],axis=1,sort=False)
#return deg.to_csv()
if not data['topN']=='All':
deg = deg.iloc[range(int(data['topN'])),]
#deg.loc[:,'log2fc'] = deg.loc[:,'log2fc'].apply(lambda x: '%.2f'%x)
#deg.loc[:,'pval'] = deg.loc[:,'pval'].apply(lambda x: '%.4E'%x)
#deg.loc[:,'qval'] = deg.loc[:,'qval'].apply(lambda x: '%.4E'%x)
#ppr.pprint(GSEAtable)
#ppr.pprint(GSEAtable.sort_values('pval'))
return json.dumps([deg.to_csv(index=False),img,GSEAtable.to_csv(index=False),GSEAimg])#json.dumps([deg.values.tolist(),img])
def DOT(data):
#ppr.pprint("DOT, starting ...")
updateGene(data)
# Dot plot, The dotplot visualization provides a compact way of showing per group, the fraction of cells expressing a gene (dot size) and the mean expression of the gene in those cell (color scale). The use of the dotplot is only meaningful when the counts matrix contains zeros representing no gene counts. dotplot visualization does not work for scaled or corrected matrices in which zero counts had been replaced by other values, see http://scanpy-tutorials.readthedocs.io/en/multiomics/visualizing-marker-genes.html
data['figOpt']['scale'] = 'No';
#ppr.pprint("DOT: creating data ...")
adata = createData(data)
#ppr.pprint("DOT: data created!")
if len(adata)==0:
return Msg('No cells in the condition!')
#return adata
grp = adata.obs[data['grp'][0]].unique()
if len(grp)<10:
col = np.array(sns.color_palette('Set1',len(grp)).as_hex())
elif len(grp)<20:
col = np.array(sns.color_palette(n_colors=len(grp)).as_hex())
else:
col = np.array(sns.color_palette("husl",len(grp)).as_hex())
adata.uns[data['grp'][0]+'_colors'] = col
#ppr.pprint(sc.__version__)
if 'split_show' in data['figOpt']['scanpybranch']:#.dev140+ge9cbc5f
dp = sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],
return_fig=True)#
dp = dp.add_totals(size=1.2).legend(show_size_legend=True,width=float(data['legendW'])).style(cmap=data['color'], dot_edge_color='black', dot_edge_lw=1, size_exponent=1.5)
dp.show()
fig = dp.get_axes()['mainplot_ax'].figure
else:
sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],show=False,expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),var_group_positions=data['grpLoc'],var_group_labels=data['grpID'], color_map=data['color'])
fig = plt.gcf()
#ppr.pprint(adata)
return iostreamFig(fig)
def EMBED(data):
adata = createData(data)
if len(data['grpNum'])>0:
adata.obs = pd.concat([adata.obs,getObsNum(data)],axis=1)
subSize = 4
ncol = int(data['ncol'])
ngrp = len(data['grp'])
ngrpNum = len(data['grpNum'])
ngene = len(data['genes'])
nrow = ngrp+math.ceil(ngrpNum/ncol)+math.ceil(ngene/ncol)
if 'splitGrp' in data.keys():
splitName = list(adata.obs[data['splitGrp']].unique())
nsplitRow = math.ceil(len(splitName)/ncol)
nrow = ngrp+math.ceil(ngrpNum/ncol)+ngene*nsplitRow
step =11
grpCol = {gID:math.ceil(len(list(adata.obs[gID].unique()))/step) for gID in data['grp']}
rcParams['figure.constrained_layout.use'] = False
fig = plt.figure(figsize=(ncol*subSize,subSize*nrow))
gs = fig.add_gridspec(nrow,ncol,wspace=0.2)
for i in range(ngrp):
grpName = adata.obs[data['grp'][i]].value_counts().to_dict()
grpPalette = None
plotOrder = None
dotSize = None
if len(grpName)==2 and max(grpName.values())/min(grpName.values())>10:
grpPalette = {max(grpName,key=grpName.get):'#c0c0c030',min(grpName,key=grpName.get):'#de2d26ff'}
plotOrder = min(grpName,key=grpName.get) #list(grpPalette.keys()) #
grpPalette = [grpPalette[k] for k in list(adata.obs[data['grp'][i]].cat.categories)]
dotSize = adata.obs.apply(lambda x: 360000/adata.shape[1] if x['HIVcell']==plotOrder else 120000/adata.shape[1],axis=1).tolist()
ax = sc.pl.embedding(adata,data['layout'],color=data['grp'][i],ax=fig.add_subplot(gs[i,0]),show=False,palette=grpPalette,groups=plotOrder,size=dotSize)
if grpCol[data['grp'][i]]>1:
ax.legend(ncol=grpCol[data['grp'][i]],loc=6,bbox_to_anchor=(1,0.5),frameon=False)
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
for i in range(ngrpNum):
x = int(i/ncol)+ngrp
y = i % ncol
ax = sc.pl.embedding(adata,data['layout'],color=data['grpNum'][i],ax=fig.add_subplot(gs[x,y]),show=False)#,wspace=0.25
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
if 'splitGrp' in data.keys():
vMax = adata.to_df().apply(lambda x: max(x))
vMin = adata.to_df().apply(lambda x: min(x))
dotSize = 120000 / adata.n_obs
for i in range(ngene):
for j in range(len(splitName)):
x = ngrp + math.ceil(ngrpNum/ncol) + i*nsplitRow+int(j/ncol)
y = j % ncol
ax = sc.pl.embedding(adata,data['layout'],ax=fig.add_subplot(gs[x,y]),show=False)#color=data['genes'][i],wspace=0.25,
ax = sc.pl.embedding(adata[adata.obs[data['splitGrp']]==splitName[j]],data['layout'],color=data['genes'][i],
vmin=vMin[data['genes'][i]],vmax=vMax[data['genes'][i]],ax=ax,show=False,
size=dotSize,title='{} in {}'.format(data['genes'][i],splitName[j]))
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
else:
for i in range(ngene):
x = int(i/ncol)+ngrp+math.ceil(ngrpNum/ncol)
y = i % ncol
ax = sc.pl.embedding(adata,data['layout'],color=data['genes'][i],ax=fig.add_subplot(gs[x,y]),show=False)
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
return iostreamFig(fig)
def TRACK(data):
updateGene(data)
adata = createData(data)
if len(adata)==0:
return Msg('No cells in the condition!')
w = math.log2(adata.n_obs)
h = adata.n_vars/2
## a bug in scanpy reported: https://github.com/theislab/scanpy/issues/1265, if resolved the following code is not needed
if len(data['grpLoc'])>0 and data['grpLoc'][len(data['grpLoc'])-1][1] < (len(data['genes'])-1):
data['grpLoc'] += [(data['grpLoc'][len(data['grpLoc'])-1][1]+1,len(data['genes'])-1)]
data['grpID'] += ['others']
##############
#ppr.pprint(data['grpLoc'])
#ppr.pprint(data['grpID'])
ax = sc.pl.tracksplot(adata,data['genes'],groupby=data['grp'][0],figsize=(w,h),
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],
show=False)
fig=ax['track_axes'][0].figure
return iostreamFig(fig)
def cut(x,cutoff,anno):
iC = x[x>cutoff].count()
if iC ==0:
return "None"
elif iC==2:
return "Both"
elif x[0]>cutoff:
return anno[0]
elif x[1]>cutoff:
return anno[1]
return "ERROR"
def dualExp(df,cutoff,anno):
label = ['None']+list(anno)+['Both']
a = df.iloc[:,0]>cutoff
b = df.iloc[:,1]>cutoff
return pd.Series([label[i] for i in list(a+2*b)],index=df.index,dtype='category')
def DUAL(data):
adata = createData(data)
adata.obs['Expressed'] = dualExp(adata.to_df(),float(data['cutoff']),adata.var_names)
sT = time.time()
pCol = {"None":"#AAAAAA44","Both":"#EDDF01AA",data['genes'][0]:"#1CAF82AA",data['genes'][1]:"#FA2202AA"}
adata.uns["Expressed_colors"]=[pCol[i] for i in adata.obs['Expressed'].cat.categories]
rcParams['figure.figsize'] = 4.5, 4
fig = sc.pl.embedding(adata,data['layout'],color='Expressed',return_fig=True,show=False,legend_fontsize="small")
plt.xlabel('%s1'%data['layout'])
plt.ylabel('%s2'%data['layout'])
rcParams['figure.figsize'] = 4, 4
return iostreamFig(fig)
def MARK(data):
adata = createData(data)
if len(adata)==0:
return Msg('No cells in the condition!')
## remove the annotation whose cell counts are smaller than 2 to avoid division by zero
vCount = adata.obs[data["grp"][0]].value_counts()
keepG = [key for key,val in vCount.items() if val>2]
adata = adata[adata.obs[data["grp"][0]].isin(keepG),:]
if len(adata.obs[data['grp'][0]].unique())<3:
return 'ERROR @server: {}'.format('Less than 3 groups in selected cells! Please use DEG for 2 groups')
#return json.dumps([[['name','scores'],['None','0']],Msg('Less than 3 groups in selected cells!Please use DEG for 2 groups')])
sc.tl.rank_genes_groups(adata,groupby=data["grp"][0],n_genes=int(data['geneN']),method=data['markMethod'])#
ppr.pprint(int(data['geneN']))
sc.pl.rank_genes_groups(adata,n_genes=int(data['geneN']),ncols=min([3,len(adata.obs[data['grp'][0]].unique())]),show=False)
fig =plt.gcf()
gScore = adata.uns['rank_genes_groups']
#ppr.pprint(gScore)
pKeys = [i for i in ['names','scores','logfoldchanges','pvals','pvals_adj'] if i in gScore.keys()]
scoreM = [pKeys+['Group']]
for i in gScore['scores'].dtype.names:
for j in range(len(gScore['scores'][i])):
one = []
for k in pKeys:
if k=='logfoldchanges':
one += ['%.2f' % gScore[k][i][j]]
elif k in ['pvals','pvals_adj']:
one += ['%.4E' % gScore[k][i][j]]
elif k=='scores':
one += ['%.4f' % gScore[k][i][j]]
else:
one += [gScore[k][i][j]]
scoreM += [one+[i]]
return json.dumps([scoreM,iostreamFig(fig)])
def DENS(data):
#sT = time.time()
adata = createData(data)
#ppr.pprint("read data cost: %f seconds" % (time.time()-sT))
#sT = time.time()
adata.obs['None'] = pd.Categorical(['all']*adata.shape[0])
bw=float(data['bw'])
sGrp = data['category'][0]
cGrp = data['category'][1]
defaultFontsize = 16
if 'figOpt' in data.keys():
defaultFontsize = float(data['figOpt']['fontsize'])
subSize = 4
#split = list(adata.obs[sGrp].unique())
split = sorted(list(adata.obs[sGrp].cat.categories))
genes = sorted(list(adata.var.index))
#colGrp = list(adata.obs[cGrp].unique())
colGrp = sorted(list(adata.obs[cGrp].cat.categories))
legendCol = math.ceil(len(colGrp)/(len(split)*11))
fig = plt.figure(figsize=(len(genes)*subSize,len(split)*(subSize-1)))
plt.xlabel("Expression",labelpad=20,fontsize=defaultFontsize+1)
#plt.ylabel(sGrp,labelpad=50,fontsize=defaultFontsize+1)
plt.xticks([])
plt.yticks([])
plt.box(on=None)
#plt.xlabel("Expression")
#plt.ylabel(sGrp)
gs = fig.add_gridspec(len(split),len(genes),wspace=0.2)#
#dataT = 0
#plotT = 0
for i in range(len(split)):
#resT = time.time()
Dobs = adata[adata.obs[sGrp]==split[i]].obs[cGrp]
D = adata[adata.obs[sGrp]==split[i]].to_df()
#dataT += (time.time()-resT)
for j in range(len(genes)):
ax = fig.add_subplot(gs[i,j])
#resT = time.time()
for one in colGrp:
if sum(Dobs==one)<1:
sns.kdeplot([0],label=one)
else:
sns.kdeplot(D[Dobs==one][genes[j]].to_numpy(),bw_method=bw,label=one)
ax.set_ylabel("",fontsize=defaultFontsize)
if i==0:
ax.set_title(genes[j],fontsize=defaultFontsize+2)
if j==0:
ax.set_ylabel(split[i],fontsize=defaultFontsize)
if i==0 and j==(len(genes)-1):
ax.legend(prop={'size': 10},title = cGrp,loc=2,bbox_to_anchor=(1,1),ncol=legendCol,frameon=False)#
else:
leg = ax.get_legend()
if not leg==None:
leg.remove()
#fig.text(0.6,0.09,"Expression",ha='center')
#ppr.pprint("plotting data cost: %f seconds" % dataT)
#ppr.pprint("plotting plot cost: %f seconds" % plotT)
#ppr.pprint("plotting total cost: %f seconds" % (time.time()-sT))
return iostreamFig(fig)
def SANK(data):
updateGene(data)
if len(data['genes'])==0:
tmp, D = getObs(data)
D = D.apply(lambda x:x.apply(lambda y:x.name+":"+y))
else:
adata = createData(data)
D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:x.name+":"+y)),
adata.to_df().apply(lambda x:pd.cut(x,int(data['sankBin'])).apply(lambda y:x.name+":"+'%.1f_%.1f'%(y.left,y.right)))],
axis=1,sort=False)
D = D.astype('str').astype('category')
if data['obs_index'] in D.columns:
del D[data['obs_index']]
colName =['Set1','Set3','viridis']
labels = []
cols = []
colindex = 0
for gID in D.columns:
gNames = list(D[gID].unique())
labels += gNames
if len(gNames) <10:
cols += sns.color_palette(colName[colindex%2],len(gNames)).as_hex()
colindex += 1
else:
cols += sns.color_palette(colName[2],len(gNames)).as_hex()
sIDs =[]
dIDs =[]
v=[]
Dnames = data['sankOrder']#list(D.columns)
#maxGrp = 0
#ppr.pprint(Dnames)
for i in range(len(Dnames)-1):
oneName = Dnames[i:i+2]
#maxGrp = max(maxGrp,len(D[oneName[0]].unique()))
summaryOne = D.groupby(oneName).size().reset_index(name='Count')
summaryOne=summaryOne[summaryOne['Count']>0]
sIDs += list(summaryOne[oneName[0]].apply(lambda x: labels.index(x)))
dIDs += list(summaryOne[oneName[1]].apply(lambda x: labels.index(x)))
v += list(summaryOne['Count'])
data_trace = dict(
type='sankey',
domain=dict(x=[0,1],y=[0,1]),
orientation='h',
valueformat = ".0f",
node = dict(
pad = 10,
thickness = 15,
line = dict(
color = "black",
width = 0.5
),
label = labels,
color = cols
),
link = dict(
source = sIDs,
target = dIDs,
value = v
)
)
## if the image is requested
if 'imgSave' in data.keys():
layout = dict(
font = dict(size=int(data['figOpt']['fontsize'])),
height= int(data['imgH']),
width = int(data['imgW'])*D.shape[1]
)
fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)
img = plotIO.to_image(fig,data['imgSave'])
return base64.encodebytes(img).decode('utf-8')
layout = dict(
font = dict(size=int(data['figOpt']['fontsize'])),
height= int(data['imgH']),
width = int(data['imgW'])*D.shape[1],
updatemenus= [
dict(
y=0.9,
buttons=[
dict(
label='Thick',
method='restyle',
args=['node.thickness', 15]
),
dict(
label='Thin',
method='restyle',
args=['node.thickness', 8]
)
]
),
dict(
y=0.8,
buttons=[
dict(
label='Small gap',
method='restyle',
args=['node.pad', 15]
),
dict(
label='Large gap',
method='restyle',
args=['node.pad', 20]
)
]
),
dict(
y=0.7,
buttons=[
dict(
label='Snap',
method='restyle',
args=['arrangement', 'snap']
),
dict(
label='Perpendicular',
method='restyle',
args=['arrangement', 'perpendicular']
),
dict(
label='Freeform',
method='restyle',
args=['arrangement', 'freeform']
),
dict(
label='Fixed',
method='restyle',
args=['arrangement', 'fixed']
)
]
),
dict(
y=0.6,
buttons=[
dict(
label='Horizontal',
method='restyle',
args=['orientation','h']#{,'height':700,'width':250*D.shape[1]}
),
dict(
label='Vertical',
method='restyle',
args=['orientation','v']#{'orientation': 'v','height':250*D.shape[1],'width':700}
)
]
)
]
)
fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)
div = plotIO.to_html(fig)
return div#[div.find('<div>'):(div.find('</div>')+6)]
def DENS2D(data):
adata = createData(data)
## plot in R
strF = ('%s/DENS2D%f.csv' % (data["CLItmp"],time.time()))
adata.to_df().to_csv(strF)#
res = subprocess.run([strExePath+'/Density2D.R',strF,data['figOpt']['img'],str(data['cutoff']),str(data['bandwidth']),data['figOpt']['colorMap'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
return img
def toInt(x):
if len(x)==0:
return 0
return int(x)
def STACBAR(data):
if len(data['genes'])==0:
tmp, D = getObs(data)
D = D.apply(lambda x:x.apply(lambda y:y))
else:
adata = createData(data)
D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:y)),
adata.to_df().apply(lambda x:pd.cut(x,int(data['Nbin'])).apply(lambda y:'%s:%.1f_%.1f'%(x.name,y.left,y.right)))],
axis=1,sort=False)
D = D.astype('str').astype('category')
if data['obs_index'] in D.columns:
del D[data['obs_index']]
cellN = D.groupby(list(D.columns)).size().reset_index(name="Count")
strCol = data['colorBy']
tmp = list(D.columns)
tmp.remove(strCol)
strX = tmp[0]
returnD = [{'name':i,
'sales':[{'year':j,#.replace(strX+':',''),
'profit':toInt(cellN[(cellN[strCol]==i) & (cellN[strX]==j)]['Count'])}
for j in cellN[strX].unique()]}
for i in cellN[strCol].unique()]
return json.dumps(returnD)
def CLI(data):
strPath = data["CLItmp"]+('/CLI%f' % time.time())
script = data['script']
del data['script']
adata = createData(data)
strData = strPath + '.h5ad'
adata.write(strData)
#with open(strData,'wb') as f:
#pickle.dump(adata,f)
ppr.pprint(len(re.findall(r'```',script)))
if (len(re.findall(r'```',script)) >0):
strScript = strPath + '.Rmd'
with open(strScript,'w') as f:
f.writelines(['---\noutput:\n html_document:\n code_folding: hide\n---\n\n```{r}\nstrPath <- "%s"\n```\n\n'%strPath])
f.write(script)
#ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))
res = subprocess.run('Rscript -e \'rmarkdown::render("%s", output_file="%s.html")\''%(strScript,strPath),capture_output=True,shell=True)
if (os.path.exists('%s.html'%strPath)):
with open('%s.html'%strPath,'r') as file:
html = file.read()
else:
html = ''
ppr.pprint(res.stdout.decode('utf-8'))
ppr.pprint(res.stderr.decode('utf-8'))
else:
strScript = strPath + '.py'
with open(strScript,'w') as f:
f.writelines(['%load_ext rpy2.ipython\n','from anndata import read_h5ad\n','adata=read_h5ad("%s")\n'%strData, 'strPath="%s"\n\n'%strPath])
#f.writelines(['%load_ext rpy2.ipython\n','import pickle\n','with open("%s","rb") as f:\n'%strData,' adata=pickle.load(f)\n','strPath="%s"\n\n'%strPath])
f.writelines(['%%R\n','strPath="%s"\n\n'%strPath])
f.write(script)
ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))
ppr.pprint(subprocess.run('which pandoc',capture_output=True,shell=True).stdout.decode('utf-8'))
ppr.pprint(subprocess.run("Rscript -e 'reticulate::py_config()'",capture_output=True,shell=True).stdout.decode('utf-8'))
res = subprocess.run('jupytext --to notebook --output - %s | jupyter nbconvert --ExecutePreprocessor.timeout=1800 --to html --execute --stdin --stdout'%strScript,capture_output=True,shell=True)
html = res.stdout.decode('utf-8')
h,s,e = html.partition('<div class="cell border-box-sizing code_cell rendered">')
h1,s,e = e.partition('<div class="cell border-box-sizing code_cell rendered">') ## remove the first cell
h1,s,e = e.partition('<div class="cell border-box-sizing code_cell rendered">') ## remove the second cell
html = h+s+e
if 'Error' in res.stderr.decode('utf-8'):
html = 'ERROR @server:\nstderr:\n' + res.stderr.decode('utf-8') + '\nstdout:\n' + res.stdout.decode('utf-8')
for f in glob.glob(strPath+"*"):
try:
os.remove(f)
except:
continue
return html
def getDesp(data):
strF = re.sub("h5ad$","txt",data["h5ad"])
if not os.path.isfile(strF):
return ""
txt = ""
with open(strF,'r') as fp:
for line in fp:
txt = "%s<br>%s"%(txt,line)
return txt
def getPreDEGname(data):
strF = re.sub("h5ad$","db",data["h5ad"])
if not os.path.isfile(strF):
#ppr.pprint(strF+" is NOT found!")
return ""
conn = sqlite3.connect(strF)
df = pd.read_sql_query("select DISTINCT contrast,tags from DEG;", conn)
conn.close()
return json.dumps(list(df['contrast']+"::"+df['tags']))
def getPreDEGvolcano(data):
strF = re.sub("h5ad$","db",data["h5ad"])
comGrp = data["compSel"].split("::")
conn = sqlite3.connect(strF)
df = pd.read_sql_query("select gene,log2fc,pval,qval from DEG where contrast=? and tags=?;", conn,params=comGrp)
conn.close()
deg = df.sort_values(by=['qval'])
data["comGrp"] = comGrp[0].split(".vs.")
## plot in R
strF = ('%s/DEG%f.csv' % (data["CLItmp"],time.time()))
deg.to_csv(strF,index=False)
#ppr.pprint([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])
res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(data['genes']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
#####
gInfo = getVar(data)
deg.index = deg['gene']
deg = pd.concat([deg,gInfo],axis=1,join='inner',sort=False)
#return deg.to_csv()
if not data['topN']=='All':
deg = deg.iloc[range(min(deg.shape[0],int(data['topN']))),]
#deg.loc[:,'log2fc'] = deg.loc[:,'log2fc'].apply(lambda x: '%.2f'%x)
#deg.loc[:,'pval'] = deg.loc[:,'pval'].apply(lambda x: '%.4E'%x)
#deg.loc[:,'qval'] = deg.loc[:,'qval'].apply(lambda x: '%.4E'%x)
return json.dumps([deg.to_csv(index=False),img])#json.dumps([deg.values.tolist(),img])
def getPreDEGbubble(data):
#data={'compSel':['MS.vs.Control::EN.L4','MS.vs.Control::Endo.cells','MS.vs.Control::EN.PYR'],'genes':['RASGEF1B','SLC26A3','UNC5C','AHI1','CD9']}
sql = "select gene,log2fc,pval,qval,contrast || '::' || tags as tag from DEG where tag in ({comp}) and gene in ({gList}) order by case tag {oList} end;".format(
comp=','.join(['?']*len(data['compSel'])),
gList=','.join(['?']*len(data['genes'])),
oList=' '.join(['WHEN ? THEN %d'%i for i in range(len(data['compSel']))]))
strF = re.sub("h5ad$","db",data["h5ad"])
conn = sqlite3.connect(strF)
deg = | pd.read_sql_query(sql,conn,params=data['compSel']+data['genes']+data['compSel']) | pandas.read_sql_query |
# Package import
from __future__ import print_function, division
from warnings import warn
from nilmtk.disaggregate import Disaggregator
import pandas as pd
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from statistics import mean
import os
import time
import argparse
import pickle
import random
import json
from torchsummary import summary
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import torch.utils.data as tud
from torch.utils.data.dataset import TensorDataset
from torch.utils.tensorboard import SummaryWriter
# Fix the random seed to ensure the reproducibility of the experiment
random_seed = 10
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Use cuda or not
USE_CUDA = torch.cuda.is_available()
DEVICE = 'cuda' if USE_CUDA else 'cpu'
class Encoder(nn.Module):
def __init__(self, power_dis_dim, embed_dim = 128, enc_hid_dim = 128, dec_hid_dim = 256):
super(Encoder, self).__init__()
self.embedding = nn.Embedding(power_dis_dim, embed_dim)
self.rnn = nn.GRU(embed_dim, enc_hid_dim, bidirectional = True, batch_first = True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout = nn.Dropout(0.5)
self.act = nn.Tanh()
def forward(self, mains):
# mains = [batch_size, 1, mains_len]
# embedded = [batch_size, mains_len, embed_dim]
embedded = self.dropout(self.embedding(mains.squeeze(1)))
# enc_output = [batch_size, mains_len, enc_hid_dim * 2], enc_hidden = [batch_size, 2, enc_hid_dim]
enc_output, enc_hidden = self.rnn(embedded)
# s [batch_size, dec_hid_dim] = enc_hidden [batch_size, 2 * enc_hid_dim] * W [enc_hid_dim * 2, dec_hid_dim]
s = self.act(self.fc(enc_hidden.contiguous().view(mains.size(0), -1)))
return enc_output, s
class Attention(nn.Module):
def __init__(self, enc_hid_dim = 128, dec_hid_dim = 256):
super(Attention, self).__init__()
self.W_hs = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim, bias = False)
self.v = nn.Linear(dec_hid_dim, 1, bias = False)
self.act = nn.Tanh()
def forward(self, s, enc_output):
# s = [batch_size, dec_hid_dim], enc_output = [batch_size, mains_len, enc_hid_dim * 2]
batch_size, mains_len = enc_output.size(0), enc_output.size(1)
# repeat decoder hidden state mains_len times, so s = [batch_size, mains_len, dec_hid_dim]
# print(s.size())
s = s.unsqueeze(1).repeat(1, mains_len, 1)
# E [batch_size, mains_len, dec_hid_dim] = h_s [batch_size, mains_len, dec_hid_dim + enc_hid_dim * 2] * W_hs[dec_hid_dim + enc_hid_dim * 2, dec_hid_dim]
E = self.act(self.W_hs(torch.cat((s, enc_output), dim = 2)))
# attention = [batch_size, mains_len]
attention = self.v(E).squeeze(2)
return F.softmax(attention, dim = 1)
class Decoder(nn.Module):
def __init__(self, power_dis_dim, attention, enc_hid_dim = 128, dec_hid_dim = 256):
super(Decoder, self).__init__()
self.power_dis_dim = power_dis_dim
self.attention = attention
self.rnn = nn.GRU(enc_hid_dim * 2, dec_hid_dim, batch_first = True)
self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, power_dis_dim)
self.dropout = nn.Dropout(0.5)
def forward(self, enc_output, s):
# enc_output = [batch_size, mains_len, enc_hid_dim * 2], s = [batch_size, dec_hid_dim]
# a = [batch_size, 1, mains_len]
a = self.attention(s, enc_output).unsqueeze(1)
# c = [batch_size, 1, enc_hid_dim * 2]
c = torch.bmm(a, enc_output)
# dec_output = [batch_size, 1, dec_hid_dim] = dec_hidden = [batch_size, 1, dec_hid_dim]
dec_output, dec_hidden = self.rnn(c, s.unsqueeze(0))
# dec_output = [batch_size, dec_hid_dim], c = [batch_size, enc_hid_dim * 2]
dec_output, c = dec_output.squeeze(1), c.squeeze(1)
# pred = [batch_size, power_dis_dim]
pred = self.fc_out(torch.cat((dec_output, c),dim = 1))
return pred, dec_hidden.squeeze(0)
def initialize(layer):
if isinstance(layer, nn.LSTM):
# Xavier_uniform will be applied to W_{ih}, Orthogonal will be applied to W_{hh}, to be consistent with Keras and Tensorflow
torch.nn.init.xavier_uniform_(layer.weight_ih_l0.data)
torch.nn.init.orthogonal_(layer.weight_hh_l0.data)
torch.nn.init.constant_(layer.bias_ih_l0.data, val = 0.0)
torch.nn.init.constant_(layer.bias_hh_l0.data, val = 0.0)
elif isinstance(layer, nn.Linear):
# Xavier_uniform will be applied to conv1d and dense layer, to be consistent with Keras and Tensorflow
torch.nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias.data, val = 0.0)
class Seq2Seq_Pytorch(nn.Module):
def __init__(self, encoder, decoder, device = DEVICE):
# Refer to "<NAME>, <NAME>, <NAME>, et al. Nonintrusive Load Monitoring based on Sequence-to-sequence Model With Attention Mechanism[J]. Proceedings of the CSEE".
super(Seq2Seq_Pytorch, self).__init__()
self.encoder = encoder
self.encoder.apply(initialize)
self.decoder = decoder
self.decoder.apply(initialize)
self.device = device
def forward(self, mains):
# mains = [batch_size, 1 ,mains_len], appliance = [batch_size, 1, app_len]
batch_size, app_len = mains.size(0), mains.size(2)
# Notice that decoder.output_dim = encoder.input_dim
app_power_dim = self.decoder.power_dis_dim
# tensor to store decoder outputs
outputs = torch.zeros(batch_size, app_len, app_power_dim).to(self.device)
enc_output, s = self.encoder(mains)
# For-loop
for t in range(app_len):
# receive output tensor (predictions) and new hidden state, and place predictions in outputs
dec_output, s = self.decoder(enc_output, s)
outputs[:,t,:] = dec_output
return outputs
def train(appliance_name, model, sequence_length, mains, appliance, epochs, batch_size, pretrain = False, checkpoint_interval = None, train_patience = 3):
# Model configuration
if USE_CUDA:
model = model.cuda()
if not pretrain:
model.apply(initialize)
# summary(model, (1, mains.shape[1]),dtypes = torch.long)
# split the train and validation set
train_mains,valid_mains,train_appliance,valid_appliance = train_test_split(mains, appliance, test_size=.2, random_state = random_seed)
# Create optimizer, loss function, and dataload
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
loss_fn = torch.nn.CrossEntropyLoss()
train_dataset = TensorDataset(torch.from_numpy(train_mains).long().permute(0,2,1), torch.from_numpy(train_appliance).float().permute(0,2,1))
valid_dataset = TensorDataset(torch.from_numpy(valid_mains).long().permute(0,2,1), torch.from_numpy(valid_appliance).float().permute(0,2,1))
train_loader = tud.DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
valid_loader = tud.DataLoader(valid_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
writer = SummaryWriter(comment='train_visual')
patience, best_loss = 0, None
for epoch in range(epochs):
# Earlystopping
if(patience == train_patience):
print("val_loss did not improve after {} Epochs, thus Earlystopping is calling".format(train_patience))
break
# Train the model
st = time.time()
model.train()
for i, (batch_mains, batch_appliance) in enumerate(train_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_pred.view(batch_size * sequence_length, -1), batch_appliance.view(-1).long())
model.zero_grad()
loss.backward()
optimizer.step()
ed = time.time()
# Evaluate the model
model.eval()
with torch.no_grad():
cnt, loss_sum = 0, 0
for i, (batch_mains, batch_appliance) in enumerate(valid_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_pred.view(batch_size * sequence_length, -1), batch_appliance.view(-1).long())
loss_sum += loss
cnt += 1
final_loss = loss_sum / cnt
# Save best only
if best_loss is None or final_loss < best_loss:
best_loss = final_loss
patience = 0
net_state_dict = model.state_dict()
path_state_dict = "./"+appliance_name+"_seq2seq_best_state_dict.pt"
torch.save(net_state_dict, path_state_dict)
else:
patience = patience + 1
print("Epoch: {}, Valid_Loss: {}, Time consumption: {}.".format(epoch, final_loss, ed - st))
# For the visualization of training process
for name,param in model.named_parameters():
writer.add_histogram(name + '_grad', param.grad, epoch)
writer.add_histogram(name + '_data', param, epoch)
writer.add_scalars("MSELoss", {"Valid":final_loss}, epoch)
# Save checkpoint
if (checkpoint_interval != None) and ((epoch + 1) % checkpoint_interval == 0):
checkpoint = {"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch}
path_checkpoint = "./"+appliance_name+"_seq2seq_checkpoint_{}_epoch.pt".format(epoch)
torch.save(checkpoint, path_checkpoint)
def test(model, test_mains, batch_size = 512):
# Model test
st = time.time()
model.eval()
# Create test dataset and dataloader
batch_size = test_mains.shape[0] if batch_size > test_mains.shape[0] else batch_size
test_dataset = TensorDataset(torch.from_numpy(test_mains).float().permute(0,2,1))
test_loader = tud.DataLoader(test_dataset, batch_size = batch_size, shuffle = False, num_workers = 0)
with torch.no_grad():
for i, batch_mains in enumerate(test_loader):
batch_pred = torch.argmax(model(batch_mains[0].long()).cpu(), dim = -1)
if i == 0:
res = batch_pred
else:
res = torch.cat((res, batch_pred), dim = 0)
ed = time.time()
print("Inference Time consumption: {}.".format(ed - st))
return res.numpy()
class Seq2Seq(Disaggregator):
def __init__(self, params):
self.MODEL_NAME = "Seq2Seq"
self.sequence_length = params.get('sequence_length',63)
self.n_epochs = params.get('n_epochs', 10)
self.batch_size = params.get('batch_size',512)
self.appliance_params = params.get('appliance_params',{})
self.mains_max = params.get('mains_max', 10000)
self.models = OrderedDict()
def partial_fit(self, train_main, train_appliances, pretrain = False, do_preprocessing=True,**load_kwargs):
# To preprocess the data and bring it to a valid shape
if do_preprocessing:
print ("Doing Preprocessing")
train_main, train_appliances, power_dis_dim = self.call_preprocessing(train_main, train_appliances,'train')
train_main = pd.concat(train_main, axis = 0).values
train_main = train_main.reshape((-1, self.sequence_length, 1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = pd.concat(app_df, axis=0).values
app_df = app_df.reshape((-1, self.sequence_length, 1))
new_train_appliances.append((app_name, app_df))
train_appliances = new_train_appliances
for appliance_name, power in train_appliances:
if appliance_name not in self.models:
print ("First model training for ",appliance_name)
encoder = Encoder(power_dis_dim)
attention = Attention()
decoder = Decoder(power_dis_dim, attention)
self.models[appliance_name] = Seq2Seq_Pytorch(encoder, decoder)
# Load pretrain dict or not
if pretrain is True:
self.models[appliance_name].load_state_dict(torch.load("./"+appliance_name+"_seq2seq_pre_state_dict.pt"))
model = self.models[appliance_name]
train(appliance_name,model, self.sequence_length, train_main, power, self.n_epochs, self.batch_size, pretrain = pretrain, checkpoint_interval = 3)
# Model test will be based on the best model
self.models[appliance_name].load_state_dict(torch.load("./"+appliance_name+"_seq2seq_best_state_dict.pt"))
def disaggregate_chunk(self, test_main_list, do_preprocessing = True):
# Disaggregate (test process)
if do_preprocessing:
test_main_list = self.call_preprocessing(test_main_list, submeters_lst = None, method='test')
test_predictions = []
for test_main in test_main_list:
test_main = test_main.values.reshape((-1, self.sequence_length, 1))
disggregation_dict = {}
for appliance in self.models:
# Move the model to cpu, and then test it
model = self.models[appliance].to('cpu')
prediction = test(model, test_main)
prediction = self.continuous_output(prediction)
valid_predictions = prediction.flatten()
series = pd.Series(valid_predictions)
disggregation_dict[appliance] = series
results = pd.DataFrame(disggregation_dict,dtype = 'float32')
test_predictions.append(results)
return test_predictions
def call_preprocessing(self, mains_lst, submeters_lst, method):
# Seq2Seq Version
sequence_length = self.sequence_length
if method=='train':
# Preprocess the main and appliance data, the parameter 'overlapping' will be set 'True'
processed_mains = []
for mains in mains_lst:
# Notice that we will not use z-score method to normalize the data, since the seq2seq requires us to convert continuous power reading into discrete label
mains = self.discrete_data(mains.values, sequence_length, True)
processed_mains.append(pd.DataFrame(mains))
tuples_of_appliances = []
for (appliance_name,app_df_list) in submeters_lst:
processed_app_dfs = []
for app_df in app_df_list:
data = self.discrete_data(app_df.values, sequence_length, True)
processed_app_dfs.append(pd.DataFrame(data))
tuples_of_appliances.append((appliance_name, processed_app_dfs))
return processed_mains, tuples_of_appliances, int((self.mains_max + 9) / 10) + 1
if method=='test':
# Preprocess the main data only, the parameter 'overlapping' will be set 'False'
processed_mains = []
for mains in mains_lst:
mains = self.discrete_data(mains.values, sequence_length, False)
processed_mains.append( | pd.DataFrame(mains) | pandas.DataFrame |
import csv
import pandas as pd
import random
import numpy as np
from sklearn.decomposition import PCA
from sklearn import svm
#from sklearn.neural_network import MLPClassifier
#from sklearn import tree
from sklearn.metrics import accuracy_score
df= | pd.read_csv('C:\\Users\\Admin\\Desktop\\BE Proj\\HighFrequency.txt') | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Optional, Union
from flask_babel import gettext as _
from pandas import DataFrame
from superset.exceptions import InvalidPostProcessingError
from superset.utils.core import DTTM_ALIAS
from superset.utils.pandas_postprocessing.utils import PROPHET_TIME_GRAIN_MAP
def _prophet_parse_seasonality(
input_value: Optional[Union[bool, int]]
) -> Union[bool, str, int]:
if input_value is None:
return "auto"
if isinstance(input_value, bool):
return input_value
try:
return int(input_value)
except ValueError:
return input_value
def _prophet_fit_and_predict( # pylint: disable=too-many-arguments
df: DataFrame,
confidence_interval: float,
yearly_seasonality: Union[bool, str, int],
weekly_seasonality: Union[bool, str, int],
daily_seasonality: Union[bool, str, int],
periods: int,
freq: str,
) -> DataFrame:
"""
Fit a prophet model and return a DataFrame with predicted results.
"""
try:
# pylint: disable=import-error,import-outside-toplevel
from prophet import Prophet
prophet_logger = logging.getLogger("prophet.plot")
prophet_logger.setLevel(logging.CRITICAL)
prophet_logger.setLevel(logging.NOTSET)
except ModuleNotFoundError as ex:
raise InvalidPostProcessingError(_("`prophet` package not installed")) from ex
model = Prophet(
interval_width=confidence_interval,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
)
if df["ds"].dt.tz:
df["ds"] = df["ds"].dt.tz_convert(None)
model.fit(df)
future = model.make_future_dataframe(periods=periods, freq=freq)
forecast = model.predict(future)[["ds", "yhat", "yhat_lower", "yhat_upper"]]
return forecast.join(df.set_index("ds"), on="ds").set_index(["ds"])
def prophet( # pylint: disable=too-many-arguments
df: DataFrame,
time_grain: str,
periods: int,
confidence_interval: float,
yearly_seasonality: Optional[Union[bool, int]] = None,
weekly_seasonality: Optional[Union[bool, int]] = None,
daily_seasonality: Optional[Union[bool, int]] = None,
index: Optional[str] = None,
) -> DataFrame:
"""
Add forecasts to each series in a timeseries dataframe, along with confidence
intervals for the prediction. For each series, the operation creates three
new columns with the column name suffixed with the following values:
- `__yhat`: the forecast for the given date
- `__yhat_lower`: the lower bound of the forecast for the given date
- `__yhat_upper`: the upper bound of the forecast for the given date
:param df: DataFrame containing all-numeric data (temporal column ignored)
:param time_grain: Time grain used to specify time period increments in prediction
:param periods: Time periods (in units of `time_grain`) to predict into the future
:param confidence_interval: Width of predicted confidence interval
:param yearly_seasonality: Should yearly seasonality be applied.
An integer value will specify Fourier order of seasonality.
:param weekly_seasonality: Should weekly seasonality be applied.
An integer value will specify Fourier order of seasonality, `None` will
automatically detect seasonality.
:param daily_seasonality: Should daily seasonality be applied.
An integer value will specify Fourier order of seasonality, `None` will
automatically detect seasonality.
:param index: the name of the column containing the x-axis data
:return: DataFrame with contributions, with temporal column at beginning if present
"""
index = index or DTTM_ALIAS
# validate inputs
if not time_grain:
raise InvalidPostProcessingError(_("Time grain missing"))
if time_grain not in PROPHET_TIME_GRAIN_MAP:
raise InvalidPostProcessingError(
_("Unsupported time grain: %(time_grain)s", time_grain=time_grain,)
)
freq = PROPHET_TIME_GRAIN_MAP[time_grain]
# check type at runtime due to marhsmallow schema not being able to handle
# union types
if not isinstance(periods, int) or periods < 0:
raise InvalidPostProcessingError(_("Periods must be a whole number"))
if not confidence_interval or confidence_interval <= 0 or confidence_interval >= 1:
raise InvalidPostProcessingError(
_("Confidence interval must be between 0 and 1 (exclusive)")
)
if index not in df.columns:
raise InvalidPostProcessingError(_("DataFrame must include temporal column"))
if len(df.columns) < 2:
raise InvalidPostProcessingError(_("DataFrame include at least one series"))
target_df = | DataFrame() | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
Series(range(3)).rolling(1).apply(len, raw=bad_raw)
def test_rolling_apply_out_of_bounds(engine_and_raw):
# gh-1850
engine, raw = engine_and_raw
vals = Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
expected = Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
# 5071
df = DataFrame(
{"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)},
index=date_range("20130101", periods=5, freq="s"),
)
# we have an equal spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with tm.external_error_raised(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_apply(engine_and_raw):
engine, raw = engine_and_raw
expected = Series([], dtype="float64")
result = expected.rolling(10).apply(lambda x: x.mean(), engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), engine=engine, raw=raw)
expected = Series([1.0, 2.0, 2.0])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
def test_all_apply(engine_and_raw):
engine, raw = engine_and_raw
df = (
DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
* 2
)
er = df.rolling(window=1)
r = df.rolling(window="1s")
result = r.apply(lambda x: 1, engine=engine, raw=raw)
expected = er.apply(lambda x: 1, engine=engine, raw=raw)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), | Interval(1, 2) | pandas.Interval |
import os
import pandas as pd
import numpy as np
import h5py
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from collections import OrderedDict
test_path = "/Users/marina/Documents/PhD/research/astro_research/data/testing/"
dpath = test_path + "PROCESSED_DATA/"
def prettify(class_name):
if "/" in class_name:
class_name = class_name.replace("/", "")
class_name = class_name.replace(" ", "_")
return class_name
def save_HDF5s(training_folds, val_fold, test_fold, thex_data_path):
"""
Save class data to HDF5
"""
# Save to HDF5 File
hfile = h5py.File(thex_data_path, 'w')
# define & fill groups
for i in range(8):
training = hfile.create_group("folds/1/training/" + str(i + 1))
data = training_folds[i].to_numpy(dtype=np.float32)
dset = training.create_dataset("data", data=data)
val = hfile.create_group("folds/1/training/9")
dset = val.create_dataset("data", data=val_fold.to_numpy(dtype=np.float32))
val = hfile.create_group("folds/1/tests/1")
dset = val.create_dataset("data", data=test_fold.to_numpy(dtype=np.float32))
hfile.close()
def save_CSVs(fold_sets, class_X, class_name, output_dir):
"""
Save class data to CSV
"""
train_indices = []
for i in range(9): # Include validation fold in training
train_indices += fold_sets[i].tolist()
class_train = class_X.iloc[train_indices]
class_test = class_X.iloc[fold_sets[9]]
class_train.to_csv(output_dir + prettify(class_name) + "train.csv", index=False)
class_test.to_csv(output_dir + prettify(class_name) + "test.csv", index=False)
def save_class_data(class_name, X, y, output_dir, scaling=False):
"""
Save the X data of this class as HDF5 file
Returns test fold to be saved separately in joined test file.
"""
label_name = list(y)[0]
class_indices = y.loc[y[label_name].str.contains(class_name)].index
class_X = X.iloc[class_indices]
# Divide data into 10 folds; use 8 as training, 1 as validation, 1 as testing
kf = KFold(n_splits=10, shuffle=True)
fold_sets = []
for remaining_indices, fold_indices in kf.split(class_X):
fold_sets.append(fold_indices)
training_folds = []
for i in range(8):
training_folds.append(class_X.iloc[fold_sets[i]])
val_fold = class_X.iloc[fold_sets[8]]
test_fold = class_X.iloc[fold_sets[9]]
if scaling:
fs = list(val_fold)
scaler = StandardScaler()
class_X = pd.DataFrame(
data=scaler.fit_transform(class_X),
columns=fs)
training_folds = []
for i in range(8):
training_folds.append(class_X.iloc[fold_sets[i]])
val_fold = pd.DataFrame(
data=scaler.transform(val_fold), columns=fs)
test_fold = pd.DataFrame(
data=scaler.transform(test_fold), columns=fs)
# Save to HDF5 File
class_path = output_dir + prettify(class_name) + 'X.hdf5'
save_HDF5s(training_folds, val_fold, test_fold, class_path)
# Also save as CSVs - to test on KDE Model
save_CSVs(fold_sets, class_X, class_name, output_dir)
return test_fold
def save_test_X_y(test_folds, dpath, label="transient_type"):
"""
Using existing folds, combine each class's test fold into one whole test data set.
Save as both an HDF5 and CSV.
"""
full_test_set = pd.concat(test_folds.values())
hfile = h5py.File(dpath + "test_X.hdf5", 'w')
group = hfile.create_group("folds/1/tests/1")
dset = group.create_dataset("data", data=full_test_set.to_numpy(dtype=np.float32))
hfile.close()
# Save as CSV too for KDE model testing
full_test_set.to_csv(dpath + "test_X.csv", index=False)
# Save labels corresponding to test set in CSV.
labels = []
for class_name in test_folds.keys():
class_count = test_folds[class_name].shape[0]
for i in range(class_count):
labels.append(class_name)
label_df = | pd.DataFrame(labels, columns=[label]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
:Date: 2018. 1. 24.
"""
import numpy as np
import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import Ridge, LogisticRegression, Lasso
from sklearn.naive_bayes import GaussianNB
from scipy import stats
COLUMN_NAME = 'column_name'
COEFFICIENT_VALUE = 'coefficient_value'
P_VALUE = 'p-value'
def custom_round(number):
return 1 if number >= 0.5 else 0
# noinspection PyUnusedLocal
def get_logistic_regression(x_train, y_train, x_test, alpha=None, summary=False):
"""
:param x_train: (DataFrame) The variables of train set.
:param y_train: (Series) The correct answers of train set.
:param x_test: (DataFrame) The variables of test set.
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
model = LogisticRegression()
model.fit(x_train, y_train)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
def get_ridge_regression(x_train, y_train, x_test, alpha, summary=False):
"""
:param x_train: (DataFrame) The variables of train set.
:param y_train: (Series) The correct answers of train set.
:param x_test: (DataFrame) The variables of test set.
:param alpha: Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
:param summary: (bool) If summary is True, print the coefficient values by descent order.
:return y_prediction: (Series) The predictions of test set.
"""
model = Ridge(alpha=alpha)
model.fit(x_train, y_train)
if summary:
# Calculate coefficients and p-values
standard_error = np.sum((model.predict(X=x_train) - y_train) ** 2, axis=0) / float(x_train.shape[0] - x_train.shape[1])
t_statistics = model.coef_ / standard_error
p_values = 2 * (1 - stats.t.cdf(np.abs(t_statistics), y_train.shape[0] - x_train.shape[1]))
model_coef = pd.DataFrame(data=list(zip(x_train.columns, np.abs(model.coef_), p_values)),
columns=[COLUMN_NAME, COEFFICIENT_VALUE, P_VALUE])
model_coef = model_coef.sort_values(by=COEFFICIENT_VALUE, ascending=False)
print(model_coef)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
def get_lasso_regression(x_train, y_train, x_test, alpha, summary=False):
"""
:param x_train: (DataFrame) The variables of train set.
:param y_train: (Series) The correct answers of train set.
:param x_test: (DataFrame) The variables of test set.
:param alpha: Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
:param summary: (bool) If summary is True, print the coefficient values by descent order.
:return y_prediction: (Series) The predictions of test set.
"""
model = Lasso(alpha=alpha)
model.fit(X=x_train, y=y_train)
if summary:
# Calculate coefficients and p-values
standard_error = np.sum((model.predict(X=x_train) - y_train) ** 2, axis=0) / float(x_train.shape[0] - x_train.shape[1])
t_statistics = model.coef_ / standard_error
p_values = 2 * (1 - stats.t.cdf(np.abs(t_statistics), y_train.shape[0] - x_train.shape[1]))
model_coef = pd.DataFrame(data=list(zip(x_train.columns, np.abs(model.coef_), p_values)),
columns=[COLUMN_NAME, COEFFICIENT_VALUE, P_VALUE])
model_coef = model_coef.sort_values(by=COEFFICIENT_VALUE, ascending=False)
print(model_coef)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
# noinspection PyUnusedLocal
def get_linear_discriminant_analysis(x_train, y_train, x_test, alpha=None, summary=False):
"""
:param x_train:
:param y_train:
:param x_test:
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
model = LinearDiscriminantAnalysis()
model.fit(x_train, y_train)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
# noinspection PyUnusedLocal
def get_quadratic_discriminant_analysis(x_train, y_train, x_test, alpha=None, summary=False):
"""
:param x_train:
:param y_train:
:param x_test:
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
model = QuadraticDiscriminantAnalysis()
model.fit(x_train, y_train)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
# noinspection PyUnusedLocal
def get_naive_bayes(x_train, y_train, x_test, alpha=None, summary=False):
"""
:param x_train:
:param y_train:
:param x_test:
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
model = GaussianNB()
model.fit(x_train, y_train)
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
# noinspection PyUnusedLocal
def get_random_forest(x_train, y_train, x_test, alpha=None, summary=False):
"""
:param x_train:
:param y_train:
:param x_test:
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
n_features = len(x_train.columns)
x_train, y_train = make_classification(n_samples=5500, n_features=n_features, n_informative=2, n_redundant=0,
random_state=0, shuffle=False)
model = RandomForestClassifier(max_depth=2, random_state=0)
model.fit(x_train, y_train)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=2, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
print(model.feature_importances_)
print(model.predict([[0] * n_features]))
y_prediction = model.predict(X=x_test)
y_prediction = pd.Series(y_prediction).apply(custom_round)
return y_prediction
# The column names of following_count
A_FOLLOWER_COUNT = 'A_following_count'
B_FOLLOWER_COUNT = 'B_following_count'
# noinspection PyUnusedLocal
def get_select_more_follower_count(x_train, y_train, original_x_test, alpha=None, summary=False):
"""
:param x_train:
:param y_train:
:param original_x_test:
:param alpha:
:param summary:
:return y_prediction: (Series) The predictions of test set.
"""
y_prediction = pd.Series(np.where(original_x_test[A_FOLLOWER_COUNT] > original_x_test[B_FOLLOWER_COUNT], 1, 0))
return y_prediction
# An usage example
if __name__ == '__main__':
from data.data_reader import get_training_data
from data.data_combinator import get_full_combinations
alpha = 0.002
x_train, y_train, x_val, y_val = get_training_data(validation=True)
x_train = get_full_combinations(x_train)
original_x_val = x_val.copy()
x_val = get_full_combinations(x_val)
y_val = y_val.reset_index(drop=True)
print('Logistic Regression')
y_prediction = get_logistic_regression(x_train, y_train, x_val)
result = | pd.concat([y_val, y_prediction], axis=1) | pandas.concat |
# test vector generation module
__doc__ = """
Test vector generation block for mProbo. We use three sampling schemes:
- Orthogonal arrays with the strength of two in a OA table;
- LatinHyperCube sampling if proper OA doesn't exist;
- Random sampling.
"""
import numpy as np
import os
from BitVector import BitVector
import copy
from itertools import product, ifilter, ifilterfalse
import pandas as pd
import random
from dave.common.davelogger import DaVELogger
from dave.common.misc import print_section, all_therm, dec2bin, bin2dec, bin2thermdec, flatten_list, assert_file, isNone
from environ import EnvOaTable, EnvFileLoc, EnvTestcfgPort
from port import get_singlebit_name
import oatable
import pyDOE
import dave.mprobo.mchkmsg as mcode
#------------------------------------------------------
class LatinHyperCube(object):
''' Perform Latin Hyper Cube sampling using pyDOE
and scale the generated samples by depth
(i.e. number of levels) to make all integers
- n_var : number of variables
- depth : Depth applied to all variables
- sample : number of samples to be generated
'''
def __call__(self, n_var, depth, sample):
lhs_samples = self._get_lhs(n_var, sample)
return self._scale(lhs_samples, depth)
def _scale(self, vector, depth): # scale vector by depth
return np.ceil(depth*vector)
def _get_lhs(self, n_var, sample): # get samples using LHS
return pyDOE.lhs(n_var, sample)
#------------------------------------------------------
class OrthogonalArray(object):
''' NOT YET IMPLEMENTED for generic OA '''
def __init__(self, logger_id='logger_id'):
self._logger = DaVELogger.get_logger('%s.%s.%s' % (logger_id, __name__, self.__class__.__name__))
#------------------------------------------------------
class OrthogonalArrayTable(OrthogonalArray):
''' Generates orthogonal array samples from pre-defined tables. '''
TABLENAME_FORMAT = 'OA_V%d_L%d_tbl'
def __init__(self, logger_id='logger_id'):
OrthogonalArray.__init__(self, logger_id)
@property
def max_nvar(self): # max # of vars supported by mProbo
return EnvOaTable().max_oa_var
@property
def max_depth(self): # max # oa depth supported by mProbo
''' TODO: somehow self._max_oa_depth returns a string, os int() is used '''
return int(EnvOaTable().max_oa_depth)
@property
def vector(self): # generated vector
return self._vector
@property
def length(self): # length of generated vector
return self._length
@property
def depth(self): # depth of generated vector
return self._depth
def generate(self, n_var, depth):
''' generate OA+random vector for given # of vars, OA depth '''
self._depth = depth
self._vector = self._read_oatable(n_var, depth)
self._length = self._vector.shape[0] if not isNone(self._vector) else 0
self._logger.debug(mcode.DEBUG_019 %(self.depth, self.length))
def test(self, n_var, depth): # test if OA exists for given # of vars, OA depth
return self._read_oatable(n_var, depth)
def get_oatable(self, n_var, depth): # return oa table StringIO if exists
try:
return getattr(oatable, self.TABLENAME_FORMAT %(n_var, depth))
except:
return None
def _read_oatable(self, n_var, depth): # return OA vector if exists
table = self.get_oatable(n_var, depth)
if table:
strio = copy.deepcopy(table) # copy since StringIO is read more than once
vector_array = np.loadtxt(strio, dtype=int)-1 # -1 because table starts from 1
return vector_array.reshape(vector_array.shape[0], n_var)
else:
return None
#------------------------------------------------------
class TestVectorGenerator(object):
def __init__(self, ph, test_cfg, logger_id='logger_id'):
'''
ph: Port Handler class instance
test_cfg: TestConfig class instance
'''
self._logger_id = logger_id
self._logger = DaVELogger.get_logger('%s.%s.%s' % (logger_id, __name__, self.__class__.__name__))
self.option = { # read vector generation options from test config
#'oa_depth': test_cfg.get_option_regression_oa_depth(),
'oa_depth': int(test_cfg.get_option_regression_min_oa_depth()),
'min_oa_depth': int(test_cfg.get_option_regression_min_oa_depth()),
'max_sample': int(test_cfg.get_option_regression_max_sample()),
'en_interact': test_cfg.get_option_regression_en_interact(),
'order': int(test_cfg.get_option_regression_order()) }
map(self._logger.info, print_section(mcode.INFO_036, 2)) # print section header
# all possible linear circuit configurations by DigitalModePort
self._generate_digital_vector(ph.get_digital_input())
# process analog ports
self._ph = ph
if self._ph.get_by_name('dummy_analoginput') != None:
self.option['max_sample'] = 1
self._count_port(ph) # count number of (pinned, unpinned) ports
self._update_analog_grid()
analog_raw_vector = self._generate_analog_raw_vector()
# analog test vectors by scaling raw vector to real range
self._a_vector = self._map_analog_vector(analog_raw_vector)
self._logger.info(mcode.INFO_045 % self.get_analog_vector_length())
def _count_port(self, ph): # separate unpinned, pinned analog ports and count them
self.unpin_analog = ph.get_unpinned(ph.get_pure_analog_input())
self.pin_analog = ph.get_pinned(ph.get_pure_analog_input())
self.unpin_quantized = ph.get_unpinned(ph.get_quantized_analog())
self.pin_quantized = ph.get_pinned(ph.get_quantized_analog())
self.no_unpin_analog = len(self.unpin_analog) + len(self.unpin_quantized)
self.no_pin_analog = len(self.pin_analog) + len(self.pin_quantized)
def _update_analog_grid(self):
''' calculate required analog grid for given max_sample option
max_sample: maximum number of vectors set by user
Na : # of analog+quantized input ports
TODO: Decide whether max_bitw affects analog grid or not
'''
# adjust grid to (self.max_bitw + 1) if that is smaller than 3
self.max_bitw = self._get_max_bitwidth(self.unpin_quantized)
if self.option['oa_depth'] <= self.max_bitw:
self.option.update({'oa_depth': self.max_bitw + 1})
self._logger.info(mcode.INFO_039 % self.option['oa_depth'])
self._logger.info(mcode.INFO_036_1 % self.option['max_sample'])
if isNone(self._ph.get_by_name('dummy_analoginput')):
# Adjust max_sample to 2x no of all the linear terms
#max_sample_internal = self.get_unit_no_testvector() + min(self.get_unit_no_testvector(), 2*self.get_unit_no_testvector_otf())
max_sample_internal = max(8,2*self.get_unit_no_testvector())
if self.option['max_sample'] < max_sample_internal:
self.option['max_sample'] = max_sample_internal
self._logger.info(mcode.INFO_036_1_1 % max_sample_internal)
Na = self.no_unpin_analog
Ng = self.option['oa_depth']
oa = OrthogonalArrayTable(self._logger_id)
if not isNone(oa.get_oatable(Na, Ng)): # caculate # of grid, if oa exists
max_sample = self.option['max_sample']
oa_vec0 = oa.test(Na, Ng)
if len(oa_vec0) <= max_sample:
max_depth = oa.max_depth+1 if Na > 1 else 100
for i in range (self.option['oa_depth'], max_depth):
oa_vec = oa.test(Na, i)
if isNone(oa_vec):
Ng = i - 1
break
elif len(oa_vec) >= max_sample:
Ng = i
break
#Ng = i
oa_vec0 = oa.test(Na, Ng)
if len(oa_vec0) > max_sample:
self.option['max_sample'] = len(oa_vec0)
else:
self.option['max_sample'] = len(oa_vec0)
self.option['oa_depth'] = Ng
self._logger.info(mcode.INFO_036_2 % self.option['max_sample'])
self._logger.info(mcode.INFO_036_3 % self.option['oa_depth'])
self._logger.info(mcode.INFO_036_4 % (Ng, len(oa.test(Na, Ng))))
def get_unit_no_testvector_otf(self): # unit number of test vectors for on-the-fly check
n = len(self.unpin_analog)*self.option['order']
for p in self.unpin_quantized:
n += p.bit_width
return max(4,n)
def get_unit_no_testvector(self): # N+1 where N is the number of linear terms
n = len(self.unpin_analog) # number of analog inputs (exclude quantized analog)
nh = n*(self.option['order']-1) # number of higher-order terms for analog inputs
nqa = 0
nqa_int = 0
for p in self.unpin_quantized:
nqa += p.bit_width # total bit width of quantized analog
if len(self.unpin_quantized) > 1: # if # of quantized analog inputs > 1
nqa_int = 1
for p in self.unpin_quantized:
nqa_int *= p.bit_width # interaction between quantized analog bits
n_tot = 1 + n + nh + nqa # linear terms
if self.option['en_interact']: # take into account for interaction terms
return n_tot + n*(n-1)/2 + nqa*n + nqa_int # linear terms + 1st interaction terms
else:
return n_tot
def dump_test_vector(self, ph, workdir): # dump generated test vectors to a csv file
csv_d = os.path.join(workdir, EnvFileLoc().csv_vector_prefix+'_digital.csv') # for digital
csv_a = os.path.join(workdir, EnvFileLoc().csv_vector_prefix+'_analog.csv') # for analog
d_vector = dict([ (k, self.conv_tobin(ph, k, v)) for k, v in self._d_vector.items() ])
a_vector = dict([ (k, self.conv_tobin(ph, k, v)) for k, v in self._a_vector.items() ])
df_d = | pd.DataFrame(d_vector) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import sys
sys.path.append("../")
# In[2]:
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-p torch,pandas,numpy -m')
# In[3]:
from pathlib import Path
import itertools
from collections import Counter
from functools import partial, reduce
import joblib
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from fastai.text import (
TextDataset, SortishSampler, SortSampler, DataLoader, ModelData, get_rnn_classifier, seq2seq_reg,
RNN_Learner, TextModel, to_gpu, LanguageModelLoader, LanguageModelData
)
from fastai.core import T
from fastai.rnn_reg import EmbeddingDropout
from fastai.text import accuracy
from torch.optim import Adam
import torch.nn as nn
import torch
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
import sentencepiece as spm
get_ipython().run_line_magic('matplotlib', 'inline')
# In[4]:
path = Path("../data/cache/lm_unigram_douban/")
path.mkdir(parents=True, exist_ok=True)
# In[5]:
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# ## Import And Tokenize Comments and Ratings
# In[6]:
df_ratings = pd.read_csv("../data/ratings_word.csv")
df_ratings.head()
# In[7]:
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.4, random_state=888)
train_idx, test_idx = next(sss.split(df_ratings, df_ratings.rating))
df_train = df_ratings.iloc[train_idx].copy()
df_test = df_ratings.iloc[test_idx].copy()
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=888)
val_idx, test_idx = next(sss.split(df_test, df_test.rating))
df_val = df_test.iloc[val_idx].copy()
df_test = df_test.iloc[test_idx].copy()
del df_ratings
# In[8]:
UNK = 0
BEG = 1
EMB_DIM = 300
# ### Use the Unigram Vocabulary from the Wiki model
# In[9]:
sp = spm.SentencePieceProcessor()
sp.Load("../data/unigram_model.model")
# #### Tokenize
# In[11]:
results = []
tokens_train, tokens_val, tokens_test = [], [], []
for df, tokens in zip((df_train, df_val, df_test), (tokens_train, tokens_val, tokens_test)) :
for i, row in tqdm_notebook(df.iterrows(), total=df.shape[0]):
tokens.append(np.array([BEG] + sp.EncodeAsIds(row["comment"])))
# In[12]:
assert len(tokens_train) == df_train.shape[0]
# In[14]:
tokens_train[0]
# #### Embedding
# We can keep using the original embedding matrix, but the row corresponding to the BEG token must be zeroed.
# In[17]:
MODEL_PATH = "../data/cache/lm_unigram/models/lm_lstm.h5"
weights = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)
assert weights['0.encoder.weight'].shape[1] == EMB_DIM
weights['0.encoder.weight'].shape
# In[18]:
weights['0.encoder.weight'][BEG, :] = 0
weights['0.encoder_with_dropout.embed.weight'][BEG, :] = 0
weights['1.decoder.weight'][BEG, :] = 0
# In[22]:
n_toks = weights['0.encoder.weight'].shape[0]
# ### Use the Refitted Vocabulary
# #### Investigate Vocabulary Differences
# In[9]:
itos_orig = []
with open("../data/unigram_model.vocab", mode="r", encoding="utf-8") as f:
for line in f.readlines():
itos_orig.append(line.split("\t")[0])
itos = []
with open("../data/rating_unigram_model.vocab", mode="r", encoding="utf-8") as f:
for line in f.readlines():
itos.append(line.split("\t")[0])
n_toks = len(itos)
n_toks
# In[10]:
itos[:5]
# In[11]:
mapping = {s: idx for idx, s in enumerate(itos)}
mapping_orig = {s: idx for idx, s in enumerate(itos_orig)}
# In[12]:
voc_diff = set(itos) - set(itos_orig)
print(len(voc_diff), len(itos))
sorted([(x, mapping[x]) for x in list(voc_diff)], key=lambda x: x[1], reverse=True)[:50]
# #### Tokenize
# In[13]:
sp = spm.SentencePieceProcessor()
sp.Load("../data/rating_unigram_model.model")
# In[14]:
results = []
tokens_train, tokens_val, tokens_test = [], [], []
for df, tokens in zip((df_train, df_val, df_test), (tokens_train, tokens_val, tokens_test)) :
for i, row in tqdm_notebook(df.iterrows(), total=df.shape[0]):
tokens.append(np.array([BEG] + sp.EncodeAsIds(row["comment"])))
assert len(tokens_train) == df_train.shape[0]
# In[15]:
tokens_val[0]
# In[16]:
df_val.iloc[0]
# #### Prepare the embedding matrix
# In[17]:
MODEL_PATH = "../data/cache/lm_unigram/models/lm_lstm.h5"
weights = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)
assert weights['0.encoder.weight'].shape[1] == EMB_DIM
weights['0.encoder.weight'].shape
# In[18]:
new_matrix = np.zeros((n_toks, EMB_DIM))
hits = 0
for i, w in enumerate(itos):
if w in mapping_orig:
new_matrix[i] = weights['0.encoder.weight'][mapping_orig[w]]
hits += 1
new_matrix[BEG, :] = 0
hits, hits *100 / len(itos[3:])
# In[19]:
weights['0.encoder.weight'] = T(new_matrix)
weights['0.encoder_with_dropout.embed.weight'] = T(np.copy(new_matrix))
weights['1.decoder.weight'] = T(np.copy(new_matrix))
# ## Languange Model
# In[20]:
bs = 64
bptt = 50
trn_dl = LanguageModelLoader(np.concatenate(tokens_train), bs, bptt)
val_dl = LanguageModelLoader(np.concatenate(tokens_val), bs, bptt)
# In[21]:
np.max(np.array(list(itertools.chain.from_iterable(tokens_train))))
# In[23]:
model_data = LanguageModelData(path, 2, n_toks, trn_dl, val_dl, bs=bs, bptt=bptt)
# In[24]:
drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15])*0.7
opt_fn = partial(torch.optim.Adam, betas=(0.8, 0.99))
# In[25]:
learner = model_data.get_model(opt_fn, EMB_DIM, 500, 3,
dropouti=drops[0], dropout=drops[1], wdrop=drops[2], dropoute=drops[3], dropouth=drops[4])
learner.metrics = [accuracy]
learner.freeze_to(-1)
# In[26]:
learner.model.load_state_dict(weights)
# In[27]:
lr=1e-3
lrs = lr
learner.fit(lrs/2, 1, wds=1e-7, use_clr=(32,2), cycle_len=1)
# In[28]:
learner.save('lm_last_ft')
# In[29]:
learner.unfreeze()
learner.clip = 25
learner.lr_find(start_lr=lrs/10, end_lr=lrs*10, linear=True)
# In[30]:
learner.sched.plot()
# In[31]:
lr = 3e-3
lrs = lr
learner.fit(lrs, 1, wds=1e-7, use_clr=(20,5), cycle_len=10)
# In[34]:
learner.save_encoder("lm1_enc")
# In[35]:
learner.save("lm1")
# In[36]:
del learner
# ## 3-class Classifier
# As in https://zhuanlan.zhihu.com/p/27198713
# ### Full Dataset (v1)
# In[37]:
for df in (df_train, df_val, df_test):
df["label"] = (df["rating"] >= 3) * 1
df.loc[df.rating == 3, "label"] = 1
df.loc[df.rating > 3, "label"] = 2
# In[38]:
df_train.label.value_counts()
# In[39]:
bs = 64
trn_ds = TextDataset(tokens_train, df_train.label.values)
val_ds = TextDataset(tokens_val, df_val.label.values)
trn_samp = SortishSampler(tokens_train, key=lambda x: len(tokens_train[x]), bs=bs//2)
val_samp = SortSampler(tokens_val, key=lambda x: len(tokens_val[x]))
trn_dl = DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=0, sampler=trn_samp)
val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=0, sampler=val_samp)
model_data = ModelData(path, trn_dl, val_dl)
# In[40]:
dps = np.array([0.4,0.5,0.05,0.3,0.4]) * 0.5
opt_fn = partial(torch.optim.Adam, betas=(0.7, 0.99))
bptt = 50
# In[41]:
model = get_rnn_classifier(bptt, bptt*2, 3, n_toks, emb_sz=EMB_DIM, n_hid=500, n_layers=3, pad_token=2,
layers=[EMB_DIM*3, 50, 3], drops=[dps[4], 0.1],
dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
# In[42]:
learn = RNN_Learner(model_data, TextModel(to_gpu(model)), opt_fn=opt_fn)
learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
learn.clip=25.
learn.metrics = [accuracy]
learn.load_encoder('lm1_enc')
# In[43]:
learn.freeze_to(-1)
learn.lr_find(lrs/1000)
learn.sched.plot()
# In[44]:
lr=2e-4
lrm = 2.6
lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr])
learn.fit(lrs, 1, wds=0, cycle_len=1, use_clr=(8,3))
# In[45]:
learn.save('clas_0')
# In[46]:
learn.freeze_to(-2)
learn.fit(lrs, 1, wds=0, cycle_len=1, use_clr=(8,3))
# In[47]:
learn.save('clas_1')
# In[48]:
learn.unfreeze()
learn.fit(lrs, 1, wds=0, cycle_len=14, use_clr=(32,10))
# In[49]:
learn.save("clas_full")
# #### Evaluate
# In[50]:
learn.load("clas_full")
learn.model.reset()
_ = learn.model.eval()
# In[51]:
learn.model.eval()
preds, ys = [], []
for x, y in tqdm_notebook(val_dl):
preds.append(np.argmax(learn.model(x)[0].cpu().data.numpy(), axis=1))
ys.append(y.cpu().numpy())
# In[52]:
preds = np.concatenate(preds)
ys = np.concatenate(ys)
preds.shape, ys.shape
# In[53]:
| pd.Series(ys) | pandas.Series |
######################################################################
## DeepBiome
## - Main code
##
## July 10. 2019
## Youngwon (<EMAIL>)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import os
import sys
import json
import time
import numpy as np
import pandas as pd
import gc
import warnings
warnings.filterwarnings("ignore")
import logging
from sklearn.model_selection import KFold
from . import logging_daily
from . import configuration
from . import loss_and_metric
from . import readers
from . import build_network
from .utils import file_path_fold, argv_parse, taxa_selection_accuracy
import keras.backend as k
import tensorflow as tf
import copy
from ete3 import Tree, faces, AttrFace, TreeStyle, NodeStyle, CircleFace, TextFace, RectFace
import matplotlib.colors as mcolors
pd.set_option('display.float_format', lambda x: '%.03f' % x)
np.set_printoptions(formatter={'float_kind':lambda x: '%.03f' % x})
def deepbiome_train(log, network_info, path_info, number_of_fold=None,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
max_queue_size=10, workers=1, use_multiprocessing=False,
verbose=True):
"""
Function for training the deep neural network with phylogenetic tree weight regularizer.
It uses microbiome abundance data as input and uses the phylogenetic taxonomy to guide the decision of the optimal number of layers and neurons in the deep learning architecture.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
number_of_fold (int):
default=None
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
max_queue_size (int):
default=10
workers (int):
default=1
use_multiprocessing (boolean):
default=False
verbose (boolean):
show the log if True
default=True
Returns
-------
test_evaluation (numpy array):
numpy array of the evaluation using testset from all fold
train_evaluation (numpy array):
numpy array of the evaluation using training from all fold
network (deepbiome network instance):
deepbiome class instance
Examples
--------
Training the deep neural network with phylogenetic tree weight regularizer.
test_evaluation, train_evaluation, network = deepbiome_train(log, network_info, path_info)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
try: tf.config.experimental.set_memory_growth(gpus, True)
except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
### Argument #########################################################################################
model_save_dir = path_info['model_info']['model_dir']
model_path = os.path.join(model_save_dir, path_info['model_info']['weight'])
try:
hist_path = os.path.join(model_save_dir, path_info['model_info']['history'])
is_save_hist = True
except:
is_save_hist = False
try:
warm_start = network_info['training_info']['warm_start'] == 'True'
warm_start_model = network_info['training_info']['warm_start_model']
except:
warm_start = False
# try: save_frequency=int(network_info['training_info']['save_frequency'])
# except: save_frequency=None
### Reader ###########################################################################################
if verbose: log.info('-----------------------------------------------------------------')
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
# TODO: fix path_info
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
y_path = '%s/%s'%(data_path, path_info['data_info']['y_path'])
############################################
# Set the cross-validation
try:
idxs = np.array(pd.read_csv(path_info['data_info']['idx_path'])-1, dtype=np.int)
if number_of_fold == None:
number_of_fold = idxs.shape[1]
except:
nsample = pd.read_csv(y_path).shape[0]
if number_of_fold == None:
number_of_fold = nsample
kf = KFold(n_splits=number_of_fold, shuffle=True, random_state=12)
cv_gen = kf.split(range(nsample))
idxs = np.array([train_idx for train_idx, test_idx in cv_gen]).T
############################################
try:
count_path = path_info['data_info']['count_path']
x_list = np.array( | pd.read_csv(path_info['data_info']['count_list_path'], header=None) | pandas.read_csv |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from sklearn import linear_model
from sklearn import model_selection
from sklearn.ensemble import RandomForestRegressor
print(os.getcwd())
# data_path = r'C:\Users\ArseneLupin\Desktop\OrderType.csv'
data_path = os.getcwd() + r'\dataset\train.csv'
data_train = pd.read_csv(data_path)
data_train.shape
# 使用 len 和 df
for i in range(len(data_train)):
for j in range(12):
cur_data = data_train.loc[i][j]
print(cur_data)
# 使用 .iteriterms()
for i, series in data_train.iteritems():
# print(i, ":", type(series))
# print(i + ' : ' + series)
print(series)
print(data_train.head())
# data.select_dtypes()
data_train.info()
# show the data
fig = plt.figure()
plt.subplot2grid((2, 3), (0, 0))
# the Survived is the y
data_train.Survived.value_counts().plot(kind='bar') # 柱状图
plt.title(u'live num')
plt.ylabel(u'num')
plt.subplot2grid((2, 3), (0, 1))
data_train.Pclass.value_counts().plot(kind="bar")
plt.ylabel(u"num")
plt.title(u"passenger class")
plt.subplot2grid((2, 3), (0, 2))
plt.scatter(data_train.Age, data_train.Survived)
plt.ylabel(u"live") # 设定纵坐标名称
plt.grid(b=True, which='major', axis='y')
plt.title(u"live by age")
plt.subplot2grid((2, 3), (1, 0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
plt.xlabel(u"age") # plots an axis lable
plt.ylabel(u"density")
plt.title(u"passerger class by age ")
plt.legend((u'1 class ', u'2 class', u'3 class'), loc='best') # sets our legend for our graph.
plt.subplot2grid((2, 3), (1, 2))
data_train.Embarked.value_counts().plot(kind='bar')
plt.title(u"num at embarked ")
plt.ylabel(u"num")
# 看看各乘客等级的获救情况
fig = plt.figure()
# fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'live': Survived_1, u'unlive': Survived_0})
df.plot(kind='bar', stacked=True)
plt.title(u"live by class")
plt.xlabel(u"passenger class")
plt.ylabel(u"num")
plt.show()
# the baby all lived the old lives little than the new
Survived_age = data_train.Age[data_train.Survived == 1].value_counts()
unSurvived_age = data_train.Age[data_train.Survived == 0].value_counts()
temp_data = {u'live': Survived_age, u'unlive': unSurvived_age}
df = pd.DataFrame(temp_data)
df.plot(kind='bar', stacked=True)
plt.title(u'live by age')
plt.ylabel(u'num')
plt.xlabel(u'age')
print(df)
plt.show()
print(df.head())
print(df.size)
print(df.shape)
df.describe()
df.get_dtype_counts()
df.idxmax()
df.idxmin()
df.info()
data_list = df.iteritems
# 看看各性别的获救情况
fig = plt.figure()
# fig.set(alpha=0.2) # 设定图表颜色alpha参数
# most of the people died and in the live ,women is more
Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
df = pd.DataFrame({u'man': Survived_m, u'female': Survived_f})
df.plot(kind='bar', stacked=True)
plt.title(u"survied by sex")
plt.xlabel(u"sex")
plt.ylabel(u"num")
plt.show()
# 然后我们再来看看各种舱级别情况下各性别的获救情况
fig = plt.figure()
# fig.set(alpha=0.65) # 设置图像透明度,无所谓
plt.title(u"surviced by class and sex")
ax1 = fig.add_subplot(141)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar',
label="female highclass",
color='#FA2479')
ax1.set_xticklabels([u"unlive", u"live"], rotation=0)
ax1.legend([u"femall/high class"], loc='best')
ax2 = fig.add_subplot(142, sharey=ax1)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar',
label='female, low class',
color='pink')
ax2.set_xticklabels([u"live", u"unlive"], rotation=0)
plt.legend([u"female/low class"], loc='best')
ax3 = fig.add_subplot(143, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar',
label='male, high class',
color='lightblue')
ax3.set_xticklabels([u"unlive", u"live"], rotation=0)
plt.legend([u"man/high class"], loc='best')
ax4 = fig.add_subplot(144, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar',
label='male low class',
color='steelblue')
ax4.set_xticklabels([u"unlive", u"live"], rotation=0)
plt.legend([u"man/low class"], loc='best')
plt.show()
fig = plt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
# x is the Embarked and y is the num and in the dataframe the row is the Embarked and the clo is the num
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'live': Survived_1, u'unlive': Survived_0})
df.plot(kind='bar', stacked=True)
plt.title(u"live by Embarked")
plt.xlabel(u"Embarked")
plt.ylabel(u"num")
plt.show()
df
# 堂兄妹个数 这就是特征工程,就是属性队结果的影响就是侦探的直觉,所以这些东西吸引我的原因,因为这些东西 在一开始就是和我在一起。很久很久以前。
g = data_train.groupby(['SibSp', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
g = data_train.groupby(['Parch', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
temp_data = data_train.Parch
temp_data
data_train.head()
# ticket是船票编号,应该是unique的,和最后的结果没有太大的关系,先不纳入考虑的特征范畴把
# cabin只有204个乘客有值,我们先看看它的一个分布
temp_data = data_train.Cabin.value_counts()
temp_data
fig = plt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
# cabin 客舱
Survived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()
Survived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()
df = pd.DataFrame({u'yes cabin': Survived_cabin, u'no cabin': Survived_nocabin}).transpose()
df.plot(kind='bar', stacked=True)
plt.title(u"live by cabin")
plt.xlabel(u"Cabin exit")
plt.ylabel(u"num")
plt.show()
### 使用 RandomForestClassifier 填补缺失的年龄属性
def set_missing_ages(df):
# 把已有的数值型特征取出来丢进Random Forest Regressor中
age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
# 乘客分成已知年龄和未知年龄两部分
known_age = age_df[age_df.Age.notnull()].as_matrix()
unknown_age = age_df[age_df.Age.isnull()].as_matrix()
# y即目标年龄
y = known_age[:, 0]
# X即特征属性值
X = known_age[:, 1:]
# fit到RandomForestRegressor之中
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
# 用得到的模型进行未知年龄结果预测
predictedAges = rfr.predict(unknown_age[:, 1::])
# 用得到的预测结果填补原缺失数据
df.loc[(df.Age.isnull()), 'Age'] = predictedAges
return df, rfr
def set_Cabin_type(df):
df.loc[(df.Cabin.notnull()), 'Cabin'] = "Yes"
df.loc[(df.Cabin.isnull()), 'Cabin'] = "No"
return df
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
age_df = data_train[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].values
unknown_age = age_df[age_df.Age.isnull()].values
# y即目标年龄
y = known_age[:, 0]
X = known_age[:, 1:]
# fit到RandomForestRegressor之中
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
predictedAges = rfr.predict(unknown_age[:, 1::])
# 用得到的预测结果填补原缺失数据
df.loc[(df.Age.isnull()), 'Age'] = predictedAges
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix='Cabin')
dummies_Embarked = | pd.get_dummies(data_train['Embarked'], prefix='Embarked') | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 18:48:59 2019
@author: Kazuki
AvSigVersion を datetime とみなし、日毎の EngineVersion 等のシェアを計る
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from multiprocessing import cpu_count, Pool
import utils
utils.start(__file__)
PREF = 'f009'
features = ['EngineVersion', 'AppVersion', 'OsBuild', 'OsBuildLab',
'IeVerIdentifier', 'Census_OSBranch', 'Census_OSBuildNumber',
'Census_OSBuildRevision', 'Census_OSVersion']
tr = pd.read_feather('../data/train.f')[['AvSigVersion']+features]
te = pd.read_feather('../data/test.f')[['AvSigVersion']+features]
# AS timestamp
datedictAS = np.load('../external/AvSigVersionTimestamps.npy')[()]
tr['AvSigVersion_date'] = tr['AvSigVersion'].map(datedictAS).dt.date
te['AvSigVersion_date'] = te['AvSigVersion'].map(datedictAS).dt.date
trte = pd.concat([tr, te], ignore_index=True)
gc.collect()
def multi(args):
gc.collect()
key, outpath_tr, outpath_te = args
ct = pd.crosstab(trte['AvSigVersion_date'],
trte[key],
normalize='index')
melt = pd.melt(ct.reset_index(), 'AvSigVersion_date')
melt.columns = ['AvSigVersion_date', key, f'AvSigVersion_{key}_ratio']
# shift
melt[f'lag1_AvSigVersion_{key}_ratio'] = pd.melt(ct.shift(1).reset_index(), 'AvSigVersion_date')['value']
melt[f'lead1_AvSigVersion_{key}_ratio'] = pd.melt(ct.shift(-1).reset_index(), 'AvSigVersion_date')['value']
keys = ['AvSigVersion_date', key]
tr_f = pd.merge(tr[keys], melt, on=keys, how='left')
te_f = pd.merge(te[keys], melt, on=keys, how='left')
tr_f.drop(keys, axis=1, inplace=True)
te_f.drop(keys, axis=1, inplace=True)
# output
tr_f.add_prefix(PREF+'_').to_feather(outpath_tr)
te_f.add_prefix(PREF+'_').to_feather(outpath_te)
return
os.system(f'rm ../data/tmp_*_{PREF}*')
argss = []
for i,c in enumerate(features):
argss.append([c, f'../data/tmp_tr_{PREF}_{c}.f', f'../data/tmp_te_{PREF}_{c}.f'])
pool = Pool( cpu_count() )
pool.map(multi, argss)
pool.close()
# train
df = pd.concat([ | pd.read_feather(f) | pandas.read_feather |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
train = pd.read_csv('./train.csv', encoding='utf-8')
train.head()
test = pd.read_csv('./test.csv', encoding='utf-8')
test.head()
## 결측치를 확인하고 결측치 채우기 (simple imputer 이용)
train.info()
train.isnull().sum()
train[train['hour_bef_pm2.5'].isnull()]
from sklearn.impute import SimpleImputer
si = SimpleImputer(strategy='mean')
imputed_df = si.fit_transform(train)
train = pd.DataFrame(imputed_df, columns = train.columns)
train.isnull().sum()
test.info()
test.isnull().sum()
test[test['hour_bef_pm2.5'].isnull()]
si = SimpleImputer(strategy='mean')
imputed_df2 = si.fit_transform(test)
test = pd.DataFrame(imputed_df2, columns = test.columns)
test.isnull().sum()
## 컬럼간 상관관계 확인하기
train.corr()
train.corr()[np.abs(train.corr())>=0.3]
sns.heatmap(train.corr()[np.abs(train.corr())>=0.3], annot=True)
test.corr()
test.corr()[np.abs(test.corr())>=0.3]
sns.heatmap(test.corr()[np.abs(test.corr())>=0.3], annot=True)
'''
train에서는 id는 상관관계가 없기 때문에 삭제하고 진행
강수량은 상관관계가 낮으나 test에서는 상관관계가 존재하므로 삭제 안함
test는 id만 삭제하고 진행
'''
X_train = train.drop(columns=['id', 'count'], axis=1)
y_train = train['count']
print(X_train.shape, y_train.shape)
X_test = test.drop(columns=['id'], axis=1)
print(X_test.shape)
### 앙상블 모델링 진행하기
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
import lightgbm as lgb
from sklearn.metrics import *
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
abc = AdaBoostRegressor(random_state=100)
gbc = GradientBoostingRegressor(random_state=100)
rf = RandomForestRegressor(random_state=100, booster='gbtree')
xgb = xgb.XGBRegressor(random_state=100, booster='gbtree')
lgb = lgb.LGBMRegressor(random_state=100, booster='gbtree', boosting_type = 'gbdt')
#########
param_grid_abc = {
'n_estimators': [1, 10, 50, 100],
'loss': ['linear', 'square', 'exponential'],
'learning_rate': [0, 0.1, 0.2, 0.5, 0.8, 1.0],
}
grid_search_abc = GridSearchCV(abc, param_grid=param_grid_abc, cv=10, n_jobs=-1)
grid_search_abc.fit(X_train, y_train)
print(grid_search_abc.best_estimator_) #AdaBoostRegressor(learning_rate=0.1, n_estimators=100, random_state=100)
best_param_abc_gs = grid_search_abc.best_estimator_
pred_abc_gs = best_param_abc_gs.predict(X_test)
random_search_abc = RandomizedSearchCV(abc, param_grid_abc, n_iter=30, cv = 10, n_jobs=-1, scoring = 'neg_mean_squared_error')
random_search_abc.fit(X_train, y_train)
print(random_search_abc.best_estimator_) #AdaBoostRegressor(learning_rate=0.1, n_estimators=100, random_state=100)
best_param_abc_rs = random_search_abc.best_estimator_
pred_abc_rs = best_param_abc_rs.predict(X_test)
sns.kdeplot(pred_abc_gs, label = 'grid_pred')
sns.kdeplot(pred_abc_rs, label = 'rand_pred')
plt.legend()
plt.show()
print(best_param_abc_rs.score(X_train, y_train)) #0.7338069755742368
col_imp1 = pd.DataFrame(best_param_abc_gs.feature_importances_, index = X_train.columns, columns = ['value']).sort_values(by='value', ascending=False)
plt.figure(figsize=(10,10))
sns.barplot(col_imp1.index, col_imp1['value'])
plt.xticks(rotation=45)
#########
param_grid_rf = {
'max_depth': [None, 1, 10, 15, 20],
'max_leaf_nodes': [2],
'criterion':["mse"],
'n_estimators': [1, 10, 50, 100, 150, 200],
'min_samples_split':[2,3,4,8,10],
}
param_grid_rf = GridSearchCV(rf, param_grid=param_grid_rf, cv=10, n_jobs=-1)
param_grid_rf.fit(X_train, y_train)
print(param_grid_rf.best_estimator_) #RandomForestRegressor(max_leaf_nodes=2, n_estimators=150, random_state=100)
best_param_rf_gs = param_grid_rf.best_estimator_
pred_rf_gs = best_param_rf_gs.predict(X_test)
'''
random_search_rf = RandomizedSearchCV(rf, param_grid_rf, n_iter=30, cv = 10, n_jobs=-1, scoring = 'neg_mean_squared_error')
random_search_rf.fit(X_train, y_train)
print(random_search_rf.best_estimator_)
best_param_rf_rs = random_search_rf.best_estimator_
pred_rf_rs = best_param_rf_rs.predict(X_test)'''
sns.kdeplot(pred_rf_gs, label = 'grid_pred')
sns.kdeplot(pred_rf_rs, label = 'rand_pred')
plt.legend()
plt.show()
print(rf.fit(X_train, y_train).score(X_train, y_train)) #0.97125328407911
col_imp2 = pd.DataFrame(best_param_rf_gs.feature_importances_, index = X_train.columns, columns = ['value']).sort_values(by='value', ascending=False)
plt.figure(figsize=(10,10))
sns.barplot(col_imp2.index, col_imp2['value'])
plt.xticks(rotation=45)
#########
param_grid_gbc = {
'n_estimators': [1, 10, 50, 100],
'learning_rate': [0, 0.1, 0.2, 0.5, 0.8, 1.0],
'criterion':["mse"],
'max_depth':[None, 10, 20, 30, 50],
'min_samples_split':[2,3,4,8,10],
}
param_grid_gbc = GridSearchCV(gbc, param_grid=param_grid_gbc, cv=10, n_jobs=-1)
param_grid_gbc.fit(X_train, y_train)
print(param_grid_gbc.best_estimator_) #GradientBoostingRegressor(criterion='mse', max_depth=10, min_samples_split=10, random_state=100)
best_param_gbc_gs = param_grid_gbc.best_estimator_
pred_gbc_gs = best_param_gbc_gs.predict(X_test)
'''
random_search_gbc = RandomizedSearchCV(gbc, param_grid_gbc, n_iter=30, cv = 10, n_jobs=-1, scoring = 'neg_mean_squared_error')
random_search_gbc.fit(X_train, y_train)
print(random_search_gbc.best_estimator_)
best_param_gbc_rs = random_search_gbc.best_estimator_
pred_gbc_rs = best_param_gbc_rs.predict(X_test)'''
sns.kdeplot(pred_gbc_gs, label = 'grid_pred')
sns.kdeplot(pred_gbc_rs, label = 'rand_pred')
plt.legend()
plt.show()
col_imp3 = pd.DataFrame(best_param_gbc_gs.feature_importances_, index = X_train.columns, columns = ['value']).sort_values(by='value', ascending=False)
plt.figure(figsize=(10,10))
sns.barplot(col_imp3.index, col_imp3['value'])
plt.xticks(rotation=45)
print(best_param_gbc_gs.score(X_train, y_train)) #0.9994846712235719
######### """
param_grid_xgb = {
'max_depth': [None, 1, 10, 15, 20],
'n_estimators': [1, 10, 50, 100],
# 'alpha': [0.001, 0.01, 0.1, 1],
# 'lambda': [0.001, 0.01, 0.1, 1],
'learning_rate': [0, 0.1, 0.2, 0.5, 0.8, 1.0],
}
param_grid_xgb = GridSearchCV(xgb, param_grid=param_grid_xgb, cv=10, n_jobs=-1)
param_grid_xgb.fit(X_train, y_train)
print(param_grid_xgb.best_estimator_)
best_param_xgb_gs = param_grid_xgb.best_estimator_
pred_xgb_gs = best_param_xgb_gs.predict(X_test)
'''
random_search_xgb = RandomizedSearchCV(xgb, param_grid_xgb, n_iter=30, cv = 10, n_jobs=-1, scoring = 'neg_mean_squared_error')
random_search_xgb.fit(X_train, y_train)
print(random_search_xgb.best_estimator_)
best_param_xgb_rs = random_search_xgb.best_estimator_
pred_xgb_rs = best_param_xgb_rs.predict(X_test)'''
sns.kdeplot(pred_xgb_gs, label = 'grid_pred')
sns.kdeplot(pred_xgb_rs, label = 'rand_pred')
plt.legend()
plt.show()
col_imp4 = pd.DataFrame(best_param_xgb_gs.feature_importances_, index = X_train.columns, columns = ['value']).sort_values(by='value', ascending=False)
plt.figure(figsize=(10,10))
sns.barplot(col_imp4.index, col_imp4['value'])
plt.xticks(rotation=45)
#from xgboost import plot_importance
#plot_importance(param_grid_xgb.fit(X_train, y_train))
print(best_param_xgb_gs.score(X_train, y_train)) #0.9708202535661757
#########
param_grid_lgb = {
'max_depth': [-1, 1, 5, 10, 15, 20],
'n_estimators': [1, 9, 10, 50, 100],
# 'alpha': [0.001, 0.01, 0.1, 1],
# 'lambda': [0.001, 0.01, 0.1, 1],
'learning_rate': [0.1, 0.2, 0.5, 0.8, 1.0],
}
param_grid_lgb = GridSearchCV(lgb, param_grid=param_grid_lgb, cv=10, n_jobs=-1)
param_grid_lgb.fit(X_train, y_train)
print(param_grid_lgb.best_estimator_)
best_param_lgb_gs = param_grid_lgb.best_estimator_
pred_lgb_gs = best_param_lgb_gs.predict(X_test)
'''
random_search_lgb = RandomizedSearchCV(lgb, param_grid_lgb, n_iter=30, cv = 10, n_jobs=-1, scoring = 'neg_mean_squared_error')
random_search_lgb.fit(X_train, y_train)
print(random_search_rf.best_estimator_)
best_param_rf_rs = random_search_rf.best_estimator_
pred_rf_rs = best_param_rf_rs.predict(X_test)'''
sns.kdeplot(pred_lgb_gs, label = 'grid_pred_lgb')
sns.kdeplot(pred_rf_rs, label = 'rand_pred')
plt.legend()
plt.show()
col_imp5 = | pd.DataFrame(best_param_lgb_gs.feature_importances_, index = X_train.columns, columns = ['value']) | pandas.DataFrame |
import pandas as pd
import re
from functools import wraps
from lxml.etree import ParserError, XMLSyntaxError
from pyquery import PyQuery as pq
from urllib.error import HTTPError
from .. import utils
from .constants import (NATIONALITY,
PLAYER_ELEMENT_INDEX,
PLAYER_SCHEME,
PLAYER_URL,
ROSTER_URL)
from .player import AbstractPlayer
def _cleanup(prop):
try:
prop = prop.replace('%', '')
prop = prop.replace('$', '')
prop = prop.replace(',', '')
return prop.replace('+', '')
# Occurs when a value is of Nonetype. When that happens, return a blank
# string as whatever came in had an incomplete value.
except AttributeError:
return ''
def _int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
element_ind = 0
if func.__name__ in PLAYER_ELEMENT_INDEX.keys():
element_ind = PLAYER_ELEMENT_INDEX[func.__name__]
try:
value = _cleanup(prop[index][element_ind])
return int(value)
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
def _float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
element_ind = 0
try:
value = _cleanup(prop[index][element_ind])
return float(value)
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
def _most_recent_decorator(func):
@property
@wraps(func)
def wrapper(*args):
season = args[0]._most_recent_season
seasons = args[0]._season
index = seasons.index(season)
prop = func(*args)
element_ind = 0
try:
return prop[index][element_ind]
except (TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
class Player(AbstractPlayer):
"""
Get player information and stats for all seasons.
Given a player ID, such as 'altuvjo01' for <NAME>, capture all
relevant stats and information like name, nationality, height/weight,
career home runs, last season's batting average, salary, contract amount,
and much more.
By default, the class instance will return the player's career stats, but
single-season stats can be found by calling the instance with the requested
season as denoted on baseball-reference.com.
Parameters
----------
player_id : string
A player's ID according to basketball-reference.com, such as
'altuvjo01' for <NAME>. The player ID can be found by navigating
to the player's stats page and getting the string between the final
slash and the '.html' in the URL. In general, the ID is in the format
'LLLLLFFNN' where 'LLLLL' are the first 5 letters in the player's last
name, 'FF', are the first 2 letters in the player's first name, and
'NN' is a number starting at '01' for the first time that player ID has
been used and increments by 1 for every successive player.
"""
def __init__(self, player_id):
self._most_recent_season = ''
self._index = None
self._player_id = player_id
self._season = None
self._name = None
self._team_abbreviation = None
self._position = None
self._height = None
self._weight = None
self._birth_date = None
self._nationality = None
self._contract = None
self._games = None
self._games_started = None
self._plate_appearances = None
self._at_bats = None
self._runs = None
self._hits = None
self._doubles = None
self._triples = None
self._home_runs = None
self._runs_batted_in = None
self._stolen_bases = None
self._times_caught_stealing = None
self._bases_on_balls = None
self._times_struck_out = None
self._batting_average = None
self._on_base_percentage = None
self._slugging_percentage = None
self._on_base_plus_slugging_percentage = None
self._on_base_plus_slugging_percentage_plus = None
self._total_bases = None
self._grounded_into_double_plays = None
self._times_hit_by_pitch = None
self._sacrifice_hits = None
self._sacrifice_flies = None
self._intentional_bases_on_balls = None
self._complete_games = None
self._innings_played = None
self._defensive_chances = None
self._putouts = None
self._assists = None
self._errors = None
self._double_plays_turned = None
self._fielding_percentage = None
self._total_fielding_runs_above_average = None
self._defensive_runs_saved_above_average = None
self._total_fielding_runs_above_average_per_innings = None
self._defensive_runs_saved_above_average_per_innings = None
self._range_factor_per_nine_innings = None
self._range_factor_per_game = None
self._league_fielding_percentage = None
self._league_range_factor_per_nine_innings = None
self._league_range_factor_per_game = None
self._games_in_batting_order = None
self._games_in_defensive_lineup = None
self._games_pitcher = None
self._games_catcher = None
self._games_first_baseman = None
self._games_second_baseman = None
self._games_third_baseman = None
self._games_shortstop = None
self._games_left_fielder = None
self._games_center_fielder = None
self._games_right_fielder = None
self._games_outfielder = None
self._games_designated_hitter = None
self._games_pinch_hitter = None
self._games_pinch_runner = None
# Stats specific to pitchers
self._wins = None
self._losses = None
self._win_percentage = None
self._era = None
self._games_finished = None
self._shutouts = None
self._saves = None
self._hits_allowed = None
self._runs_allowed = None
self._earned_runs_allowed = None
self._home_runs_allowed = None
self._bases_on_balls_given = None
self._intentional_bases_on_balls_given = None
self._strikeouts = None
self._times_hit_player = None
self._balks = None
self._wild_pitches = None
self._batters_faced = None
self._era_plus = None
self._fielding_independent_pitching = None
self._whip = None
self._hits_against_per_nine_innings = None
self._home_runs_against_per_nine_innings = None
self._bases_on_balls_given_per_nine_innings = None
self._batters_struckout_per_nine_innings = None
self._strikeouts_thrown_per_walk = None
player_data = self._pull_player_data()
self._find_initial_index()
AbstractPlayer.__init__(self, player_id, self._name, player_data)
def _build_url(self):
"""
Create the player's URL to pull stats from.
The player's URL requires the first letter of the player's last name
followed by the player ID.
Returns
-------
string
The string URL for the player's stats page.
"""
# The first letter of the player's last name is used to sort the player
# list and is a part of the URL.
first_character = self._player_id[0]
return PLAYER_URL % (first_character, self._player_id)
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = self._build_url()
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_season(self, row):
"""
Parse the season string from the table.
The season is generally located in the first column of the stats tables
and should be parsed to denote which season metrics are being pulled
from.
Parameters
----------
row : PyQuery object
A PyQuery object of a single row in a stats table.
Returns
-------
string
A string representation of the season in the format 'YYYY', such as
'2017'.
"""
return utils._parse_field(PLAYER_SCHEME, row, 'season')
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict):
"""
Combine all stats for each season.
Since all of the stats are spread across multiple tables, they should
be combined into a single field which can be used to easily query stats
at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017', and the value is a
``dictionary`` with a ``string`` of 'data' and ``string``
containing all of the data.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table.
"""
most_recent_season = self._most_recent_season
if not table_rows:
table_rows = []
for row in table_rows:
# For now, remove minor-league stats
if 'class="minors_table hidden"' in str(row) or \
'class="spacer partial_table"' in str(row) or \
'class="partial_table"' in str(row):
continue
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
most_recent_season = season
self._most_recent_season = most_recent_season
if not career_stats:
return all_stats_dict
try:
all_stats_dict['Career']['data'] += str(next(career_stats))
except KeyError:
all_stats_dict['Career'] = {'data': str(next(career_stats))}
return all_stats_dict
def _combine_all_stats(self, player_info):
"""
Pull stats from all tables into single data structure.
Pull the stats from all of the requested tables into a dictionary that
is separated by season to allow easy queries of the player's stats for
each season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing all of the stats information for the
requested player.
Returns
-------
dictionary
Returns a dictionary where all stats from each table are combined
by season to allow easy queries by year.
"""
all_stats_dict = {}
for table_id in ['batting_standard', 'standard_fielding',
'appearances', 'pitching_standard']:
table_items = utils._get_stats_table(player_info,
'table#%s' % table_id)
career_items = utils._get_stats_table(player_info,
'table#%s' % table_id,
footer=True)
all_stats_dict = self._combine_season_stats(table_items,
career_items,
all_stats_dict)
return all_stats_dict
def _parse_nationality(self, player_info):
"""
Parse the player's nationality.
The player's nationality is denoted by a flag in the information
section with a country code for each nation. The country code needs to
pulled and then matched to find the player's home country. Once found,
the '_nationality' attribute is set for the player.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
for span in player_info('span').items():
if 'class="f-i' in str(span):
nationality = span.text()
nationality = NATIONALITY[nationality]
setattr(self, '_nationality', nationality)
break
def _parse_player_information(self, player_info):
"""
Parse general player information.
Parse general player information such as height, weight, and name. The
attribute for the requested field will be set with the value prior to
returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
for field in ['_height', '_weight', '_name']:
short_field = str(field)[1:]
value = utils._parse_field(PLAYER_SCHEME, player_info, short_field)
setattr(self, field, value)
def _parse_birth_date(self, player_info):
"""
Parse the player's birth date.
Pull the player's birth date from the player information and set the
'_birth_date' attribute with the value prior to returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
date = player_info('span[itemprop="birthDate"]').attr('data-birth')
setattr(self, '_birth_date', date)
def _parse_team_name(self, team):
"""
Parse the team name in the contract table.
The team names in the contract table should be pulled in plain text and
returned as a string.
Parameters
----------
team : string
A string representing the team_name tag in a row in the player's
contract table.
Returns
-------
string
A string of the team's name, such as '<NAME>'.
"""
team = team.replace('\xa0', ' ')
team_html = pq(team)
return team_html.text()
def _parse_contract(self, player_info):
"""
Parse the player's contract.
Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
contract = {}
salary_table = player_info('table#br-salaries')
for row in salary_table('tbody tr').items():
if 'class="spacer partial_table"' in str(row):
continue
year = row('th[data-stat="year_ID"]').text()
if year.strip() == '':
continue
age = row('td[data-stat="age"]').text()
team = self._parse_team_name(str(row('td[data-stat="team_name"]')))
salary = row('td[data-stat="Salary"]').text()
contract[year] = {
'age': age,
'team': team,
'salary': salary
}
setattr(self, '_contract', contract)
def _parse_value(self, html_data, field):
"""
Parse the HTML table to find the requested field's value.
All of the values are passed in an HTML table row instead of as
individual items. The values need to be parsed by matching the
requested attribute with a parsing scheme that sports-reference uses
to differentiate stats. This function returns a single value for the
given attribute.
Parameters
----------
html_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string.
field : string
The name of the attribute to match. Field must be a key in the
PLAYER_SCHEME dictionary.
Returns
-------
list
A list of all values that match the requested field. If no value
could be found, returns None.
"""
scheme = PLAYER_SCHEME[field]
items = [i.text() for i in html_data(scheme).items()]
# Stats can be added and removed on a yearly basis. If no stats are
# found, return None and have that be the value.
if len(items) == 0:
return None
return items
def _pull_player_data(self):
"""
Pull and aggregate all player information.
Pull the player's HTML stats page and parse unique properties, such as
the player's height, weight, and position. Next, combine all stats for
all seasons plus the player's career stats into a single object which
can easily be iterated upon.
Returns
-------
dictionary
Returns a dictionary of the player's combined stats where each key
is a string of the season and the value is the season's associated
stats.
"""
player_info = self._retrieve_html_page()
self._parse_player_information(player_info)
self._parse_nationality(player_info)
self._parse_birth_date(player_info)
self._parse_contract(player_info)
all_stats = self._combine_all_stats(player_info)
setattr(self, '_season', list(all_stats.keys()))
return all_stats
def _find_initial_index(self):
"""
Find the index of career stats.
When the Player class is instantiated, the default stats to pull are
the player's career stats. Upon being called, the index of the 'Career'
element should be the index value.
"""
index = 0
for season in self._season:
# The career stats default to Nonetype
if season is None or season == 'Career':
self._index = index
self._season[index] = 'Career'
break
index += 1
def __call__(self, requested_season=''):
"""
Specify a different season to pull stats from.
A different season can be requested by passing the season string, such
as '2017' to the class instance.
Parameters
----------
requested_season : string (optional)
A string of the requested season to query, such as '2017'. If left
blank or 'Career' is passed, the career stats will be used for
stats queries.
Returns
-------
Player class instance
Returns the class instance with the updated stats being referenced.
"""
if requested_season.lower() == 'career' or \
requested_season == '':
requested_season = 'Career'
index = 0
for season in self._season:
if season == requested_season:
self._index = index
break
index += 1
return self
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
With the result of the calls to class properties changing based on the
class index value, the dictionary should be regenerated every time the
index is changed when the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'assists': self.assists,
'at_bats': self.at_bats,
'bases_on_balls': self.bases_on_balls,
'batting_average': self.batting_average,
'birth_date': self.birth_date,
'complete_games': self.complete_games,
'defensive_chances': self.defensive_chances,
'defensive_runs_saved_above_average':
self.defensive_runs_saved_above_average,
'defensive_runs_saved_above_average_per_innings':
self.defensive_runs_saved_above_average_per_innings,
'double_plays_turned': self.double_plays_turned,
'doubles': self.doubles,
'errors': self.errors,
'fielding_percentage': self.fielding_percentage,
'games': self.games,
'games_catcher': self.games_catcher,
'games_center_fielder': self.games_center_fielder,
'games_designated_hitter': self.games_designated_hitter,
'games_first_baseman': self.games_first_baseman,
'games_in_batting_order': self.games_in_batting_order,
'games_in_defensive_lineup': self.games_in_defensive_lineup,
'games_left_fielder': self.games_left_fielder,
'games_outfielder': self.games_outfielder,
'games_pinch_hitter': self.games_pinch_hitter,
'games_pinch_runner': self.games_pinch_runner,
'games_pitcher': self.games_pitcher,
'games_right_fielder': self.games_right_fielder,
'games_second_baseman': self.games_second_baseman,
'games_shortstop': self.games_shortstop,
'games_started': self.games_started,
'games_third_baseman': self.games_third_baseman,
'grounded_into_double_plays': self.grounded_into_double_plays,
'height': self.height,
'hits': self.hits,
'home_runs': self.home_runs,
'innings_played': self.innings_played,
'intentional_bases_on_balls': self.intentional_bases_on_balls,
'league_fielding_percentage': self.league_fielding_percentage,
'league_range_factor_per_game': self.league_range_factor_per_game,
'league_range_factor_per_nine_innings':
self.league_range_factor_per_nine_innings,
'name': self.name,
'nationality': self.nationality,
'on_base_percentage': self.on_base_percentage,
'on_base_plus_slugging_percentage':
self.on_base_plus_slugging_percentage,
'on_base_plus_slugging_percentage_plus':
self.on_base_plus_slugging_percentage_plus,
'plate_appearances': self.plate_appearances,
'player_id': self.player_id,
'position': self.position,
'putouts': self.putouts,
'range_factor_per_game': self.range_factor_per_game,
'range_factor_per_nine_innings':
self.range_factor_per_nine_innings,
'runs': self.runs,
'runs_batted_in': self.runs_batted_in,
'sacrifice_flies': self.sacrifice_flies,
'sacrifice_hits': self.sacrifice_hits,
'season': self.season,
'slugging_percentage': self.slugging_percentage,
'stolen_bases': self.stolen_bases,
'team_abbreviation': self.team_abbreviation,
'times_caught_stealing': self.times_caught_stealing,
'times_hit_by_pitch': self.times_hit_by_pitch,
'times_struck_out': self.times_struck_out,
'total_bases': self.total_bases,
'total_fielding_runs_above_average':
self.total_fielding_runs_above_average,
'total_fielding_runs_above_average_per_innings':
self.total_fielding_runs_above_average_per_innings,
'triples': self.triples,
'weight': self.weight,
# Properties specific to pitchers
'balks': self.balks,
'bases_on_balls_given': self.bases_on_balls_given,
'bases_on_balls_given_per_nine_innings':
self.bases_on_balls_given_per_nine_innings,
'batters_faced': self.batters_faced,
'batters_struckout_per_nine_innings':
self.batters_struckout_per_nine_innings,
'earned_runs_allowed': self.earned_runs_allowed,
'era': self.era,
'era_plus': self.era_plus,
'fielding_independent_pitching':
self.fielding_independent_pitching,
'games_finished': self.games_finished,
'hits_against_per_nine_innings':
self.hits_against_per_nine_innings,
'hits_allowed': self.hits_allowed,
'home_runs_against_per_nine_innings':
self.home_runs_against_per_nine_innings,
'home_runs_allowed': self.home_runs_allowed,
'intentional_bases_on_balls_given':
self.intentional_bases_on_balls_given,
'losses': self.losses,
'runs_allowed': self.runs_allowed,
'saves': self.saves,
'shutouts': self.shutouts,
'strikeouts': self.strikeouts,
'strikeouts_thrown_per_walk': self.strikeouts_thrown_per_walk,
'times_hit_player': self.times_hit_player,
'whip': self.whip,
'wild_pitches': self.wild_pitches,
'win_percentage': self.win_percentage,
'wins': self.wins
}
return fields_to_include
@property
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
return | pd.DataFrame(rows, index=[indices]) | pandas.DataFrame |
import numpy as np
import pandas as pd
shirley_1015_bs_name = np.load(r'D:\voice2face\shirley_1015\shirley_1015_bs_name.npy')
shirley_1119_bs_name = np.load(r'D:\voice2face\shirley_1015\shirley_1119_bs_name.npy')
shirley_1119_bs_name316 = np.load(r'D:\voice2face\shirley_1119\shirley_1119_bs_name316.npy')
bs_value_1114_3_16 = np.load(r'D:\voice2face\shirley_1119\bs_value\bs_value_1114_3_16.npy')
print(bs_value_1114_3_16.shape)
weights1 = np.zeros((bs_value_1114_3_16.shape[0],len(shirley_1119_bs_name)))
bs_name_index = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 94, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 105, 104, 106, 107, 108, 109, 110, 111, 112, 113, 114, 1, 115]
for i in range(len(bs_name_index)):
weights1[:,i] = bs_value_1114_3_16[:,bs_name_index[i]]
# 导出权重的csv文件
import pandas as pd
df = | pd.DataFrame(weights1,columns=shirley_1119_bs_name) | pandas.DataFrame |
# --------------
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load Offers
offers= | pd.read_excel(path,sheet_name=0) | pandas.read_excel |
import os
import json
import traceback
import numpy as np
import pickle
import pandas as pd
import csv
#Run this code file from console to create the pickle file
pklfile = "taxa_mapping.pkl"
root_path = "<add root path here>"
image_location = root_path + "result-img\\"
taxa_file_path = root_path + "\\data\\taxa.csv"
image_url = "/static/result-img/"
def create_taxa_mapping():
try:
taxa_info = | pd.read_csv(taxa_file_path) | pandas.read_csv |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisCleaned[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
import scipy.stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import matplotlib.colors as colors
from matplotlib.colors import hsv_to_rgb
import seaborn as sns
import scipy.cluster.hierarchy as hierarchy
from cycler import cycler
import copy
from . import stats
from . import map as qtl_map
def setup_figure(aw=4.5, ah=3, xspace=[0.75,0.25], yspace=[0.75,0.25],
colorbar=False, ds=0.15, cw=0.15, ct=0, ch=None):
"""
"""
dl, dr = xspace
db, dt = yspace
fw = dl + aw + dr
fh = db + ah + dt
fig = plt.figure(facecolor=(1,1,1), figsize=(fw,fh))
ax = fig.add_axes([dl/fw, db/fh, aw/fw, ah/fh])
if not colorbar:
return ax
else:
if ch is None:
ch = ah/2
cax = fig.add_axes([(dl+aw+ds)/fw, (db+ah-ch-ct)/fh, cw/fw, ch/fh])
return ax, cax
# if not box:
# ax.spines['left'].set_position(('outward', 6))
# ax.spines['bottom'].set_position(('outward', 6))
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
# ax.tick_params(axis='both', which='both', direction='out', labelsize=fontsize)
def get_axgrid(nr, nc, ntot=None, sharex=False, sharey=False,
x_offset=6, y_offset=6,
dl=0.5, aw=2, dx=0.75, dr=0.25,
db=0.5, ah=2, dy=0.75, dt=0.25,
colorbar=False, ds=0.15, cw=0.15, ct=0, ch=None,
tri=None, fontsize=10, hide=['top', 'right']):
"""
"""
if ntot is None:
ntot = nr * nc
fw = dl + nc*aw + (nc-1)*dx + dr
fh = db + nr*ah + (nr-1)*dy + dt
fig = plt.figure(figsize=(fw,fh))
axes = []
n = 0
if tri is None:
si = lambda x: 0
elif tri == 'upper':
si = lambda x: x
for j in range(nr):
for i in range(si(j), nc):
if n<ntot:
ax = fig.add_axes([(dl+i*(aw+dx))/fw, (db+(nr-j-1)*(ah+dy))/fh, aw/fw, ah/fh], facecolor='none',
sharex=axes[0] if sharex and n>0 else None,
sharey=axes[0] if sharey and n>0 else None)
format_plot(ax, fontsize=fontsize, hide=hide, x_offset=x_offset, y_offset=y_offset)
axes.append(ax)
n += 1
if not colorbar:
return axes
else:
if ch is None:
ch = ah/2
cax = fig.add_axes([(dl+nc*aw+(nc-1)*dx+ds)/fw, (db+nr*ah+(nr-1)*dy-ch-ct)/fh, cw/fw, ch/fh])
# cax = fig.add_axes([(dl+aw+ds)/fw, (db+ah-ch-ct)/fh, cw/fw, ch/fh])
return axes, cax
def format_plot(ax, tick_direction='out', tick_length=4, hide=['top', 'right'],
hide_spines=True, lw=1, fontsize=10,
equal_limits=False, x_offset=0, y_offset=0, vmin=None):
# ax.autoscale(False)
for i in ['left', 'bottom', 'right', 'top']:
ax.spines[i].set_linewidth(lw)
ax.tick_params(axis='both', which='both', direction=tick_direction, labelsize=fontsize)
# set tick positions
if 'top' in hide and 'bottom' in hide:
ax.get_xaxis().set_ticks_position('none')
elif 'top' in hide:
ax.get_xaxis().set_ticks_position('bottom')
elif 'bottom' in hide:
ax.get_xaxis().set_ticks_position('top')
else:
ax.get_xaxis().set_ticks_position('both')
if 'left' in hide and 'right' in hide:
ax.get_yaxis().set_ticks_position('none')
elif 'left' in hide:
ax.get_yaxis().set_ticks_position('right')
elif 'right' in hide:
ax.get_yaxis().set_ticks_position('left')
elif len(hide)==0:
ax.get_xaxis().set_ticks_position('bottom')
ax.get_yaxis().set_ticks_position('left')
else:
ax.get_yaxis().set_ticks_position('both')
if hide_spines:
for i in hide:
ax.spines[i].set_visible(False)
# adjust tick size
for line in ax.xaxis.get_ticklines() + ax.yaxis.get_ticklines():
line.set_markersize(tick_length)
line.set_markeredgewidth(lw)
for line in (ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True)):
line.set_markersize(tick_length/2)
line.set_markeredgewidth(lw/2)
ax.spines['left'].set_position(('outward', y_offset))
ax.spines['bottom'].set_position(('outward', x_offset))
if equal_limits:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
lims = [np.minimum(xlim[0], ylim[0]), np.maximum(xlim[1], ylim[1])]
if vmin is not None:
lims[0] = vmin
ax.set_xlim(lims)
ax.set_ylim(lims)
# ax.autoscale(True) # temporary fix?
def plot_qtl(g, p, label_s=None, label_colors=None, split=False, split_colors=None, covariates_df=None,
legend_text=None, normalized=False, loc=None, ax=None, color=[0.5]*3,
variant_id=None, jitter=0, bvec=None, boxplot=False, xlabel=None,
ylabel='Normalized expression', title=None, show_counts=True):
""""""
assert p.index.equals(g.index)
if covariates_df is not None:
# only residualize the phenotype for plotting
p = stats.residualize(p.copy(), covariates_df.loc[p.index])
eqtl_df = pd.concat([g, p], axis=1)
eqtl_df.columns = ['genotype', 'phenotype']
if label_s is not None:
eqtl_df = | pd.concat([eqtl_df, label_s], axis=1, sort=False) | pandas.concat |
import importlib
import copy
import io, time
from io import BytesIO
import chardet
import os
import collections
from itertools import combinations, cycle, product
import math
import numpy as np
import pandas as pd
import pickle
import tarfile
import random
import re
import requests
from nltk.corpus import stopwords
from scipy.sparse import hstack, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
import torch.nn.functional as F
from tqdm import tqdm
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1) # change None to -1
from collections import Counter, defaultdict
import numpy as np
import re
import sys
import sklearn
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import classification_report, accuracy_score
import torch
from torchvision import datasets, transforms
from torch import nn, optim, autograd
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from transformers import * # here import bert
import warnings
warnings.filterwarnings("ignore")
# from data_structure import Dataset #, get_IMDB, get_kindle
import argparse
import utils
importlib.reload(utils)
from utils import *
from vae import VAE, vae_loss_function, train_vae, test_vae
# randseed = 52744889
randseed = int(time.time()*1e7%1e8)
print("random seed: ", randseed)
sys.stdout.flush()
random.seed(randseed)
np.random.seed(randseed)
torch.manual_seed(randseed)
parser = argparse.ArgumentParser(description='Text Reviews')
parser.add_argument('-d', '--dataset', type=str, default='amazon',choices=['yelp', 'amazon', 'tripadvisor'])
parser.add_argument('--datsubsample', type=int, default=10000)
parser.add_argument('--n_restarts', type=int, default=1)
parser.add_argument('--steps', type=int, default=2001)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--l2_reg', type=float, default=1e-3)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--mode', type=str, default="linear", choices=["linear", "logistic"])
parser.add_argument('--z_dim', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--num_features', type=int, default=5)
parser.add_argument('--input_dim', type=int, default=0)
parser.add_argument('--vae_epochs', type=int, default=101)
parser.add_argument('--spurious_corr', type=float, default=0.9)
parser.add_argument('--alter_freq', type=int, default=50)
parser.add_argument('--mode_latent', type=str, default="pcaz", choices=["vaez", "bertz", "bertz_cl", "pcaz"])
parser.add_argument('--mode_train_data', type=str, default="text", choices=["text", "bertz"])
flags, unk = parser.parse_known_args()
res = pd.DataFrame(vars(flags), index=[0])
res['randseed'] = randseed
print(flags)
sys.stdout.flush()
moniker = flags.dataset
out_dir = moniker + '_out'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
dat_file = 'dat/'+ moniker + '/' + moniker + '_meta.csv'
# detect encoding
# rawdata=open(dat_file,'rb').read()
# result = chardet.detect(rawdata)
# charenc = result['encoding']
# print(charenc)
if moniker == 'amazon':
full_dat = pd.read_csv(dat_file)
elif moniker == 'tripadvisor':
full_dat = pd.read_csv(dat_file, encoding='Windows-1252')
elif moniker == 'yelp':
full_dat = pd.read_csv(dat_file, lineterminator='\n')
full_dat = full_dat.rename(columns={'stars_x':'y', 'text':'review_text'})
data = full_dat[full_dat['y']!=3].sample(n=flags.datsubsample)
texts = list(data['review_text'])
labels = (np.array(data['y']) > 3)
split1, split2 = int(0.6*len(texts)), (int(0.6*len(texts)) + int(0.2*len(texts)))
train_text, train_label = texts[:split1], torch.from_numpy(labels[:split1]).float().cuda()
testobs_text, testobs_label = texts[split1:split2], torch.from_numpy(labels[split1:split2]).float().cuda()
testct_text, testct_label = texts[split2:], torch.from_numpy(labels[split2:]).float().cuda()
stop_words = set(stopwords.words('english'))
# vec = CountVectorizer(min_df=5, binary=True, max_df=0.8, ngram_range=(1,3))
vec = TfidfVectorizer(min_df=10, binary=True, max_df=0.8, ngram_range=(1,3))
X_full = vec.fit_transform(train_text)
X_train_full = vec.transform(train_text)
X_testobs_full = vec.transform(testobs_text)
X_testct_full = vec.transform(testct_text)
feats = np.array(vec.get_feature_names())
top_feature_idx, placebo_feature_idx, coef = get_top_terms(vec.transform(train_text), train_label.cpu().numpy(), coef_thresh=0.0, placebo_thresh=0.1) # use coef_threshold=0.0 to take all features, no thresholding happening here.
# top_feature_idx = np.arange(500)
X_train_np = vec.transform(train_text).toarray()
X_testobs_np = vec.transform(testobs_text).toarray()
X_testct_np = vec.transform(testct_text).toarray()
fea_corrcoef = np.corrcoef(X_train_np[:,top_feature_idx].T) - np.eye(X_train_np[:,top_feature_idx].shape[1])
colinear_fea = np.where(fea_corrcoef>0.96)[0]
feature_idx = np.array(list(set(top_feature_idx) - set(colinear_fea)))
# only consider words in feature_idx
id2term = collections.OrderedDict({i:v for i,v in enumerate(feats[feature_idx])})
term2id = collections.OrderedDict({v:i for i,v in enumerate(feats[feature_idx])})
spurious_words = np.array([term2id['as'], term2id['also'], term2id['am'], term2id['an']])
final_train_accs = []
final_test_accs = []
final_train_baselineaccs = []
final_test_baselineaccs = []
final_train_baselinevaeaccs = []
final_test_baselinevaeaccs = []
for restart in range(flags.n_restarts):
print("Restart", restart)
def make_environment(texts, labels, e):
def torch_bernoulli(p, size):
return (torch.rand(size) < p).float()
def torch_xor(a, b):
return (a-b).abs() # Assumes both inputs are either 0 or 1
# Assign a binary label based on the digit; flip label with probability 0.25
labels = (labels == 1).float()
labels = torch_xor(labels, torch_bernoulli(0.35, len(labels)).cuda())
# Assign a color based on the label; flip the color with probability e
spurious_counts = torch.stack([torch_xor(labels, torch_bernoulli(e, len(labels)).cuda()) for i in range(len(spurious_words))], axis=1)
# Apply the color to the image by zeroing out the other color channel
texts[:,spurious_words] = spurious_counts.cpu().numpy()
return {
'texts': torch.from_numpy(texts).float().cuda(),
'labels': labels[:, None].cuda(),
'colors': spurious_counts.cuda()
}
train_data = make_environment(X_train_np[:,feature_idx], train_label, 1-flags.spurious_corr)
X_train, train_label = train_data['texts'], train_data['labels']
testobs_data = make_environment(X_testobs_np[:,feature_idx], testobs_label, 1-flags.spurious_corr)
X_testobs, testobs_label = testobs_data['texts'], testobs_data['labels']
testct_data = make_environment(X_testct_np[:,feature_idx], testct_label, 0.9)
X_testct, testct_label = testct_data['texts'], testct_data['labels']
vocabsize = X_train.shape[1]
flags.input_dim = vocabsize
# calculate pca embedding
pca = PCA(n_components=flags.z_dim)
# pca.fit(np.row_stack([X_train_np, X_testobs_np, X_testct_np]))
pca.fit(np.row_stack([X_train_np[:,feature_idx]]))
train_pca_embedding = torch.from_numpy(pca.transform(X_train_np[:,feature_idx])).float().cuda()
testobs_pca_embedding = torch.from_numpy(pca.transform(X_testobs_np[:,feature_idx])).float().cuda()
testct_pca_embedding = torch.from_numpy(pca.transform(X_testct_np[:,feature_idx])).float().cuda()
print(np.cumsum(pca.explained_variance_ratio_))
print(pca.explained_variance_ratio_ * flags.input_dim)
# take only the top pc dimensions with effective sample size > 100
# flags.z_dim = np.sum(pca.explained_variance_ratio_ * flags.input_dim > 30)
# print(flags.z_dim)
# # calculate pca embedding
# pca = PCA(n_components=flags.z_dim)
# # pca.fit(np.row_stack([X_train_np, X_testobs_np, X_testct_np]))
# pca.fit(np.row_stack([X_train_np[:,feature_idx]]))
# train_pca_embedding = torch.from_numpy(pca.transform(X_train_np[:,feature_idx])).float().cuda()
# testobs_pca_embedding = torch.from_numpy(pca.transform(X_testobs_np[:,feature_idx])).float().cuda()
# testct_pca_embedding = torch.from_numpy(pca.transform(X_testct_np[:,feature_idx])).float().cuda()
# flags.num_features = flags.input_dim - flags.z_dim
subset_nonsing=False
if flags.mode_latent == "vaez":
z_dim = flags.z_dim
elif flags.mode_latent == "bertz":
z_dim = train_embedding.shape[1]
elif flags.mode_latent == "bertz_cl":
z_dim = X_train_cl_embedding.shape[1]
subset_nonsing=True
elif flags.mode_latent == "pcaz":
z_dim = flags.z_dim
# z_dim = flags.z_dim
print(vocabsize, z_dim)
sys.stdout.flush()
def compute_prob(logits, mode="logistic"):
if mode == "linear":
probs = torch.max(torch.stack([logits,torch.zeros_like(logits)],dim=2),dim=2)[0]
probs = torch.min(torch.stack([probs,torch.ones_like(probs)],dim=2),dim=2)[0]
elif mode == "logistic":
probs = nn.Sigmoid()(logits)
return probs
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.input_dim = flags.input_dim
self.z_dim = z_dim
self.num_features = flags.num_features
lin1 = nn.Linear(self.input_dim, self.num_features)
lin4 = nn.Linear(self.z_dim, 1)
for lin in [lin1, lin4]:
nn.init.xavier_uniform_(lin.weight)
nn.init.zeros_(lin.bias)
self._main = nn.Sequential(lin1)
self._tvaez = nn.Sequential(lin4)
self.finallayer = nn.Linear(self.num_features + 1, 1)
def forward(self, inputbow, vaez):
features = torch.matmul(inputbow, F.softmax(self._main[0].weight,dim=1).T)
logits = self.finallayer(torch.cat([features, self._tvaez(vaez)],dim=1))
probs = compute_prob(logits, mode=flags.mode)
features_ctr = features - features.mean(dim=0)
beta_hat = 0.
feature_hats = 0.
logit_hats = logits
prob_hats = probs
return features, logits, probs, beta_hat, logit_hats, prob_hats
def mean_nll(probs, y, mode="logistic"):
if mode == "linear":
mean_nll = nn.MSELoss()(probs, y)
elif mode == "logistic":
mean_nll = nn.BCELoss()(probs, y)
return mean_nll
def mean_accuracy(probs, y):
preds = (probs > 0.5).float()
return ((preds - y).abs() < 1e-2).float().mean()
# the Net component is not used
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(flags.num_features, 1)
def forward(self, x):
x = self.fc(x)
return x
def initNet(layer):
nn.init.xavier_uniform_(layer.weight)
nn.init.zeros_(layer.bias)
envs = [
{'text': X_train, 'pcaz': train_pca_embedding, 'labels': train_label}, \
{'text': X_testct, 'pcaz': testct_pca_embedding, 'labels': testct_label}, \
{'text': X_testobs, 'pcaz': testobs_pca_embedding, 'labels': testobs_label}]
if subset_nonsing == True:
envs[0]['text'] = envs[0]['text'][nonsing_sents]
envs[0]['labels'] = envs[0]['labels'][nonsing_sents]
if flags.mode_train_data == 'text':
flags.input_dim = vocabsize
train_loader = torch.utils.data.DataLoader(dataset=envs[0]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testct_loader = torch.utils.data.DataLoader(dataset=envs[1]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testobs_loader = torch.utils.data.DataLoader(dataset=envs[2]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
elif flags.mode_train_data == 'bertz':
flags.input_dim = train_embedding.shape[1]
train_loader = torch.utils.data.DataLoader(dataset=envs[0]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testct_loader = torch.utils.data.DataLoader(dataset=envs[1]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testobs_loader = torch.utils.data.DataLoader(dataset=envs[2]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
if flags.mode_latent == 'vae':
trainvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'trainvae.pt'
testctvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'testctvae.pt'
testobsvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'testobsvae.pt'
envs[0]['vaeimage'] = torch.load(trainvaez_name)[0].detach()
envs[1]['vaeimage'] = torch.load(testctvaez_name)[0].detach()
envs[2]['vaeimage'] = torch.load(testobsvaez_name)[0].detach()
envs[0]['vaez'] = torch.load(trainvaez_name)[1].detach()
envs[1]['vaez'] = torch.load(testctvaez_name)[1].detach()
envs[2]['vaez'] = torch.load(testobsvaez_name)[1].detach()
mlp = MLP().cuda()
optimizer_causalrep = optim.Adam(mlp._main.parameters(), lr=flags.lr, weight_decay=1e-8)
for step in range(flags.steps):
for i in range(len(envs)):
env = envs[i]
features, logits, probs, beta_hat, logit_hats, prob_hats = mlp(env[flags.mode_train_data], env[flags.mode_latent])
labels = env['labels']
env['nll'] = mean_nll(probs, env['labels'], mode=flags.mode)
env['nllhat'] = mean_nll(prob_hats, env['labels'], mode=flags.mode)
env['acc'] = mean_accuracy(probs, env['labels'])
env['acchat'] = mean_accuracy(prob_hats, env['labels'])
y = labels - labels.mean()
X = torch.cat([features, env[flags.mode_latent]], dim=1)
X = X - X.mean(dim=0)
X = torch.cat([torch.ones(X.shape[0],1).cuda(), X], dim=1)
beta = [torch.matmul(
torch.matmul(
torch.inverse(flags.l2_reg*torch.eye(X.shape[1]).cuda()+
torch.matmul(
torch.transpose(X, 0, 1),
X)),
torch.transpose(X, 0, 1)),
y[:,j]) for j in range(y.shape[1])]
env['covs'] = cov(torch.cat([beta[0][1:flags.num_features+1] *features, torch.unsqueeze((beta[0][-flags.z_dim:] * env[flags.mode_latent]).sum(dim=1),1)], dim=1))[-1][:-1] # extract the last row to have cov(Features, C)
env['causalrep'] = ((features.std(dim=0) * beta[0][1:flags.num_features+1])**2).sum()
# + 2 * env['covs']).sum()
weight_norm = torch.tensor(0.).cuda()
for w in mlp.finallayer.parameters():
weight_norm += w.norm().pow(2)
env['l2penalty'] = flags.l2_reg * weight_norm
if step % 500 == 0:
print("\nnll", env['nll'],
"\nl2", env['l2penalty'],
"\ncausalrep", env['causalrep'])
# "\nfeatureZr2", env['featureZr2'])
sys.stdout.flush()
train_l2penalty = torch.stack([envs[0]['l2penalty']])
train_causalrep = torch.stack([envs[0]['causalrep']])
train_nll = torch.stack([envs[0]['nll']]).mean()
train_acc = torch.stack([envs[0]['acc']]).mean()
testct_nll = torch.stack([envs[1]['nll']]).mean()
testct_acc = torch.stack([envs[1]['acc']]).mean()
testobs_nll = torch.stack([envs[2]['nll']]).mean()
testobs_acc = torch.stack([envs[2]['acc']]).mean()
nll_loss = train_nll.clone()
# + train_l2penalty.clone()
if step % 1 == 0:
l1_penalty = F.softmax(mlp._main[0].weight,dim=1).abs().sum()
train_causalrep_loss = -train_causalrep.clone()
# + 1e-3 * l1_penalty - 1e-2 * torch.log(1 - train_featureZr2)
optimizer_causalrep.zero_grad()
train_causalrep_loss.backward(retain_graph=True)
optimizer_causalrep.step()
if step % 100 == 0:
train_features, train_y = mlp(envs[0][flags.mode_train_data], envs[0][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[0]['labels'].clone().cpu().detach().numpy()
testct_features, testct_y = mlp(envs[1][flags.mode_train_data], envs[1][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[1]['labels'].clone().cpu().detach().numpy()
testobs_features, testobs_y = mlp(envs[2][flags.mode_train_data], envs[2][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[2]['labels'].clone().cpu().detach().numpy()
C_vals = [1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]
causalrep_alphas, causalrep_trainaccs, causalrep_testobsaccs, causalrep_testctaccs = [], [], [], []
for C in C_vals:
alpha = 1./C
print('\ncausal-pred-w-features', 'C', C)
# clf = LinearRegression()
# clf = Ridge(alpha=alpha)
clf = LogisticRegression(C=C, class_weight='auto', solver='lbfgs')
clf.fit(train_features, train_y)
resulttrain = classification_report((train_y > 0), (clf.predict(train_features) > 0), output_dict=True)
resultct = classification_report((testct_y > 0), (clf.predict(testct_features) > 0), output_dict=True)
resultobs = classification_report((testobs_y > 0), (clf.predict(testobs_features)> 0), output_dict=True)
print('train',resulttrain['accuracy'])
print('testobs',resultobs['accuracy'])
print('testct',resultct['accuracy'])
sys.stdout.flush()
causalrep_trainaccs.append(resulttrain['accuracy'])
causalrep_testobsaccs.append(resultobs['accuracy'])
causalrep_testctaccs.append(resultct['accuracy'])
causalrep_alphas.append(alpha)
print("\n\n##### causal rep top words")
feature_weights = torch.topk(F.softmax(mlp._main[0].weight,dim=1),20, axis=1)
top_causal_words = feature_weights[1].detach().cpu().numpy()
top_causal_weights = feature_weights[0].detach().cpu().numpy()
for j in np.argsort(-np.abs(beta[0][1:(1+flags.num_features)].detach().cpu().numpy())):
# for j in range(top_causal_words.shape[0]):
print("feature", j)
print("coefficient", beta[0][j+1])
sort_causal_words = np.argsort(-top_causal_weights[j])[:20]
print("top causal words", [id2term[i] for i in top_causal_words[j][sort_causal_words]], top_causal_weights[j][sort_causal_words]
)
causalrep_res = {}
assert len(causalrep_alphas) == len(causalrep_trainaccs)
assert len(causalrep_alphas) == len(causalrep_testobsaccs)
assert len(causalrep_alphas) == len(causalrep_testctaccs)
for item in ['causalrep_trainaccs', 'causalrep_testobsaccs', 'causalrep_testctaccs']:
for i, alpha in enumerate(causalrep_alphas):
curname = item + '_' + str(alpha)
if item == 'causalrep_trainaccs':
causalrep_res[curname] = causalrep_trainaccs[i]
elif item == 'causalrep_testobsaccs':
causalrep_res[curname] = causalrep_testobsaccs[i]
elif item == 'causalrep_testctaccs':
causalrep_res[curname] = causalrep_testctaccs[i]
res = pd.concat([ | pd.DataFrame(causalrep_res, index=[0]) | pandas.DataFrame |
import pandas as pd
import click
from hgvs_helpers import var_c_p_prep, rev_comp, tryconvert
def hgvs_nomenclature(output_folder, weight_filter):
table = | pd.read_csv(output_folder + '/all_mutations_with_weights.csv') | pandas.read_csv |
import pandas as pd
import os
data=pd.read_csv('./data/name/namecode.csv')
result=pd.DataFrame()
re=0
for i,d in enumerate(zip(data['ts_code'],data['name'],data['industry'])):
temp=pd.DataFrame()
try:
temp= | pd.read_csv('./data/stock/'+d[0]+'_'+d[1]+'_'+d[2]+'.csv') | pandas.read_csv |
# Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from collections import defaultdict, namedtuple
import numpy as np
import pandas
from doppelganger.listbalancer import (
balance_multi_cvx, discretize_multi_weights
)
from doppelganger import inputs
HIGH_PASS_THRESHOLD = .1 # Filter controls which are present in less than 10% of HHs
# These are the minimum fields needed to allocate households
DEFAULT_PERSON_FIELDS = {
inputs.STATE,
inputs.PUMA,
inputs.SERIAL_NUMBER,
inputs.AGE,
inputs.SEX,
inputs.PERSON_WEIGHT,
}
DEFAULT_HOUSEHOLD_FIELDS = {
inputs.STATE,
inputs.PUMA,
inputs.SERIAL_NUMBER,
inputs.NUM_PEOPLE,
inputs.HOUSEHOLD_WEIGHT,
}
CountInformation = namedtuple('CountInformation', ['tract', 'count'])
class HouseholdAllocator(object):
@staticmethod
def from_csvs(households_csv, persons_csv):
"""Load saved household and person allocations.
Args:
households_csv (unicode): path to households file
persons_csv (unicode): path to persons file
Returns:
HouseholdAllocator: allocated persons & households_csv
"""
allocated_households = pandas.read_csv(households_csv)
allocated_persons = pandas.read_csv(persons_csv)
return HouseholdAllocator(allocated_households, allocated_persons)
@staticmethod
def from_cleaned_data(marginals, households_data, persons_data):
"""Allocate households based on the given data.
marginals (Marginals): controls to match when allocating
households_data (CleanedData): data about households. Must contain
DEFAULT_HOUSEHOLD_FIELDS.
persons_data (CleanedData): data about persons. Must contain
DEFAULT_PERSON_FIELDS.
"""
for field in DEFAULT_HOUSEHOLD_FIELDS:
assert field.name in households_data.data, \
'Missing required field {}'.format(field.name)
for field in DEFAULT_PERSON_FIELDS:
assert field.name in persons_data.data, \
'Missing required field {}'.format(field.name)
households, persons = HouseholdAllocator._format_data(
households_data.data, persons_data.data)
allocated_households, allocated_persons = \
HouseholdAllocator._allocate_households(households, persons, marginals)
return HouseholdAllocator(allocated_households, allocated_persons)
def __init__(self, allocated_households, allocated_persons):
self.allocated_households = allocated_households
self.allocated_persons = allocated_persons
self.serialno_to_counts = defaultdict(list)
for _, row in self.allocated_households.iterrows():
serialno = row[inputs.SERIAL_NUMBER.name]
tract = row[inputs.TRACT.name]
count = int(row[inputs.COUNT.name])
self.serialno_to_counts[serialno].append(CountInformation(tract, count))
def get_counts(self, serialno):
"""Return the information about weights for a given serial number.
A household is repeated for a certain number of times for each tract.
This returns a list of (tract, repeat count). The repeat count
indicates the number of times this serial number should be repeated in
this tract.
Args:
seriano (unicode): the household's serial number
Returns:
list(CountInformation): the weighted repetitions for this serialno
"""
return self.serialno_to_counts[serialno]
def write(self, household_file, person_file):
"""Write allocated households and persons to the given files
Args:
household_file (unicode): path to write households to
person_file (unicode): path to write persons to
"""
self.allocated_households.to_csv(household_file)
self.allocated_persons.to_csv(person_file)
@staticmethod
def _filter_sparse_columns(df, cols):
''' Filter out variables who are are so sparse they would break the solver.
Columns are assumed to be of an indicator type (0/1)
Args
df (pandas.DataFrame): dataframe to filter
cols (list(str)): column names
Returns
filtered column list (list(str))
'''
return df[cols]\
.loc[:, df[cols].sum()/float(len(df)) > HIGH_PASS_THRESHOLD]\
.columns.tolist()
@staticmethod
def _allocate_households(households, persons, tract_controls):
# Only take nonzero weights
households = households[households[inputs.HOUSEHOLD_WEIGHT.name] > 0]
# Initial weights from PUMS
w = households[inputs.HOUSEHOLD_WEIGHT.name].as_matrix().T
allocation_inputs = [inputs.NUM_PEOPLE, inputs.NUM_VEHICLES] # Hard-coded for now
# Prepend column name to bin name to prevent bin collision
hh_columns = []
for a_input in allocation_inputs:
subset_values = households[a_input.name].unique().tolist()
hh_columns += HouseholdAllocator._str_broadcast(a_input.name, subset_values)
hh_columns = HouseholdAllocator._filter_sparse_columns(households, hh_columns)
hh_table = households[hh_columns].as_matrix()
A = tract_controls.data[hh_columns].as_matrix()
n_tracts, n_controls = A.shape
n_samples = len(households.index.values)
# Control importance weights
# < 1 means not important (thus relaxing the constraint in the solver)
mu = np.mat([1] * n_controls)
w_extend = np.tile(w, (n_tracts, 1))
mu_extend = np.mat(np.tile(mu, (n_tracts, 1)))
B = np.mat(np.dot(np.ones((1, n_tracts)), A)[0])
# Our trade-off coefficient gamma
# Low values (~1) mean we trust our initial weights, high values
# (~10000) mean want to fit the marginals.
gamma = 100.
# Meta-balancing coefficient
meta_gamma = 100.
hh_weights = balance_multi_cvx(
hh_table, A, B, w_extend, gamma * mu_extend.T, meta_gamma
)
# We're running discretization independently for each tract
tract_ids = tract_controls.data['TRACTCE'].values
total_weights = np.zeros(hh_weights.shape)
sample_weights_int = hh_weights.astype(int)
discretized_hh_weights = discretize_multi_weights(hh_table, hh_weights)
total_weights = sample_weights_int + discretized_hh_weights
# Extend households and add the weights and ids
households_extend = | pandas.concat([households] * n_tracts) | pandas.concat |
import pandas as pd
import numpy as np
import holidays
import statsmodels.formula.api as sm
import time
from Helper import helper
import datetime
class DR(object):
def __init__(self, dataframe):
df = dataframe.copy()
self.lm_data = helper.DR_Temp_data_cleaning(df)
self.name = 'DR'
def set_date(self, date):
self.date = date
def model_building(self, training_data, station):
ml = sm.ols(formula=station + "_Temp_Log~Load_Lag_48+Humi_Lag_48+I(Load_Lag_48**2)+I(Humi_Lag_48**2)+\
Hour+Weekday+Month+Holiday+ RIV_Temp_Log_Lag_48+I(RIV_Temp_Log_Lag_48**2)+\
Month:Load_Lag_48+Month:Humi_Lag_48+\
Hour:Load_Lag_48+Hour:Humi_Lag_48+\
Holiday:Load_Lag_48+Holiday:Humi_Lag_48", data=training_data).fit()
return ml
def model_selection_mape_rmse(self, station):
training_days = 30
date_time = | pd.to_datetime(self.date) | pandas.to_datetime |
"""
Main experimentation pipeline for measuring robustness of explainers.
Unlike the other pipelines, we just want to compare the original LIME with its robustified version,
so we do not require a list of configs to run through.
We mainly run three experiments:
* Robustness of original LIME against Fooling LIME attack (surrogate sampler)
* Robustness of CTGAN-LIME against Fooling LIME attack (surrogate sampler)
* Robustness of CTGAN-LIME against Fooling LIME attack with CTGAN sampler (white-box)
We measure the following metrics:
* How often is the biased column (e.g. race) identified as the top feature for a prediction (top-1 accuracy)
* How often is the biased column identified as among the top k features for a prediction (top-k accuracy)
* How often is 'unrelated_column' identified as the top feature for a prediction (success rate)
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from experiments.experiments_common import create_save_path
from experiments.utils.adversarial_lime import Adversarial_Lime_Model, one_hot_encode
from experiments.utils.constants import Datasets, Explainers
from experiments.utils.datasets import get_dataset
from experiments.utils.explainers import get_explainer
DATASET_CONFIGS = {
Datasets.COMPAS: {
'biased_column': 'race',
'unrelated_column': 'unrelated_column',
'use_cat_for_ctgan': True,
'ctgan_params': {
'embedding_dim': 512,
'gen_dim': (256, 256, 256, 256, 256),
'dis_dim': (256, 256, 256, 256, 256)
},
'discriminator_threshold': 0.5
},
Datasets.GERMANCREDIT: {
'biased_column': 'Sex',
'unrelated_column': 'unrelated_column',
'use_cat_for_ctgan': False,
'ctgan_params': {
'embedding_dim': 512,
'gen_dim': (256, 256, 256, 256, 256),
'dis_dim': (256, 256, 256, 256, 256)
},
'ctgan_epochs': 300,
'use_onehot': True,
'measure_distance': 'raw',
'discriminator_threshold': 0.5
},
Datasets.COMMUNITY: {
'biased_column': 'racePctWhite numeric',
'unrelated_column': 'unrelated_column',
'use_cat_for_ctgan': True,
'ctgan_params': {
'embedding_dim': 512,
'gen_dim': (256, 256, 256, 256, 256),
'dis_dim': (256, 256, 256, 256, 256),
},
'ctgan_epochs': 1000,
'use_onehot': False,
'measure_distance': 'raw',
'discriminator_threshold': 0.01
}
}
class sexist_model_f:
"""
For the German credit dataset
"""
def __init__(self, sex_index):
self.sex_index = sex_index
# Decision rule: classify negatively if race is black
def predict(self, X):
return np.array([0 if x[self.sex_index] > 0 else 1 for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X, y):
return np.sum(self.predict(X) == y) / len(X)
class innocuous_model_psi_german:
def __init__(self, unrelated_index):
self.unrelated_index = unrelated_index
# Decision rule: classify according to randomly drawn column 'unrelated column'
def predict(self, X):
return np.array([0 if x[self.unrelated_index] > 0 else 1 for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X, y):
return np.sum(self.predict(X) == y) / len(X)
class racist_model_f:
"""
For the COMPAS dataset
"""
def __init__(self, race_index):
self.race_index = race_index
# Decision rule: classify negatively if race is black
def predict(self, X):
return np.array([0 if x[self.race_index] > 0 else 1 for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X, y):
return np.sum(self.predict(X) == y) / len(X)
class innocuous_model_psi:
def __init__(self, unrelated_index):
self.unrelated_index = unrelated_index
# Decision rule: classify according to randomly drawn column 'unrelated column'
def predict(self, X):
return np.array([0 if x[self.unrelated_index] > 0 else 1 for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X, y):
return np.sum(self.predict(X) == y) / len(X)
def preprocess_robustness_datasets(dataset, params={}):
data = get_dataset(dataset, params)
if dataset == Datasets.COMPAS:
X, y, _ = data['data'], data['target'], data['cols']
X[DATASET_CONFIGS[Datasets.COMPAS]['unrelated_column']] = np.random.choice([0, 1],
size=X.shape[0])
features = list(X.columns)
categorical_feature_name = ['two_year_recid', 'c_charge_degree_F', 'c_charge_degree_M',
'sex_Female', 'sex_Male', 'race', 'unrelated_column']
categorical_feature_indcs = [features.index(c) for c in categorical_feature_name]
X = X.values
elif dataset == Datasets.GERMANCREDIT:
X, y = data['data'], data['target']
X = | pd.DataFrame(X, columns=data['feature_names']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
( | Timestamp('2012-08-01 00:00:00') | pandas.Timestamp |
# Voronoi-CNN-ch2Dxysec.py
# 2021 <NAME> (UCLA, <EMAIL>)
## Voronoi CNN for channel flow data.
## Authors:
# <NAME> (UCLA), <NAME> (Argonne National Lab.), <NAME> (Argonne National Lab.), <NAME> (Keio University), <NAME> (UCLA)
## We provide no guarantees for this code. Use as-is and for academic research use only; no commercial use allowed without permission. For citation, please use the reference below:
# Ref: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
# "Global field reconstruction from sparse sensors with Voronoi tessellation-assisted deep learning,"
# in Review, 2021
#
# The code is written for educational clarity and not for speed.
# -- version 1: Mar 13, 2021
from keras.layers import Input, Add, Dense, Conv2D, merge, Conv2DTranspose, MaxPooling2D, UpSampling2D, Flatten, Reshape, LSTM
from keras.models import Model
from keras import backend as K
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import train_test_split
from tqdm import tqdm as tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from scipy.spatial import Voronoi
import math
import pickle
from scipy.interpolate import griddata
import tensorflow as tf
from keras.backend import tensorflow_backend
config = tf.ConfigProto(
gpu_options=tf.GPUOptions(
allow_growth=True,
visible_device_list="0"
)
)
session = tf.Session(config=config)
tensorflow_backend.set_session(session)
# Data can be downloaded from https://drive.google.com/<KEY>
x_num=256
y_num=96
#--- Prepare coordinate ---#
xcor = | pd.read_csv('./record_x.csv',header=None,delim_whitespace=False) | pandas.read_csv |
from flask import Flask, flash, current_app, session, render_template, request, redirect, jsonify, abort, send_file
from flask_calendar.calendar_data import CalendarData
from flask_calendar.gregorian_calendar import GregorianCalendar
from flask_calendar.db_setup import init_db, db_session
from flask_calendar.models import Project, Pms, Apikeys
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
from flask_calendar.app import app
from flask_calendar.call_providers import send_email, send_rest1
from datetime import datetime, timedelta
from sqlalchemy import and_
import calendar
import pandas as pd
from io import BytesIO
import xlsxwriter
from flask_calendar.calendar_functions import check_calendar_duty, get_duty_project, get_day_of_week, get_dutys
def export_to_excel(m,y):
with app.app_context():
if m:
month_days= int(calendar.monthrange(int(y), int(m))[1])
month_name = GregorianCalendar.MONTH_NAMES[int(m) - 1]
else:
now = datetime.now()
month_days= int(calendar.monthrange(now.year, now.month)[1])
month_name = mydate = now.strftime("%B")
m=now.strftime("%m")
if y:
year=int(y)
else:
y=int(datetime.now().year)
if request.method == 'POST':
date = request.form.get("date", "")
fragments = re.split("-", date)
try:
m = int(fragments[1])
month = max(min(m, 12), 1)
month_name = GregorianCalendar.MONTH_NAMES[month - 1]
y = int(fragments[0])
month_days= int(calendar.monthrange(y, m)[1])
except Exception:
False
month_days=list(range(1,(month_days + 1)))
dutys=get_dutys()
days=month_days
data={}
for duty in dutys:
tmp_list=[]
duty_days=0
for day in days:
if check_calendar_duty(duty,y,m,day) == 'X':
duty_days=duty_days + 1
tmp_list.append(check_calendar_duty(duty,y,m,day))
tmp_list.insert(len(tmp_list),duty_days)
tmp_list.insert(0,str(get_duty_project(duty)))
data[str(duty)] = tmp_list
days_week=['Project']
for day in days:
days_week.append( str(day) + ' ' + str(get_day_of_week(y,m,day)) )
days_week.insert(len(days_week),"Days")
df = pd.DataFrame.from_dict(data, orient='index')
df.columns = days_week
df.index.name = 'Name'
output = BytesIO()
writer = | pd.ExcelWriter(output, engine='xlsxwriter') | pandas.ExcelWriter |
"""
Support function for mod handling
Author:
<NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
from pandas.io.parsers import read_csv
import itertools as iter
# from lol_file
def get_modularity_value_from_lol_file(lol_file):
"""get_modularity_value_from_lol_file"""
with open(lol_file, 'r') as f:
for line in f.readlines():
split_line = line.strip().split(' ')
print(split_line)
if split_line[0] == 'Q':
print("Found modularity value line")
return split_line[2]
print("Unable to find modularity line in file, returning -1")
return -1.0
# reading info files
# from info-nodes
def get_max_degree_from_node_info_file(info_nodes_file):
"""Return max degree AND index and name of max degree (radatools based)"""
df = pd.read_table(info_nodes_file)
max_degree_value = df['Degree'].max()
md_indexes = df[df['Degree'] == max_degree_value].Index.values[0]
md_names = df[df['Degree'] == max_degree_value].Name.values[0]
return max_degree_value, md_indexes, md_names
def get_strength_values_from_info_nodes_file(info_nodes_file):
"""Read strength from Network_Properties node results"""
info_nodes = read_csv(info_nodes_file, sep="\t")
return info_nodes['Strength'].values
def get_strength_pos_values_from_info_nodes_file(info_nodes_file):
"""Read positive strength from Network_Properties node results"""
info_nodes = | read_csv(info_nodes_file, sep="\t") | pandas.io.parsers.read_csv |
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import datetime
# from datetime import datetime
dire = '../../data/'
start = datetime.datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = | pd.read_csv(dire + 'train/orderFuture_train6.csv', encoding='utf-8') | pandas.read_csv |
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from plateau.io.eager import (
read_dataset_as_dataframes,
read_table,
store_dataframes_as_dataset,
)
from plateau.io.testing.read import * # noqa
@pytest.fixture(
params=["dataframe", "table"],
ids=["dataframe", "table"],
)
def output_type(request):
# TODO: get rid of this parametrization and split properly into two functions
return request.param
def _read_table(*args, **kwargs):
kwargs.pop("dispatch_by", None)
res = read_table(*args, **kwargs)
if len(res):
# Array split conserves dtypes
return np.array_split(res, len(res))
else:
return [res]
# FIXME: handle removal of metparittion function properly.
# FIXME: consolidate read_Dataset_as_dataframes (replaced by iter)
def _read_dataset(output_type, *args, **kwargs):
if output_type == "table":
return _read_table
elif output_type == "dataframe":
return read_dataset_as_dataframes
else:
raise NotImplementedError()
@pytest.fixture()
def bound_load_dataframes(output_type):
return _read_dataset(output_type)
@pytest.fixture()
def backend_identifier():
return "eager"
def test_read_table_eager(dataset, store_session, use_categoricals):
if use_categoricals:
categories = ["P"]
else:
categories = None
df = read_table(
store=store_session,
dataset_uuid="dataset_uuid",
categoricals=categories,
)
expected_df = pd.DataFrame(
{
"P": [1, 2],
"L": [1, 2],
"TARGET": [1, 2],
"DATE": [datetime.date(2010, 1, 1), datetime.date(2009, 12, 31)],
}
)
if categories:
expected_df = expected_df.astype({"P": "category"})
# No stability of partitions
df = df.sort_values(by="P").reset_index(drop=True)
pdt.assert_frame_equal(df, expected_df, check_dtype=True, check_like=True)
def test_read_table_with_columns(dataset, store_session):
df = read_table(
store=store_session,
dataset_uuid="dataset_uuid",
columns=["P", "L"],
)
expected_df = pd.DataFrame({"P": [1, 2], "L": [1, 2]})
# No stability of partitions
df = df.sort_values(by="P").reset_index(drop=True)
expected_df = expected_df.sort_values(by="P").reset_index(drop=True)
pdt.assert_frame_equal(df, expected_df, check_dtype=False, check_like=True)
def test_read_table_simple_list_for_cols_cats(dataset, store_session):
df = read_table(
store=store_session,
dataset_uuid="dataset_uuid",
columns=["P", "L"],
categoricals=["P", "L"],
)
expected_df = pd.DataFrame({"P": [1, 2], "L": [1, 2]})
# No stability of partitions
df = df.sort_values(by="P").reset_index(drop=True)
expected_df = expected_df.sort_values(by="P").reset_index(drop=True)
expected_df = expected_df.astype("category")
| pdt.assert_frame_equal(df, expected_df, check_dtype=False, check_like=True) | pandas.testing.assert_frame_equal |
"""Backtester"""
from copy import deepcopy
import unittest
import pandas as pd
import pytest
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from soam.constants import (
ANOMALY_PLOT,
DS_COL,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
PLOT_CONFIG,
Y_COL,
)
from soam.models.prophet import SkProphet
from soam.plotting.forecast_plotter import ForecastPlotterTask
from soam.workflow import (
Backtester,
BaseDataFrameTransformer,
Forecaster,
Transformer,
compute_metrics,
)
from soam.workflow.backtester import METRICS_KEYWORD, PLOT_KEYWORD, RANGES_KEYWORD
from tests.helpers import sample_data_df # pylint: disable=unused-import
def test_compute_metrics():
"""Function to compute performance metrics."""
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
expected_output = {'mae': 0.5, 'mse': 0.375}
output = compute_metrics(y_true, y_pred, metrics)
unittest.TestCase().assertDictEqual(expected_output, output)
class SimpleProcessor(BaseDataFrameTransformer):
"""Create a Simple Processor object."""
def __init__(self, **fit_params): # pylint:disable=super-init-not-called
self.preproc = StandardScaler(**fit_params)
def fit(self, df_X):
self.preproc.fit(df_X[Y_COL].values.reshape(-1, 1))
return self
def transform(self, df_X, inplace=True):
if not inplace:
df_X = df_X.copy()
df_X[Y_COL] = self.preproc.transform(df_X[Y_COL].values.reshape(-1, 1)) + 10
return df_X
def assert_backtest_fold_result_common_checks(rv, ranges=None, plots=None):
"""Backtest fold result common checks assertion."""
assert tuple(rv) == (RANGES_KEYWORD, METRICS_KEYWORD, PLOT_KEYWORD)
assert rv[RANGES_KEYWORD] == ranges
assert rv[PLOT_KEYWORD].name == plots
def assert_backtest_fold_result(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
for metric_name, values in metrics.items():
assert metric_name in rv[METRICS_KEYWORD]
if isinstance(values, dict):
for measure_name, value in values.items():
assert value, pytest.approx(rv[METRICS_KEYWORD][measure_name], 0.01)
else:
assert values, pytest.approx(rv[METRICS_KEYWORD][metric_name], 0.01)
def assert_backtest_all_folds_result(rvs, expected_values):
"""Backtest all fold result assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result(rv, **evs)
def assert_backtest_fold_result_aggregated(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result aggregated assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
output_metrics = pd.DataFrame(rv[METRICS_KEYWORD])
expected_metrics = pd.DataFrame(metrics)
pd.testing.assert_frame_equal(output_metrics, expected_metrics, rtol=1e-1)
def assert_backtest_all_folds_result_aggregated(rvs, expected_values):
"""Backtest all fold result aggregated assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result_aggregated(rv, **evs)
def test_integration_backtester_single_fold(
tmp_path, sample_data_df
): # pylint: disable=redefined-outer-name
"""Backtest single fold integration test."""
test_window = 10
train_data = sample_data_df
forecaster = Forecaster(model=SkProphet(), output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2016-05-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 0.19286372252777645, 'mse': 0.07077117049346579},
'plots': '0_forecast_2013020100_2015080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
def test_integration_backtester_multi_fold(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.140921182444867, 'mse': 2.4605768804352675},
'plots': '0_forecast_2013020100_2015080100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2015-08-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.600049020613293, 'mse': 4.383723067139095},
'plots': '0_forecast_2015080100_2018020100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2018-02-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 3.1358162976127217, 'mse': 12.666965373730687},
'plots': '0_forecast_2018020100_2020080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
# TODO: It maybe a good visual aggregation to include all metrics in one plot. This
# TODO: is not possible with the current implementation.
def test_integration_backtester_multi_fold_default_aggregation(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold default aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation="default",
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
| pd.Timestamp('2013-02-01 00:00:00') | pandas.Timestamp |
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
import matplotlib.pyplot as plt
def plot_roc_curve( y_predict_proba, y_truth):
y_score = np.array(y_predict_proba)
if len(y_truth.shape) == 1:
dummies = | pd.get_dummies(y_truth) | pandas.get_dummies |
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Utility functions for performing cross-validation for model training/testing.
"""
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.docker import load_docker_image, make_docker_run_command
from morf.utils.config import MorfJobConfig
from morf.utils import fetch_complete_courses, fetch_sessions, download_train_test_data, initialize_input_output_dirs, make_feature_csv_name, make_label_csv_name, clear_s3_subdirectory, upload_file_to_s3, download_from_s3, initialize_labels, aggregate_session_input_data
from morf.utils.s3interface import make_s3_key_path
from morf.utils.api_utils import collect_course_cv_results
from multiprocessing import Pool
import logging
import tempfile
import pandas as pd
import os
import numpy as np
from sklearn.model_selection import StratifiedKFold
module_logger = logging.getLogger(__name__)
CONFIG_FILENAME = "config.properties"
mode = "cv"
def make_folds(job_config, raw_data_bucket, course, k, label_type, raw_data_dir="morf-data/"):
"""
Utility function to be called by create_course_folds for creating the folds for a specific course.
:return:
"""
logger = set_logger_handlers(module_logger, job_config)
user_id_col = "userID"
label_col = "label_value"
logger.info("creating cross-validation folds for course {}".format(course))
with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
input_dir, output_dir = initialize_input_output_dirs(working_dir)
# download data for each session
for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course,
fetch_all_sessions=True):
# get the session feature and label data
download_train_test_data(job_config, raw_data_bucket, raw_data_dir, course, session, input_dir,
label_type=label_type)
# merge features to ensure splits are correct
feat_csv_path = aggregate_session_input_data("features", os.path.join(input_dir, course))
label_csv_path = aggregate_session_input_data("labels", os.path.join(input_dir, course))
feat_df = | pd.read_csv(feat_csv_path, dtype=object) | pandas.read_csv |
import logging
import os
import shutil
import warnings
warnings.simplefilter("ignore")
import matplotlib
import pandas as pd
matplotlib.use('agg') # no need for tk
from autogluon.task.tabular_prediction.tabular_prediction import TabularPrediction as task
from autogluon.utils.tabular.utils.savers import save_pd, save_pkl
import autogluon.utils.tabular.metrics as metrics
from frameworks.shared.callee import call_run, result, output_subdir, utils
log = logging.getLogger(__name__)
def run(dataset, config):
log.info("\n**** AutoGluon ****\n")
metrics_mapping = dict(
acc=metrics.accuracy,
auc=metrics.roc_auc,
f1=metrics.f1,
logloss=metrics.log_loss,
mae=metrics.mean_absolute_error,
mse=metrics.mean_squared_error,
r2=metrics.r2,
# rmse=metrics.root_mean_squared_error, # metrics.root_mean_squared_error incorrectly registered in autogluon REGRESSION_METRICS
rmse=metrics.mean_squared_error, # for now, we can let autogluon optimize training on mse: anyway we compute final score from predictions.
)
perf_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None
if perf_metric is None:
# TODO: figure out if we are going to blindly pass metrics through, or if we use a strict mapping
log.warning("Performance metric %s not supported.", config.metric)
is_classification = config.type == 'classification'
training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}
column_names, _ = zip(*dataset.columns)
column_types = dict(dataset.columns)
train = pd.DataFrame(dataset.train.data, columns=column_names).astype(column_types, copy=False)
label = dataset.target.name
print(f"Columns dtypes:\n{train.dtypes}")
output_dir = output_subdir("models", config)
with utils.Timer() as training:
predictor = task.fit(
train_data=train,
label=label,
problem_type=dataset.problem_type,
output_directory=output_dir,
time_limits=config.max_runtime_seconds,
eval_metric=perf_metric.name,
**training_params
)
test = pd.DataFrame(dataset.test.data, columns=column_names).astype(column_types, copy=False)
X_test = test.drop(columns=label)
y_test = test[label]
with utils.Timer() as predict:
predictions = predictor.predict(X_test)
probabilities = predictor.predict_proba(dataset=X_test, as_pandas=True, as_multiclass=True) if is_classification else None
prob_labels = probabilities.columns.values.tolist() if probabilities is not None else None
leaderboard = predictor._learner.leaderboard(X_test, y_test, silent=True)
with | pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000) | pandas.option_context |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisCleaned[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": | pandas.StringDtype() | pandas.StringDtype |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Sized,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from collections.abc import Iterable
from datetime import tzinfo
from functools import reduce
from io import BytesIO
import json
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like # type: ignore[attr-defined]
from pandas.tseries.offsets import DateOffset
import pyarrow as pa
import pyarrow.parquet as pq
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
TimestampNTZType,
DecimalType,
StringType,
DateType,
StructType,
DataType,
)
from pyspark import pandas as ps
from pyspark.pandas._typing import Axis, Dtype, Label, Name
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.utils import (
align_diff_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
validate_axis,
log_advice,
)
from pyspark.pandas.frame import DataFrame, _reduce_spark_multi
from pyspark.pandas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
HIDDEN_COLUMNS,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.indexes import Index, DatetimeIndex, TimedeltaIndex
from pyspark.pandas.indexes.multi import MultiIndex
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"date_range",
"to_timedelta",
"timedelta_range",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"merge_asof",
"to_numeric",
"broadcast",
"read_orc",
]
def from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:
"""Create a pandas-on-Spark DataFrame, Series or Index from a pandas DataFrame, Series or Index.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame or pandas.Series
pandas DataFrame or Series to read.
Returns
-------
Series or DataFrame
If a pandas Series is passed in, this function returns a pandas-on-Spark Series.
If a pandas DataFrame is passed in, this function returns a pandas-on-Spark DataFrame.
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise TypeError("Unknown data type: {}".format(type(pobj).__name__))
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> ps.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> ps.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path: str,
sep: str = ",",
header: Union[str, int, None] = "infer",
names: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
usecols: Optional[Union[List[int], List[str], Callable[[str], bool]]] = None,
squeeze: bool = False,
mangle_dupe_cols: bool = True,
dtype: Optional[Union[str, Dtype, Dict[str, Union[str, Dtype]]]] = None,
nrows: Optional[int] = None,
parse_dates: bool = False,
quotechar: Optional[str] = None,
escapechar: Optional[str] = None,
comment: Optional[str] = None,
encoding: Optional[str] = None,
**options: Any,
) -> Union[DataFrame, Series]:
"""Read CSV (comma-separated) file into DataFrame or Series.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, default ‘infer’
Whether to to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
nrows : int, default None
Number of rows to read from the CSV file.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape delimiter
comment: str, optional
Indicates the line should not be parsed.
encoding: str, optional
Indicates the encoding to read file
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> ps.read_csv('data.csv') # doctest: +SKIP
"""
# For latin-1 encoding is same as iso-8859-1, that's why its mapped to iso-8859-1.
encoding_mapping = {"latin-1": "iso-8859-1"}
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols) # type: ignore[assignment]
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if encoding is not None:
reader.option("encoding", encoding_mapping.get(encoding, encoding))
column_labels: Dict[Any, str]
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
column_labels = {col: col for col in sdf.columns}
else:
sdf = reader.csv(path)
if is_list_like(names):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.columns):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(sdf.schema), len(names))
)
column_labels = dict(zip(names, sdf.columns))
elif header is None:
column_labels = dict(enumerate(sdf.columns))
else:
column_labels = {col: col for col in sdf.columns}
if usecols is not None:
missing: List[Union[int, str]]
if callable(usecols):
column_labels = {
label: col for label, col in column_labels.items() if usecols(label)
}
missing = []
elif all(isinstance(col, int) for col in usecols):
usecols_ints = cast(List[int], usecols)
new_column_labels = {
label: col
for i, (label, col) in enumerate(column_labels.items())
if i in usecols_ints
}
missing = [
col
for col in usecols_ints
if (
col >= len(column_labels)
or list(column_labels)[col] not in new_column_labels
)
]
column_labels = new_column_labels
elif all(isinstance(col, str) for col in usecols):
new_column_labels = {
label: col for label, col in column_labels.items() if label in usecols
}
missing = [col for col in usecols if col not in new_column_labels]
column_labels = new_column_labels
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(column_labels) > 0:
sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
column_labels = {}
if nrows is not None:
sdf = sdf.limit(nrows)
index_spark_column_names: List[str]
index_names: List[Label]
if index_col is not None:
if isinstance(index_col, (str, int)):
index_col = [index_col]
for col in index_col:
if col not in column_labels:
raise KeyError(col)
index_spark_column_names = [column_labels[col] for col in index_col]
index_names = [(col,) for col in index_col]
column_labels = {
label: col for label, col in column_labels.items() if label not in index_col
}
else:
log_advice(
"If `index_col` is not specified for `read_csv`, "
"the default index is attached which can cause additional overhead."
)
index_spark_column_names = []
index_names = []
psdf: DataFrame = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=[
label if is_name_like_tuple(label) else (label,) for label in column_labels
],
data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],
)
)
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
psdf[col] = psdf[col].astype(tpe)
else:
for col in psdf.columns:
psdf[col] = psdf[col].astype(dtype)
if squeeze and len(psdf.columns) == 1:
return first_series(psdf)
else:
return psdf
def read_json(
path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options: Any
) -> DataFrame:
"""
Convert a JSON string to DataFrame.
Parameters
----------
path : string
File path
lines : bool, default True
Read the file as a json object per line. It should be always True for now.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_json`, "
"the default index is attached which can cause additional overhead."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option. Note that
this parameter and `timestamp` parameter cannot be used together, otherwise it will raise a
`ValueError`.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option. Note that this parameter and `version` parameter
cannot be used together, otherwise it will raise a `ValueError`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> ps.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,
... mode='overwrite') # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 10
1 11
2 12
3 13
4 14
>>> ps.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
id
index
0 10
1 11
2 12
3 13
4 14
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_delta`, "
"the default index is attached which can cause additional overhead."
)
if version is not None and timestamp is not None:
raise ValueError("version and timestamp cannot be used together.")
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> ps.range(1).to_table('%s.my_table' % db)
>>> ps.read_table('%s.my_table' % db)
id
0 0
>>> ps.range(1).to_table('%s.my_table' % db, index_col="index")
>>> ps.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_table`, "
"the default index is attached which can cause additional overhead."
)
sdf = default_session().read.table(name)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Union[str, "StructType"] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_parquet
Examples
--------
>>> ps.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> ps.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> ps.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> ps.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_parquet(
path: str,
columns: Optional[List[str]] = None,
index_col: Optional[List[str]] = None,
pandas_metadata: bool = False,
**options: Any,
) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
pandas_metadata : bool, default: False
If True, try to respect the metadata if the Parquet file is written from pandas.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_spark_io
Examples
--------
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_parquet`, "
"the default index is attached which can cause additional overhead."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if columns is not None:
columns = list(columns)
index_names = None
if index_col is None and pandas_metadata:
# Try to read pandas metadata
@no_type_check
@pandas_udf("index_col array<string>, index_names array<string>")
def read_index_metadata(pser: pd.Series) -> pd.DataFrame:
binary = pser.iloc[0]
metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata
if b"pandas" in metadata:
pandas_metadata = json.loads(metadata[b"pandas"].decode("utf8"))
if all(isinstance(col, str) for col in pandas_metadata["index_columns"]):
index_col = []
index_names = []
for col in pandas_metadata["index_columns"]:
index_col.append(col)
for column in pandas_metadata["columns"]:
if column["field_name"] == col:
index_names.append(column["name"])
break
else:
index_names.append(None)
return pd.DataFrame({"index_col": [index_col], "index_names": [index_names]})
return pd.DataFrame({"index_col": [None], "index_names": [None]})
index_col, index_names = (
default_session()
.read.format("binaryFile")
.load(path)
.limit(1)
.select(read_index_metadata("content").alias("index_metadata"))
.select("index_metadata.*")
.head()
)
psdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in psdf.columns]
if len(new_columns) > 0:
psdf = psdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_spark_columns, index_names = _get_index_map(sdf, index_col)
psdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
)
)
if index_names is not None:
psdf.index.names = index_names
return psdf
def read_clipboard(sep: str = r"\s+", **kwargs: Any) -> DataFrame:
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))
def read_excel(
io: Union[str, Any],
sheet_name: Union[str, int, List[Union[str, int]], None] = 0,
header: Union[int, List[int]] = 0,
names: Optional[List] = None,
index_col: Optional[List[int]] = None,
usecols: Optional[Union[int, str, List[Union[int, str]], Callable[[str], bool]]] = None,
squeeze: bool = False,
dtype: Optional[Dict[str, Union[str, Dtype]]] = None,
engine: Optional[str] = None,
converters: Optional[Dict] = None,
true_values: Optional[Any] = None,
false_values: Optional[Any] = None,
skiprows: Optional[Union[int, List[int]]] = None,
nrows: Optional[int] = None,
na_values: Optional[Any] = None,
keep_default_na: bool = True,
verbose: bool = False,
parse_dates: Union[bool, List, Dict] = False,
date_parser: Optional[Callable] = None,
thousands: Optional[str] = None,
comment: Optional[str] = None,
skipfooter: int = 0,
convert_float: bool = True,
mangle_dupe_cols: bool = True,
**kwds: Any,
) -> Union[DataFrame, Series, Dict[str, Union[DataFrame, Series]]]:
"""
Read an Excel file into a pandas-on-Spark DataFrame or Series.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. The value URL must be available in Spark's DataFrameReader.
.. note::
If the underlying Spark is below 3.0, the parameter as a string is not supported.
You can use `ps.from_pandas(pd.read_excel(...))` as a workaround.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. pandas-on-Spark will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> ps.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> ps.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> ps.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> ps.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> ps.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> ps.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def pd_read_excel(
io_or_bin: Any, sn: Union[str, int, List[Union[str, int]], None], sq: bool
) -> pd.DataFrame:
return pd.read_excel(
io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,
sheet_name=sn,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=sq,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates, # type: ignore[arg-type]
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
if isinstance(io, str):
# 'binaryFile' format is available since Spark 3.0.0.
binaries = default_session().read.format("binaryFile").load(io).select("content").head(2)
io_or_bin = binaries[0][0]
single_file = len(binaries) == 1
else:
io_or_bin = io
single_file = True
pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name, sq=squeeze)
if single_file:
if isinstance(pdf_or_psers, dict):
return {
sn: cast(Union[DataFrame, Series], from_pandas(pdf_or_pser))
for sn, pdf_or_pser in pdf_or_psers.items()
}
else:
return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))
else:
def read_excel_on_spark(
pdf_or_pser: Union[pd.DataFrame, pd.Series],
sn: Union[str, int, List[Union[str, int]], None],
) -> Union[DataFrame, Series]:
if isinstance(pdf_or_pser, pd.Series):
pdf = pdf_or_pser.to_frame()
else:
pdf = pdf_or_pser
psdf = cast(DataFrame, from_pandas(pdf))
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)
)
def output_func(pdf: pd.DataFrame) -> pd.DataFrame:
pdf = pd.concat(
[pd_read_excel(bin, sn=sn, sq=False) for bin in pdf[pdf.columns[0]]]
)
reset_index = pdf.reset_index()
for name, col in reset_index.iteritems():
dt = col.dtype
if | is_datetime64_dtype(dt) | pandas.api.types.is_datetime64_dtype |
# -*- coding: utf-8 -*-
from sklearn.base import TransformerMixin
#from category_encoders.ordinal import OrdinalEncoder
#import numpy as np
import pandas as pd
import copy
from pandas.api.types import is_numeric_dtype,is_string_dtype
from joblib import Parallel,delayed,effective_n_jobs
import numpy as np
from BDMLtools.fun import raw_to_bin_sc,Specials
from BDMLtools.base import Base
class woeTransformer(Base,Specials,TransformerMixin):
"""
对数据进行WOE编码
Params:
------
varbin:BDMLtools.varReport(...).fit(...).var_report_dict,dict格式,woe编码参照此编码产生
special_values,特殊值指代值,若数据中某些值或某列某些值需特殊对待(这些值不是np.nan)时设定
请特别注意:special_values必须与产生varbin的函数的special_values一致,否则special_values的woe编码将出现错误结果
+ None,保证数据默认
+ list=[value1,value2,...],数据中所有列的值在[value1,value2,...]中都会被替换,字符被替换为'missing',数值被替换为np.nan
+ dict={col_name1:[value1,value2,...],...},数据中指定列替换,被指定的列的值在[value1,value2,...]中都会被替换,字符被替换为'missing',数值被替换为np.nan
woe_missing=None,float,缺失值的woe调整值,默认None即不调整.当missing箱样本量极少时,woe值可能不具备代表性,此时可调整varbin中的woe替换值至合理水平,例如设定为0
经过替换后的varbin=保存在self.varbin中.本模块暂不支持对不同特征的woe调整值做区别处理,所有特征的woe调整值均为woe_missing
woe_special=None,float,特殊值的woe调整值,默认None即不调整.当special箱样本量极少时,woe值可能不具备代表性,此时可调整varbin中的woe替换值至合理水平,例如设定为0
经过替换后的varbin=保存在self.varbin中.本模块暂不支持对不同特征的woe调整值做区别处理,所有特征的woe调整值均为woe_special
distr_limit=0.01,float,当woe_missing或woe_special不为None时,若missing或special箱占比低于distr_limit时才执行替换
check_na:bool,为True时,若经woe编码后编码数据出现了缺失值,程序将报错终止,可能的错误原因:
+ 某箱样本量太少,且该列是字符列的可能性极高
+ test或oot数据相应列的取值超出了train的范围,且该列是字符列的可能性极高
+ special_value设定前后不一致(产生varbin的speical value与本模块的speical value要一致)
dtype,可选'float32'与'float64',转换woe数据为np.float32/np.float64格式,breaks也会以np.float32/np.float64格式分段数据
+ 模块会使用varbin中的breaks分段数据,其本身为np.float64,因此fit中的数据的number列也必须为float64,否则会因为格式不一致产生精度问题
+ 若fit中的数据的number列为float32型,则请设定为float32以保证不因格式不一致而产生精度问题
+ 请不要在原始数据中共用不同的数值精度格式,例如float32与float64共用,int32与int64共用...,请使用bm.dtypeAllocator统一建模数据的格式
n_jobs,int,并行数量,默认1(所有core),在数据量非常大,列非常多的情况下可提升效率但会增加内存占用,若数据量较少可设定为1
verbose,int,并行信息输出等级
Attributes:
-------
"""
def __init__(self,varbin,n_jobs=1,verbose=0,special_values=None,woe_special=None,check_na=True,woe_missing=None,distr_limit=0.01,dtype='float64'):
self.varbin=varbin
self.n_jobs=n_jobs
self.verbose=verbose
self.check_na=check_na
self.special_values=special_values
self.woe_missing=woe_missing
self.woe_special=woe_special
self.distr_limit=distr_limit
self.dtype=dtype
def transform(self,X,y=None):
"""
WOE转换
"""
self._check_param_dtype(self.dtype)
self._check_X(X)
self.varbin=copy.deepcopy(self.varbin)
if isinstance(self.woe_missing,(int,float)):
for key in self.varbin:
if 'missing' in self.varbin[key].index.tolist() and self.varbin[key].loc['missing','count_distr']<self.distr_limit:
self.varbin[key].loc['missing','woe'] = self.woe_missing
elif self.woe_missing is None:
pass
else:
raise ValueError("woe_missing in (None,int,float).")
if isinstance(self.woe_special,(int,float)):
for key in self.varbin:
if 'missing' in self.varbin[key].index.tolist() and self.varbin[key].loc['missing','count_distr']<self.distr_limit:
self.varbin[key].loc['special','woe'] = self.woe_special
elif self.woe_special is None:
pass
else:
raise ValueError("woe_special in (None,int,float).")
n_jobs=effective_n_jobs(self.n_jobs)
p=Parallel(n_jobs=n_jobs,verbose=self.verbose)
res=p(delayed(self._woe_map)(X[key],self.varbin[key],self.check_na,self.special_values,self.dtype)
for key in self.varbin)
X_woe=pd.concat({col:col_woe for col,col_woe in res},axis=1)
return X_woe
def fit(self,X=None,y=None):
return self
def _woe_map(self,col,bin_df,check_na=True,special_values=None,dtype='float64'):
col=self._sp_replace_single(col,self._check_spvalues(col.name,special_values),fill_num=np.finfo(np.float32).max,fill_str='special')
if is_numeric_dtype(col):
bin_df_drop= bin_df[~bin_df['breaks'].isin([-np.inf,'missing','special',np.inf])]
woe_nan= bin_df[bin_df['breaks'].eq("missing")]['woe'][0]
woe_sp= bin_df[bin_df['breaks'].eq("special")]['woe'][0]
breaks=bin_df_drop['breaks'].astype('float64').tolist()
woe=bin_df[~bin_df['breaks'].isin(['missing','special'])]['woe'].tolist()
if special_values:
breaks_cut=breaks+[np.finfo(np.float32).max] if dtype=='float64' else np.float32(breaks+[np.finfo(np.float32).max]).tolist()
col_woe= | pd.cut(col,[-np.inf]+breaks_cut+[np.inf],labels=woe+[woe_sp],right=False,ordered=False).astype(dtype) | pandas.cut |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy import dtype
from matplotlib.pyplot import ylabel
from matplotlib.cm import ScalarMappable
from matplotlib.pyplot import savefig
import math
from getCpuUsageForStage import *
import sys
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i", "--inputFile")
parser.add_argument("-t", "--topFile")
parser.add_argument("-o", "--outputFile")
args = parser.parse_args(sys.argv[1:])
inputFileName = args.inputFile
topFileName = args.topFile
outputFileName = args.outputFile
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import itertools
from collections import deque
import networkx as nx
import numpy as np
import pandas as pd
import scanpy as sc
from .._util import CapitalData
class Tree_Alignment:
def __init__(self):
self.__successors1 = None
self.__postorder1 = None
self.__tree1 = None
self.__successors2 = None
self.__postorder2 = None
self.__tree2 = None
self.__forestdistance = None
self.__traceforest = None
self.__treedistance = None
self.__tracetree = None
self.__alignmentcost = None
def tree_alignment(
self,
adata1,
adata2,
cost=1.0,
N_1=2000,
N_2=2000
):
COST = cost
gene_list = self.sort_data(
adata1, adata2, N_1, N_2)
adata1.uns["capital"]["intersection_genes"] = np.array(
gene_list, dtype=object)
adata2.uns["capital"]["intersection_genes"] = np.array(
gene_list, dtype=object)
self._dp(adata1, adata2, gene_list, COST)
alignedtree = self._traceback()
path_cluster_list = []
source_node = list(nx.topological_sort(alignedtree))[0]
for node in list(alignedtree.nodes):
if alignedtree.out_degree(node) == 0:
cluster_list = nx.shortest_path(
alignedtree, source=source_node, target=node)
route1 = [i[0] for i in cluster_list]
route2 = [i[1] for i in cluster_list]
path_cluster_list.append([route1, route2])
alignmentdict = {"alignment{:03d}".format(i):
{"data1": clusters[0],
"data2": clusters[1]}
for i, clusters in enumerate(path_cluster_list)}
aligned_data = CapitalData(
adata1.copy(),
adata2.copy(),
alignedtree,
np.array([self.__alignmentcost], dtype=int),
np.array(gene_list, dtype=object),
alignmentdict,
)
return aligned_data
def _set_initial_condition(
self,
data1,
data2,
cost=1.0
):
self.__successors1 = data1.uns["capital"]["tree"]["successors"]
self.__postorder1 = data1.uns["capital"]["tree"]["postorder"]
self.__tree1 = nx.convert_matrix.from_pandas_adjacency(
data1.uns["capital"]["tree"]["tree"], create_using=nx.DiGraph)
self.__successors2 = data2.uns["capital"]["tree"]["successors"]
self.__postorder2 = data2.uns["capital"]["tree"]["postorder"]
self.__tree2 = nx.convert_matrix.from_pandas_adjacency(
data2.uns["capital"]["tree"]["tree"],create_using=nx.DiGraph)
# get combination of children
# D(F1[i],F2[j]) is stored in forestdistance.loc[i,j]
# D(F1[i1,i2],F2[j]) is stored in forestdistance.loc["(i1,i2)",j]
# D({T1[i]},F2[j]) is stored in forestdistance.loc["(i,)", j]
forest1_combinations = []
for child in self.__successors1.values():
if child.size == 1:
children = list(itertools.combinations(child, 1))
forest1_combinations.extend(children)
elif child.size >= 1:
for k in range(1, child.size):
children = list(itertools.combinations(child, k))
forest1_combinations.extend(children)
forest2_combinations = []
for child in self.__successors2.values():
if child.size == 1:
children = list(itertools.combinations(child, 1))
forest2_combinations.extend(children)
elif child.size >= 1:
for k in range(1, child.size):
children = list(itertools.combinations(child, k))
forest2_combinations.extend(children)
forest1 = [i for i in list(self.__tree1.nodes)] + \
forest1_combinations + ["#"]
forest2 = [j for j in list(self.__tree2.nodes)] + \
forest2_combinations + ["#"]
forest1 = list(map(str, forest1))
forest2 = list(map(str, forest2))
forest = pd.DataFrame(index=forest1, columns=forest2)
forest.loc["#", "#"] = 0
tree = pd.DataFrame(
index=list(map(str, list(self.__tree1))) + ["#"],
columns=list(map(str, list(self.__tree2))) + ["#"])
tree.loc["#", "#"] = 0
self.__forestdistance = forest
self.__traceforest = | pd.DataFrame(index=forest1, columns=forest2) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
alt =lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
from pandas import DateRange
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.] , index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
# NumPy limitiation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
| assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from pathlib import Path
from datetime import datetime, date
# Preprocess terms for TF-IDF
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from num2words import num2words
# end of preprocess
# LDA
from gensim import corpora, models
import pyLDAvis.gensim
# print in color
from termcolor import colored
# end LDA
import pandas as pd
import geopandas
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.corpus import wordnet
# SPARQL
import sparql
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
from matplotlib_venn_wordcloud import venn3_wordcloud
# multiprocessing
# BERT
from transformers import pipeline
# LOG
import logging
from logging.handlers import RotatingFileHandler
def biotexInputBuilder(tweetsofcity):
"""
Build and save a file formated for Biotex analysis
:param tweetsofcity: dictionary of { tweets, created_at }
:return: none
"""
biotexcorpus = []
for city in tweetsofcity:
# Get all tweets for a city :
listOfTweetsByCity = [tweets['tweet'] for tweets in tweetsofcity[city]]
# convert this list in a big string of tweets by city
document = '\n'.join(listOfTweetsByCity)
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def preprocessTerms(document):
"""
Pre process Terms according to
https://towardsdatascience.com/tf-idf-for-document-ranking-from-scratch-in-python-on-real-world-dataset-796d339a4089
/!\ Be carefull : it has a long execution time
:param:
:return:
"""
def lowercase(t):
return np.char.lower(t)
def removesinglechar(t):
words = word_tokenize(str(t))
new_text = ""
for w in words:
if len(w) > 1:
new_text = new_text + " " + w
return new_text
def removestopwords(t):
stop_words = stopwords.words('english')
words = word_tokenize(str(t))
new_text = ""
for w in words:
if w not in stop_words:
new_text = new_text + " " + w
return new_text
def removeapostrophe(t):
return np.char.replace(t, "'", "")
def removepunctuation(t):
symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
for i in range(len(symbols)):
data = np.char.replace(t, symbols[i], ' ')
data = np.char.replace(t, " ", " ")
data = np.char.replace(t, ',', '')
return data
def convertnumbers(t):
tokens = word_tokenize(str(t))
new_text = ""
for w in tokens:
try:
w = num2words(int(w))
except:
a = 0
new_text = new_text + " " + w
new_text = np.char.replace(new_text, "-", " ")
return new_text
doc = lowercase(document)
doc = removesinglechar(doc)
doc = removestopwords(doc)
doc = removeapostrophe(doc)
doc = removepunctuation(doc)
doc = removesinglechar(doc) # apostrophe create new single char
return doc
def biotexAdaptativeBuilderAdaptative(listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Build a input biotex file well formated at the level wanted by concatenate cities's tweets
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return:
"""
matrixAggDay = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv")
# concat date with city
matrixAggDay['city'] = matrixAggDay[['city', 'day']].agg('_'.join, axis=1)
del matrixAggDay['day']
## change index
matrixAggDay.set_index('city', inplace=True)
matrixFiltred = spatiotemporelFilter(matrix=matrixAggDay, listOfcities=listOfcities,
spatialLevel='state', period=period)
## Pre-process :Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixFiltred["city"], matrixFiltred["state"], matrixFiltred["country"], matrixFiltred["date"] = \
zip(*matrixFiltred.index.map(splitindex))
# Agregate by level
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state':
matrixFiltred = matrixFiltred.groupby('state')['tweetsList'].apply('.\n'.join).reset_index()
elif spatialLevel == 'country':
matrixFiltred = matrixFiltred.groupby('country')['tweetsList'].apply('.\n'.join).reset_index()
# Format biotex input file
biotexcorpus = []
for index, row in matrixFiltred.iterrows():
document = row['tweetsList']
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK-adaptativebiotex"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def ldHHTFIDF(listOfcities):
""" /!\ for testing only !!!!
Only work if nb of states = nb of cities
i.e for UK working on 4 states with their capitals...
"""
print(colored("------------------------------------------------------------------------------------------", 'red'))
print(colored(" - UNDER DEV !!! - ", 'red'))
print(colored("------------------------------------------------------------------------------------------", 'red'))
tfidfwords = pd.read_csv("elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv", index_col=0)
texts = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv", index_col=1)
listOfStatesTopics = []
for i, citystate in enumerate(listOfcities):
city = str(listOfcities[i].split("_")[0])
state = str(listOfcities[i].split("_")[1])
# print(str(i) + ": " + str(state) + " - " + city)
# tfidfwords = [tfidfwords.iloc[0]]
dictionary = corpora.Dictionary([tfidfwords.loc[state]])
textfilter = texts.loc[texts.index.str.startswith(city + "_")]
corpus = [dictionary.doc2bow(text.split()) for text in textfilter.tweetsList]
# Find the better nb of topics :
## Coherence measure C_v : Normalised PointWise Mutual Information (NPMI : co-occurence probability)
## i.e degree of semantic similarity between high scoring words in the topic
## and cosine similarity
nbtopics = range(2, 35)
coherenceScore = pd.Series(index=nbtopics, dtype=float)
for n in nbtopics:
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=n)
# Compute coherence score
## Split each row values
textssplit = textfilter.tweetsList.apply(lambda x: x.split()).values
coherence = models.CoherenceModel(model=lda, texts=textssplit, dictionary=dictionary, coherence='c_v')
coherence_result = coherence.get_coherence()
coherenceScore[n] = coherence_result
# print("level: " + str(state) + " - NB: " + str(n) + " - coherence LDA: " + str(coherenceScore[n]))
# Relaunch LDA with the best nbtopic
nbTopicOptimal = coherenceScore.idxmax()
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=nbTopicOptimal)
# save and visualisation
## save
for topic, listwords in enumerate(lda.show_topics()):
stateTopic = {'state': state}
ldaOuput = str(listwords).split(" + ")[1:]
for i, word in enumerate(ldaOuput):
# reformat lda output for each word of topics
stateTopic[i] = ''.join(x for x in word if x.isalpha())
listOfStatesTopics.append(stateTopic)
## Visualisation
try:
vis = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
pyLDAvis.save_html(vis, "elasticsearch/analyse/lda/lda-tfidf_" + str(state) + ".html")
except:
print("saving pyLDAvis failed. Nb of topics for " + state + ": " + nbTopicOptimal)
# Save file
listOfStatesTopicsCSV = pd.DataFrame(listOfStatesTopics)
listOfStatesTopicsCSV.to_csv("elasticsearch/analyse/lda/topicBySate.csv")
def wordnetCoverage(pdterms):
"""
add an additionnal column with boolean term is in wordnet
:param pdterms: pd.dataframes of terms. Must have a column with "terms" as a name
:return: pdterms with additionnal column with boolean term is in wordnet
"""
# Add a wordnet column boolean type : True if word is in wordnet, False otherwise
pdterms['wordnet'] = False
# Loop on terms and check if there are in wordnet
for index, row in pdterms.iterrows():
if len(wordnet.synsets(row['terms'])) != 0:
pdterms.at[index, 'wordnet'] = True
return pdterms
def sparqlquery(thesaurus, term):
"""
Sparql query. This methods have be factorize to be used in case of multiprocessign
:param thesaurus: which thesaurus to query ? agrovoc or mesh
:param term: term to align with thesaurus
:return: sparql result querry
"""
# Define MeSH sparql endpoint and query
endpointmesh = 'http://id.nlm.nih.gov/mesh/sparql'
qmesh = (
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>'
'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>'
'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
'PREFIX owl: <http://www.w3.org/2002/07/owl#>'
'PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>'
'PREFIX mesh: <http://id.nlm.nih.gov/mesh/>'
'PREFIX mesh2020: <http://id.nlm.nih.gov/mesh/2020/>'
'PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>'
'PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>'
''
'ask '
'FROM <http://id.nlm.nih.gov/mesh> '
'WHERE { '
' ?meshTerms a meshv:Term .'
' ?meshTerms meshv:prefLabel ?label .'
' FILTER(lang(?label) = "en").'
' filter(REGEX(?label, "^' + str(term) + '$", "i"))'
''
'}'
)
# Define agrovoc sparql endpoint and query
endpointagrovoc = 'http://agrovoc.uniroma2.it/sparql'
qagrovoc = ('PREFIX skos: <http://www.w3.org/2004/02/skos/core#> '
'PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#> '
'ask WHERE {'
'?myterm skosxl:literalForm ?labelAgro.'
'FILTER(lang(?labelAgro) = "en").'
'filter(REGEX(?labelAgro, "^' + str(term) + '(s)*$", "i"))'
'}')
# query mesh
if thesaurus == "agrovoc":
q = qagrovoc
endpoint = endpointagrovoc
elif thesaurus == "mesh":
q = qmesh
endpoint = endpointmesh
else:
raise Exception('Wrong thesaurus given')
try:
result = sparql.query(endpoint, q, timeout=30)
# Sometimes Endpoint can bug on a request.
# SparqlException raised by sparql-client if timeout is reach
# other exception (That I have not identify yet) when endpoint send non well formated answer
except:
result = "endpoint error"
return result
def agrovocCoverage(pdterms):
"""
Add an additionnal column with boolean if term is in agrovoc
:param pdterms: same as wordnetCoverage
:return: same as wornetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a agrovoc column boolean type : True if terms is in Agrovoc
pdterms['agrovoc'] = False
# Loop on term
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="agrovoc"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('agrovoc', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'agrovoc'] = "Error"
elif result.hasresult():
pdterms.at[index, 'agrovoc'] = True
print("Agrovoc number of error: " + str(endpointerror))
return pdterms
def meshCoverage(pdterms):
"""
Add an additionnal column with boolean if term is in MeSH
:param pdterms: same as wordnetCoverage
:return: same as wornetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a MeSH column boolean type : True if terms is in Mesh
pdterms['mesh'] = False
# Loop on term with multiprocessing
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="mesh"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('mesh', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'mesh'] = "Error"
elif result.hasresult():
pdterms.at[index, 'mesh'] = True
print("Mesh number of error: " + str(endpointerror))
return pdterms
def compareWithHTFIDF(number_of_term, dfToCompare, repToSave):
"""
Only used for ECIR2020 not for NLDB2021
:param number_of_term:
:param dfToCompare:
:param repToSave:
:return:
"""
# Stack / concatenate all terms from all states in one column
HTFIDFUniquedf = concatenateHTFIDFBiggestscore()[:number_of_term]
# select N first terms
dfToCompare = dfToCompare[:number_of_term]
common = pd.merge(dfToCompare, HTFIDFUniquedf, left_on='terms', right_on='terms', how='inner')
# del common['score']
common = common.terms.drop_duplicates()
common = common.reset_index()
del common['index']
common.to_csv("elasticsearch/analyse/" + repToSave + "/common.csv")
# Get what terms are specific to Adapt-TF-IDF
print(dfToCompare)
HTFIDFUniquedf['terms'][~HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])].dropna()
condition = HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])
specificHTFIDF = HTFIDFUniquedf.drop(HTFIDFUniquedf[condition].index)
specificHTFIDF = specificHTFIDF.reset_index()
del specificHTFIDF['index']
specificHTFIDF.to_csv("elasticsearch/analyse/" + repToSave + "/specific-H-TFIDF.csv")
# Get what terms are specific to dfToCompare
dfToCompare['terms'][~dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])].dropna()
condition = dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])
specificdfToCompare = dfToCompare.drop(dfToCompare[condition].index)
specificdfToCompare = specificdfToCompare.reset_index()
del specificdfToCompare['index']
specificdfToCompare.to_csv("elasticsearch/analyse/" + repToSave + "/specific-reference.csv")
# Print stats
percentIncommon = len(common) / len(HTFIDFUniquedf) * 100
percentOfSpecificHTFIDF = len(specificHTFIDF) / len(HTFIDFUniquedf) * 100
print("Percent in common " + str(percentIncommon))
print("Percent of specific at H-TFIDF : " + str(percentOfSpecificHTFIDF))
def HTFIDF_comparewith_TFIDF_TF():
"""
Only used for ECIR2020 not for NLDB2021
.. warnings:: /!\ under dev !!!. See TODO below
.. todo::
- Remove filter and pass it as args :
- period
- list of Cities
- Pass files path in args
- Pass number of term to extract for TF-IDF and TF
Gives commons and specifics terms between H-TFIDF and TF & TF-IDF classics
Creates 6 csv files : 3 for each classical measures :
- Common.csv : list of common terms
- specific-htfidf : terms only in H-TF-IDF
- specific-reference : terms only in one classical measurs
"""
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearchQuery()
# reorganie tweets (dict : tweets by cities) into dataframe (city and date)
col = ['tweets', 'created_at']
matrixAllTweets = pd.DataFrame(columns=col)
for tweetByCity in tweets.keys():
# pprint(tweets[tweetByCity])
# Filter cities :
if str(tweetByCity).split("_")[0] in listOfCity:
matrix = pd.DataFrame(tweets[tweetByCity])
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# NB : 28354 results instead of 44841 (from ES) because we work only on tweets with a city found
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
mask = ((matrixAllTweets["date"] >= tfidfPeriod.min()) & (matrixAllTweets["date"] <= tfidfPeriod.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
### Remove stopword
for term in TFIDFClassical.keys():
if term in stopwords.words('english'):
del TFIDFClassical[term]
# TFIDFClassical.to_csv("elasticsearch/analyse/TFIDFClassical/tfidfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggest = TFIDFClassical.stack().nlargest(top_n)
### Reset index becaus stack create a multi-index (2 level : old index + terms)
extractBiggest = extractBiggest.reset_index(level=[0, 1])
extractBiggest.columns = ['old-index', 'terms', 'score']
del extractBiggest['old-index']
extractBiggest = extractBiggest.drop_duplicates(subset='terms', keep="first")
extractBiggest.to_csv("elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFIDFClassical"
compareWithHTFIDF(200, extractBiggest, repToSave)
# Compute TF
tf = CountVectorizer()
tf.fit(matrixAllTweets['tweet'])
tf_res = tf.transform(matrixAllTweets['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### Remove stopword
for term in TFClassical.keys():
if term in stopwords.words('english'):
del TFClassical[term]
### save in file
# TFClassical.to_csv("elasticsearch/analyse/TFClassical/tfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggestTF = TFClassical.stack().nlargest(top_n)
### Reset index becaus stack create a multi-index (2 level : old index + terms)
extractBiggestTF = extractBiggestTF.reset_index(level=[0, 1])
extractBiggestTF.columns = ['old-index', 'terms', 'score']
del extractBiggestTF['old-index']
extractBiggestTF = extractBiggestTF.drop_duplicates(subset='terms', keep="first")
extractBiggestTF.to_csv("elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFClassical"
compareWithHTFIDF(200, extractBiggestTF, repToSave)
def concatenateHTFIDFBiggestscore():
"""
This function return a dataframe of one column containing all terms. i.e regroup all terms
:param:
:return: dataframe of 1 column with all terms from states stacked
"""
HTFIDF = pd.read_csv('elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv', index_col=0)
# Transpose A-TF-IDF (inverse rows and columns)
HTFIDF = HTFIDF.transpose()
# group together all states' terms
HTFIDFUnique = pd.Series(dtype='string')
## loop on row for append states' terms in order to take into account their rank
## If there are 4 states, It will add the 4 first terms by iterow
for index, row in HTFIDF.iterrows():
HTFIDFUnique = HTFIDFUnique.append(row.transpose(), ignore_index=True)
## drop duplicate
HTFIDFUnique = HTFIDFUnique.drop_duplicates()
# merge to see what terms have in common
## convert series into dataframe before merge
HTFIDFUniquedf = HTFIDFUnique.to_frame().rename(columns={0: 'terms'})
HTFIDFUniquedf['terms'] = HTFIDFUnique
return HTFIDFUniquedf
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: matrix filtred
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
### Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
## period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
def compute_occurence_word_by_state():
"""
Count words for tweets aggregate by state.
For each state, we concatenate all tweets related.
Then we build a table :
- columns : all word (our vocabulary)
- row : the 4 states of UK
- cell : occurence of the word by state
:return: pd.Dataframe of occurence of word by states
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
## Compute a table : (row : state; column: occurence of each terms present in state's tweets)
es_tweets_results = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
es_tweets_results_filtred = spatiotemporelFilter(es_tweets_results, listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod)
## Aggregate by state
### Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
es_tweets_results_filtred["city"], es_tweets_results_filtred["state"], es_tweets_results_filtred["country"], \
es_tweets_results_filtred["date"] = zip(*es_tweets_results_filtred.index.map(splitindex))
es_tweets_results_filtred_aggstate = es_tweets_results_filtred.groupby("state").sum()
return es_tweets_results_filtred_aggstate
def get_tweets_by_terms(term):
"""
Return tweets content containing the term for Eval 11
Warning: Only work on
- the spatial window : capital of UK
- the temporal windows : 2020-01-22 to 30
Todo:
- if you want to generelized this method at ohter spatial & temporal windows. You have to custom the
elastic serarch query.
:param term: term for retrieving tweets
:return: Dictionnary of tweets for the term
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a Query : Here get only city from UK
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"full_text": term
}
}
],
"minimum_should_match": 1
}
}
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
try:
result = Elasticsearch.search(client, index=index, body=query, size=10000)
except Exception as e:
print("Elasticsearch deamon may not be launched for term: " + term)
print(e)
result = ""
for hit in result['hits']['hits']:
content = hit["_source"]["full_text"]
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
tweet = {
"full_text": content,
"state": state
}
list_of_tweets.append(tweet)
return list_of_tweets
def get_nb_of_tweets_with_spatio_temporal_filter():
"""
Return tweets content containing the term for Eval 11
Warning: Only work on
- the spatial window : capital of UK
- the temporal windows : 2020-01-22 to 30
Todo:
- if you want to generelized this method at ohter spatial & temporal windows. You have to custom the
elastic serarch query.
:param term: term for retrieving tweets
:return: Dictionnary of nb of tweets by state
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a Query : Here get only city from UK
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
try:
result = Elasticsearch.search(client, index=index, body=query, size=10000)
except Exception as e:
print("Elasticsearch deamon may not be launched")
print(e)
result = ""
nb_tweets_by_state = pd.DataFrame(index=["nb_tweets"], columns=('England', 'Northern Ireland', 'Scotland', 'Wales'))
nb_tweets_by_state.iloc[0] = (0, 0, 0, 0)
list_of_unboundaries_state = []
for hit in result['hits']['hits']:
try:
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
nb_tweets_by_state[state].iloc[0] += 1
except:
state_no_uk = str(hit["_source"]["rest"]["features"][0]["properties"]["city"] + " " + state)
list_of_unboundaries_state.append(state_no_uk)
print("get_nb_of_tweets_with_spatio_temporal_filter(): List of unique location outside of UK: " + str(
set(list_of_unboundaries_state)))
return nb_tweets_by_state
def ECIR20():
# matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
"""
### Filter city and period
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
# LDA clustering on TF-IDF adaptative vocabulary
listOfCityState = ['London_England', 'Glasgow_Scotland', 'Belfast_Northern Ireland', 'Cardiff_Wales']
ldHHTFIDF(listOfCityState)
"""
"""
## Build biotex input for adaptative level state
biotexAdaptativeBuilderAdaptative(listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod, temporalLevel='day')
"""
# Compare Biotex with H-TFIDF
"""
biotex = pd.read_csv('elasticsearch/analyse/biotexonhiccs/biotexUKbyStates.csv',
names=['terms', 'UMLS', 'score'], sep=';')
repToSave = "biotexonhiccs"
compareWithHTFIDF(200, biotex, repToSave)
"""
# declare path for comparison H-TFIDF with TF-IDF and TF (scikit measures)
"""
tfidfpath = "elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv"
tfpath = "elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv"
"""
"""
# Compare classical TF-IDF with H-TFIDF
## HTFIDF_comparewith_TFIDF_TF() gives commun and spectific terms between H-TFIDF and TF-ISF & TF classics
HTFIDF_comparewith_TFIDF_TF()
"""
# Thesaurus coverage : Are the terms in Wordnet / Agrovoc / MeSH
## open measures results and add a column for each thesaurus
### TF-IDF
"""
tfidf = pd.read_csv(tfidfpath)
tfidf = wordnetCoverage(tfidf)
tfidf = agrovocCoverage(tfidf)
tfidf = meshCoverage(tfidf)
tfidf.to_csv(tfidfpath)
print("TF-IDF thesaurus comparison: done")
### TF
tf = pd.read_csv(tfpath)
tf = wordnetCoverage(tf)
tf = agrovocCoverage(tf)
tf = meshCoverage(tf)
tf.to_csv(tfpath)
print("TF thesaurus comparison: done")
### H-TFIDF
htfidfStackedPAth = "elasticsearch/analyse/h-tfidf-stacked-wordnet.csv"
#### Stacked H-TFIDF
htfidf = concatenateHTFIDFBiggestscore()
htfidf = wordnetCoverage(htfidf)
htfidf = agrovocCoverage(htfidf)
htfidf = meshCoverage(htfidf)
htfidf.to_csv(htfidfStackedPAth)
print("H-TFIDF thesaurus comparison: done")
"""
## Percent of Coverage : print
"""
tfidf = pd.read_csv(tfidfpath)
tf = pd.read_csv(tfpath)
htfidfStackedPAth = "elasticsearch/analyse/h-tfidf-stacked-wordnet.csv"
htfidf = pd.read_csv(htfidfStackedPAth)
"""
"""
### Limit to a maximun numbers of terms
nfirstterms = 50
### TF-IDF
tfidfd = tfidf[0:nfirstterms]
tfidfPercentInWordnet = len(tfidfd[tfidfd.wordnet == True]) / nfirstterms
print("TF-IDF wordnet coverage for the ", nfirstterms, "first terms: ", tfidfPercentInWordnet)
tfidfPercentInAgrovoc = len(tfidfd[tfidfd.agrovoc == True]) / nfirstterms
print("TF-IDF agrovoc coverage for the ", nfirstterms, "first terms: ", tfidfPercentInAgrovoc)
### TF
tfd = tf[0:nfirstterms]
tfPercentInWordnet = len(tfd[tfd.wordnet == True]) / nfirstterms
print("TF wordnet coverage for the ", nfirstterms, "first terms: ", tfPercentInWordnet)
### H-TFIDF
htfidfd = htfidf[0:nfirstterms]
htfidfPercentInWordnet = len(htfidfd[htfidfd.wordnet == True]) / nfirstterms
print("H-TFIDF wordnet coverage for the", nfirstterms, "first terms: ", htfidfPercentInWordnet)
"""
"""
# Point 6 Comment thesaurus coverage
## plot graph coverage depending nb first elements
### Retrieve the mimimun len (i.e. nb of terms extracted) for the three measure :
min_len = min(tfidf.shape[0], tf.shape[0], htfidf.shape[0])
### Building dataframes containing percent of thesaurus coverage to plot
nbfirstelementsRange = range(1, min_len)
col = ['h-tfidf', 'tf-idf', 'tf', 'Number_of_the_first_terms_extracted']
wordnetCoverageByNbofterms = pd.DataFrame(columns=col)
agrovocCoverageByBbofterms = pd.DataFrame(columns=col)
meshCoverageByBbofterms = pd.DataFrame(columns=col)
for i, nb in enumerate(nbfirstelementsRange):
htfidfd = htfidf[0:nb]
tfidfd = tfidf[0:nb]
tfd = tf[0:nb]
row = {
"h-tfidf": len(htfidfd[htfidfd.wordnet == True]) / nb,
'tf-idf': len(tfidfd[tfidfd.wordnet == True]) / nb,
'tf': len(tfd[tfd.wordnet == True]) / nb,
'Number_of_the_first_terms_extracted': nb
}
wordnetCoverageByNbofterms.loc[i] = row
row = {
"h-tfidf": len(htfidfd[htfidfd.agrovoc == True]) / nb,
'tf-idf': len(tfidfd[tfidfd.agrovoc == True]) / nb,
'tf': len(tfd[tfd.agrovoc == True]) / nb,
'Number_of_the_first_terms_extracted': nb
}
agrovocCoverageByBbofterms.loc[i] = row
row = {
"h-tfidf": len(htfidfd[htfidfd.mesh == True]) / nb,
'tf-idf': len(tfidfd[tfidfd.mesh == True]) / nb,
'tf': len(tfd[tfd.mesh == True]) / nb,
'Number_of_the_first_terms_extracted': nb
}
meshCoverageByBbofterms.loc[i] = row
### Define the figure and its axes
fig, axes = plt.subplots(nrows=3, ncols=1)
axes[0].set(
xlabel='Number of the first n elements',
ylabel='Percentage of terms in wordnet',
title='Wordnet'
)
axes[0].xaxis.set_visible(False)
wordnetCoverageByNbofterms.plot(x='Number_of_the_first_terms_extracted', y=['h-tfidf', 'tf-idf', 'tf'], kind='line',
ax=axes[0])
axes[1].set(
xlabel='Number of the first n elements',
ylabel='Percentage of terms in Agrovoc',
title='Agrovoc'
)
axes[1].xaxis.set_visible(False)
agrovocCoverageByBbofterms.plot(x='Number_of_the_first_terms_extracted', y=['h-tfidf', 'tf-idf', 'tf'], kind='line',
ax=axes[1])
axes[2].set(
xlabel='Number of the first n elements',
ylabel='Percentage of terms in MeSH',
title='MeSH'
)
# axes[2].xaxis.set_visible(False)
meshCoverageByBbofterms.plot(x='Number_of_the_first_terms_extracted', y=['h-tfidf', 'tf-idf', 'tf'], kind='line',
ax=axes[2])
# As we hide xlabel for each subplots, we want to share one xlabel below the figure
# fig.text(0.32, 0.04, "Number of the first n elements")
fig.suptitle("Percentage of terms in Wordnet / Agrovoc / MesH \nby measures H-TFIDF / TF-IDF / TF")
fig.set_size_inches(8, 15)
# plt.show()
# fig.savefig("elasticsearch/analyse/thesaurus_coverage.png")
## Venn diagram & wordcloud
## /!\ I have to modify source of matplotlib_venn_wordcloud/_main.py to have a good layout ...
nb_of_terms = 99
htfidfd = htfidf[0:nb_of_terms]
tfidfd = tfidf[0:nb_of_terms]
tfd = tf[0:nb_of_terms]
### Plot by measure, venn diagram of Wordnet / Agrovoc / MeSH
figvenn, axvenn = plt.subplots(1, 3)
figvenn.set_size_inches(15, 8)
#### H-TFIDF
sets = []
sets.append(set(htfidfd.terms[htfidfd.wordnet == True]))
sets.append(set(htfidfd.terms[htfidfd.agrovoc == True]))
sets.append(set(htfidfd.terms[htfidfd.mesh == True]))
axvenn[0].set_title("H-TFIDF Thesaurus coverage", fontsize=20)
htfidf_ven = venn3_wordcloud(sets,
set_labels=['wordnet', ' agrovoc', ' mesh'],
wordcloud_kwargs=dict(min_font_size=4),
ax=axvenn[0])
for label in htfidf_ven.set_labels:
label.set_fontsize(15)
#### TFIDF
sets = []
sets.append(set(tfidfd.terms[tfidfd.wordnet == True]))
sets.append(set(tfidfd.terms[tfidfd.agrovoc == True]))
sets.append(set(tfidfd.terms[tfidfd.mesh == True]))
axvenn[1].set_title("TF-IDF Thesaurus coverage", fontsize=20)
tfidf_venn = venn3_wordcloud(sets,
set_labels=['wordnet', ' agrovoc', ' mesh'],
wordcloud_kwargs=dict(min_font_size=4),
ax=axvenn[1])
print(tfidf_venn.get_words_by_id("100"))
print(tfidf_venn.get_words_by_id("110"))
print(tfidf_venn.get_words_by_id("111"))
print(tfidf_venn.get_words_by_id("101"))
print(tfidfd.shape)
for label in tfidf_venn.set_labels:
label.set_fontsize(15)
#### TF
sets = []
sets.append(set(tfd.terms[tfd.wordnet == True]))
sets.append(set(tfd.terms[tfd.agrovoc == True]))
sets.append(set(tfd.terms[tfd.mesh == True]))
axvenn[2].set_title("TF Thesaurus coverage", fontsize=20)
tf_venn = venn3_wordcloud(sets,
set_labels=['wordnet', ' agrovoc', ' mesh'],
wordcloud_kwargs=dict(min_font_size=4),
# wordcloud_kwargs=dict(max_font_size=10, min_font_size=10),
# set_colors=['r', 'g', 'b'],
# alpha=0.8,
ax=axvenn[2])
for label in tf_venn.set_labels:
label.set_fontsize(15)
plt.show()
# End of thesaurus coverage
"""
# Point 7 : count the number of TF / TF-IDF / H-TFIDF terms for each states
"""
nb_of_extracted_terms_from_mesure = 300
## Compute a table : (row : state; column: occurence of each terms present in state's tweets)
es_tweets_results_filtred_aggstate = compute_occurence_word_by_state()
## Build a table for each measures and compute nb of occurences by states
### TF-IDF
tfidf_state_coverage = \
tfidf[['terms', 'score', 'wordnet', 'agrovoc', 'mesh']].iloc[0:nb_of_extracted_terms_from_mesure]
tfidf_state_coverage.set_index('terms', inplace=True)
for state in es_tweets_results_filtred_aggstate.index:
tfidf_state_coverage = \
tfidf_state_coverage.join(es_tweets_results_filtred_aggstate.loc[state], how='left')
tfidf_state_coverage.to_csv("elasticsearch/analyse/state_coverage/tfidf_state_coverage.csv")
### TF
tf_state_coverage = \
tf[['terms', 'score', 'wordnet', 'agrovoc', 'mesh']].iloc[0:nb_of_extracted_terms_from_mesure]
tf_state_coverage.set_index('terms', inplace=True)
for state in es_tweets_results_filtred_aggstate.index:
tf_state_coverage = \
tf_state_coverage.join(es_tweets_results_filtred_aggstate.loc[state], how='left')
tf_state_coverage.to_csv("elasticsearch/analyse/state_coverage/tf_state_coverage.csv")
### H-TFIDF
htfidf = | pd.read_csv("elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv", index_col=0) | pandas.read_csv |
## Script to add load, generators, missing lines and transformers to SciGRID
#
#
## WARNING: This script is no longer supported, since the libraries and data no longer exist in their former versions
#
## It is kept here for interest's sake
#
## See https://github.com/PyPSA/pypsa-eur for a newer model that covers all of Europe
#
#
#This Jupyter Notebook is also available to download at: <http://www.pypsa.org/examples/add_load_gen_trafos_to_scigrid.ipynb> and can be viewed as an HTML page at: http://pypsa.org/examples/add_load_gen_trafos_to_scigrid.html.
#
#This script does some post-processing on the original SciGRID dataset version 0.2 and then adds load, generation, transformers and missing lines to the SciGRID dataset.
#
#The intention is to create a model of the German electricity system that is transparent in the sense that all steps from openly-available raw data to the final model can be followed. The model is NOT validated and may contain errors.
#
#Some of the libraries used for attaching the load and generation are not on github, but can be downloaded at
#
#http://fias.uni-frankfurt.de/~hoersch/
#
#The intention is to release these as free software soon. We cannot guarantee to support you when using these libraries.
#
#
#
### Data sources
#
#Grid: based on [SciGRID](http://scigrid.de/) Version 0.2 which is based on [OpenStreetMap](http://www.openstreetmap.org/).
#
#Load size and location: based on Landkreise (NUTS 3) GDP and population.
#
#Load time series: from ENTSO-E hourly data, scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015)).
#
#Conventional power plant capacities and locations: BNetzA list.
#
#Wind and solar capacities and locations: EEG Stammdaten, based on http://www.energymap.info/download.html, which represents capacities at the end of 2014. Units without PLZ are removed.
#
#Wind and solar time series: REatlas, Andresen et al, "Validation of Danish wind time series from a new global renewable energy atlas for energy system analysis," Energy 93 (2015) 1074 - 1088.
#
#NB:
#
#All times in the dataset are UTC.
#
#Where SciGRID nodes have been split into 220kV and 380kV substations, all load and generation is attached to the 220kV substation.
#
### Warning
#
#This dataset is ONLY intended to demonstrate the capabilities of PyPSA and is NOT (yet) accurate enough to be used for research purposes.
#
#Known problems include:
#
#i) Rough approximations have been made for missing grid data, e.g. 220kV-380kV transformers and connections between close sub-stations missing from OSM.
#
#ii) There appears to be some unexpected congestion in parts of the network, which may mean for example that the load attachment method (by Voronoi cell overlap with Landkreise) isn't working, particularly in regions with a high density of substations.
#
#iii) Attaching power plants to the nearest high voltage substation may not reflect reality.
#
#iv) There is no proper n-1 security in the calculations - this can either be simulated with a blanket e.g. 70% reduction in thermal limits (as done here) or a proper security constrained OPF (see e.g. <http://www.pypsa.org/examples/scigrid-sclopf.ipynb>).
#
#v) The borders and neighbouring countries are not represented.
#
#vi) Hydroelectric power stations are not modelled accurately.
#
#viii) The marginal costs are illustrative, not accurate.
#
#ix) Only the first day of 2011 is in the github dataset, which is not representative. The full year of 2011 can be downloaded at <http://www.pypsa.org/examples/scigrid-with-load-gen-trafos-2011.zip>.
#
#x) The ENTSO-E total load for Germany may not be scaled correctly; it is scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015), which suggests monthly factors).
#
#xi) Biomass from the EEG Stammdaten are not read in at the moment.
#
#xii) Power plant start up costs, ramping limits/costs, minimum loading rates are not considered.
import pypsa
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
#%matplotlib inline
### Read in the raw SciGRID data
#You may have to adjust this path to where
#you downloaded the github repository
#https://github.com/PyPSA/PyPSA
folder_prefix = os.path.dirname(pypsa.__file__) + "/../examples/scigrid-de/"
#note that some columns have 'quotes because of fields containing commas'
vertices = pd.read_csv(folder_prefix+"scigrid-151109/vertices_de_power_151109.csvdata",sep=",",quotechar="'",index_col=0)
vertices.rename(columns={"lon":"x","lat":"y","name":"osm_name"},inplace=True)
print(vertices["voltage"].value_counts(dropna=False))
links = pd.read_csv(folder_prefix+"scigrid-151109/links_de_power_151109.csvdata",sep=",",quotechar="'",index_col=0)
links.rename(columns={"v_id_1":"bus0","v_id_2":"bus1","name":"osm_name"},inplace=True)
links["cables"].fillna(3,inplace=True)
links["wires"].fillna(2,inplace=True)
links["length"] = links["length_m"]/1000.
print(links["voltage"].value_counts(dropna=False))
## Drop the DC lines
for voltage in [300000,400000,450000]:
links.drop(links[links.voltage == voltage].index,inplace=True)
## Build the network
network = pypsa.Network()
pypsa.io.import_components_from_dataframe(network,vertices,"Bus")
pypsa.io.import_components_from_dataframe(network,links,"Line")
### Add specific missing AC lines
# Add AC lines known to be missing in SciGRID
# E.g. lines missing because of OSM mapping errors.
# This is no systematic list, just what we noticed;
# please tell SciGRID and/or <NAME> (<EMAIL>)
# if you know of more examples
columns = ["bus0","bus1","wires","cables","voltage"]
data = [["100","255",2,6,220000], # Niederstedem to Wengerohr
["384","351",4,6,380000], # Raitersaich to Ingolstadt
["351","353",4,6,380000], # Ingolstadt to Irsching
]
last_scigrid_line = int(network.lines.index[-1])
index = [str(i) for i in range(last_scigrid_line+1,last_scigrid_line+1 + len(data))]
missing_lines = pd.DataFrame(data,index,columns)
#On average, SciGRID lines are 25% longer than the direct distance
length_factor = 1.25
missing_lines["length"] = [length_factor*pypsa.geo.haversine(network.buses.loc[r.bus0,["x","y"]],network.buses.loc[r.bus1,["x","y"]])[0,0] for i,r in missing_lines.iterrows()]
pypsa.io.import_components_from_dataframe(network,missing_lines,"Line")
network.lines.tail()
### Determine the voltage of the buses by the lines which end there
network.lines.voltage.value_counts()
buses_by_voltage = {}
for voltage in network.lines.voltage.value_counts().index:
buses_by_voltage[voltage] = set(network.lines[network.lines.voltage == voltage].bus0)\
| set(network.lines[network.lines.voltage == voltage].bus1)
# give priority to 380 kV
network.buses["v_nom"] = 380
network.buses.loc[buses_by_voltage[220000],"v_nom"] = 220
network.buses.loc[buses_by_voltage[380000],"v_nom"] = 380
network.buses.v_nom.value_counts(dropna=False)
### Connect buses which are < 850m apart
#
#There are pairs of buses less than 850m apart which are not connected in SciGRID, but clearly connected in OpenStreetMap (OSM).
#
#The reason is that the relations for connections between close substations do not appear in OSM.
#
#Here they are connected with 2 circuits of the appropriate voltage level (an asumption).
#
#850m is chosen as a limit based on manually looking through the examples.
#
#The example 46-48 (Marzahn) at 892 m apart is the first example of close substations which are not connected in reality.
# Compute the distances for unique pairs
pairs = pd.Series()
for i,u in enumerate(network.buses.index):
vs = network.buses[["x","y"]].iloc[i+1:]
distance_km = pypsa.geo.haversine(vs,network.buses.loc[u,["x","y"]])
to_add = | pd.Series(data=distance_km[:,0],index=[(u,v) for v in vs.index]) | pandas.Series |
import sys
import os
import numpy as np
import scipy.io
import scipy.sparse
import numba
import random
import multiprocessing as mp
import subprocess
import cytoolz as toolz
import collections
from itertools import chain
import regex as re
import yaml
import logging
import time
import gzip
import pandas as pd
from functools import partial
from typing import NamedTuple
from pysam import AlignmentFile
from .util import compute_edit_distance, read_gene_map_from_gtf
from .fastq_io import read_fastq
from .barcode import ErrorBarcodeHash, ErrorBarcodeHashConstraint
from .estimate_cell_barcode import get_cell_whitelist
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
def format_fastq(*fastq, config, method, fastq_out, cb_count,
num_thread=4, max_num_cell=1000000):
"""
Merging fastq reads by putting the cell barcodes and UMI sequences
to the headers of the cDNA reads
:param config: the config file
:param method: the library preparation protocol, e.g., can be one of
10X, Drop-seq, InDrop, Seq-Well, CEL-seq2, sci-RNA-seq, SPLiT-seq,
you can add protocol to the configure file easily
by specifying the read structures.
A template configuration file is provided in scumi/config.yaml
:param fastq: input fastq files
:param fastq_out: the output fastq file
:param cb_count: an output file containing the # reads for each cell barcode
:param num_thread: int
the number of cpu cores to use
:param max_num_cell: int
the maximum number of cells
"""
with open(config, 'r') as stream:
config_dict = yaml.safe_load(stream)
config_dict = config_dict[method]
num_read = config_dict['num_read']
num_fastq = len(fastq)
if num_fastq != num_read:
logger.error(f'Error: the number of input fastq files {num_fastq} is different '
f'from the number of fastq files {num_read} detected in the config file')
sys.exit(-1)
read_regex_str, barcode_filter, read_regex_str_qual = \
zip(*[_extract_input_read_template('read' + str(i), config_dict)
for i in range(1, num_read + 1)])
barcode_filter_dict = dict()
for d in barcode_filter:
barcode_filter_dict.update(d)
read_template = _infer_read_template(read_regex_str)
# select
read_regex_list = [re.compile(z) for z in read_regex_str_qual]
format_read = partial(_format_read, read_regex_list=read_regex_list,
read_template=read_template.read_template,
cb_tag=read_template.cb_tag,
ub_len=read_template.ub_len,
barcode_filter_dict=barcode_filter_dict)
chunk_size = 8000
fastq_reader = [read_fastq(fastq_i) for fastq_i in fastq]
chunks = toolz.partition_all(chunk_size, zip(*fastq_reader))
num_cpu = mp.cpu_count()
num_thread = num_thread if num_cpu > num_thread else num_cpu
seq_chunk_obj = toolz.partition_all(num_thread, chunks)
fastq_out_all = [fastq_out + str(x) + '.gz' for x in range(num_thread)]
[gzip.open(x, 'wb').close() for x in fastq_out_all]
cb_count_all = [cb_count + str(x) + '.csv' for x in range(num_thread)]
[open(x, 'wt').close() for x in cb_count_all]
fastq_info = collections.defaultdict(collections.Counter)
iteration = 0
results = []
time_start = time.time()
pool = mp.Pool(num_thread)
for fastq_chunk in seq_chunk_obj:
res = pool.starmap_async(format_read, zip(fastq_chunk, fastq_out_all, cb_count_all))
results.append(res)
if len(results) == num_thread * 10:
results[0].wait()
while results and results[0].ready():
iteration += 1
if not (iteration % 10):
logger.info(f'Processed {iteration * chunk_size * num_thread:,d} reads!')
res = results.pop(0)
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
pool.close()
pool.join()
for res in results:
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
with open('.fastq_count.tsv', 'w') as f:
for k, v in fastq_info['read'].most_common():
f.write(f'{k}\t{v}\n')
cmd_cat_fastq = ' '.join(['cat'] + fastq_out_all + ['>'] + [fastq_out])
try:
subprocess.check_output(cmd_cat_fastq, shell=True)
[os.remove(fastq_file) for fastq_file in fastq_out_all]
except subprocess.CalledProcessError:
logger.info(f'Errors in concatenate fastq files')
sys.exit(-1)
except OSError:
logger.info(f'Errors in deleting fastq files')
sys.exit(-1)
time_used = time.time() - time_start
logger.info(f'Formatting fastq done, taking {time_used/3600.0:.3f} hours')
if not cb_count:
cb_count = fastq_out + '.cb_count'
df = _count_cell_barcode_umi(cb_count_all[0])
for cb_file in cb_count_all[1:]:
df1 = _count_cell_barcode_umi(cb_file)
df = pd.concat([df, df1], axis=0)
df = df.groupby(df.index).sum()
if df.shape[0] > max_num_cell * 2:
df = df.sort_values(by=df.columns[0], ascending=False)
df = df.iloc[:max_num_cell, :]
try:
[os.remove(cb_file) for cb_file in cb_count_all]
except OSError:
logger.info(f'Errors in deleting cell barcode files')
sys.exit(-1)
df = df.sort_values(by=df.columns[0], ascending=False)
if df.shape[0] > 0:
df.columns = [str(x) for x in range(df.shape[1])]
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count, sep='\t')
def _update_fastq_info(fastq_info, chunk_info):
for fastq_count in chunk_info:
fastq_info['read'].update(read_pass=fastq_count[0],
read_pass_barcode=fastq_count[1],
read_pass_polyt=fastq_count[2],
read_total=fastq_count[3])
def _count_cell_barcode_umi(cb_file, chunk_size=10 ** 7):
cb_reader = pd.read_csv(cb_file, header=None, iterator=True,
sep='\t', index_col=0)
chunks = cb_reader.get_chunk(chunk_size)
chunks = chunks.groupby(chunks.index).sum()
status = True
while status:
try:
chunk = cb_reader.get_chunk(chunk_size)
chunks = pd.concat([chunks, chunk], axis=0)
chunks = chunks.groupby(chunks.index).sum()
except StopIteration:
status = False
logger.info('Read cell barcode counts done.')
return chunks
def _extract_barcode_pos(barcode_dict, config):
barcode_reg = []
pos_all = []
barcode_filter = dict()
for barcode_and_pos in barcode_dict:
barcode, pos = barcode_and_pos
pos_all.append(pos)
barcode_reg.append('(?P<' + barcode + '>.{' +
str(pos[1] - pos[0] + 1) + '})')
try:
value = config[barcode + '_value']
barcode_filter.update({barcode: ErrorBarcodeHash(value, 1)})
except KeyError:
pass
return barcode_reg, pos_all, barcode_filter
def _extract_input_read_template(read, config):
read_name = '(@.*)\\n'
read_plus = '(\\+.*)\\n'
read_qual = '(.*)\\n'
filter_dict = dict()
seq = [(key, value) for key, value in config[read].items()
if key.startswith('cDNA')]
if seq:
read_name = '@(?P<name>.*)\\n'
read_seq = '(?P<seq>.*)\\n'
read_qual = '(?P<qual>.*)\\n'
read_template = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template
cell_barcode = [(key, value) for key, value in config[read].items()
if key.startswith('CB') and not key.endswith('value')]
umi = [(key, value) for key, value in config[read].items()
if key.startswith('UMI')]
poly_t = [(key, value) for key, value in config[read].items()
if key.startswith('polyT')]
cb_reg, cb_pos, cb_filter = _extract_barcode_pos(cell_barcode, config[read])
filter_dict.update(cb_filter)
umi_reg, umi_pos, _ = _extract_barcode_pos(umi, config[read])
umi_reg = [z.replace('UMI', 'UB') for z in umi_reg]
pt_reg, pt_pos, _ = _extract_barcode_pos(poly_t, config[read])
read_pos_start = [z[0] for z in cb_pos]
read_pos_start += [z[0] for z in umi_pos]
read_pos_start += [z[0] for z in pt_pos]
read_pos_end = [z[1] for z in cb_pos]
read_pos_end += [z[1] for z in umi_pos]
read_pos_end += [z[1] for z in pt_pos]
idx = sorted(range(len(read_pos_start)),
key=lambda k: read_pos_start[k])
barcode_tag = cb_reg + umi_reg + pt_reg
read_pos_start = [read_pos_start[i] for i in idx]
read_pos_end = [read_pos_end[i] for i in idx]
barcode_tag = [barcode_tag[i] for i in idx]
idx_skip = [read_pos_start[i+1] - read_pos_end[i] - 1
for i in range(0, len(read_pos_start)-1)]
barcode_skip = ['[ACGTN]{' + str(i) + '}' for i in idx_skip]
read_seq = barcode_tag[0]
for i in range(len(read_pos_start)-1):
if idx_skip[i] == 0:
read_seq += barcode_tag[i+1]
else:
read_seq += barcode_skip[i]
read_seq += barcode_tag[i+1]
filter_dict.update(_filter_ploy_t(read_seq))
if read_pos_start[0] > 1:
read_seq = '[ACGTN]{' + str(read_pos_start[0]-1) + '}'
read_seq += '[ACGTN]*'
read_seq = read_seq + '\\n'
read_template = read_name + read_seq + read_plus + read_qual
read_qual = re.sub('>', r'_qual>', read_seq)
read_qual = re.sub('\[ACGTN\]', '.', read_qual)
read_template_qual = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template_qual
def _filter_ploy_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t_filter = {'polyT': ErrorBarcodeHash('T' * z, 1) for z in poly_t_count}
return poly_t_filter
def _replace_poly_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t = ['(' + 'T'*z + ')' + '{s<=1}' for z in poly_t_count]
for z in range(len(match)):
read_seq = read_seq.replace(match[z], poly_t[z])
return read_seq
def _infer_read_template(reg_list):
class ReadInfo(NamedTuple):
cb: bool
cb_tag: list
cb_len: list
ub: bool
ub_tag: list
ub_len: list
read_template: str
cb = ub = False
cb_tag = ub_tag = []
cb_len = ub_len = []
read_template = '@'
reg = ''.join(k for k in reg_list)
if 'CB' in reg:
logger.info('Cell barcode in configure file')
cb = True
cb_seq_template = _accumulate_barcode('CB', reg)
cb_template = ':CB_' + cb_seq_template[1]
read_template += cb_template
cb_tag = cb_seq_template[0]
cb_len = cb_seq_template[2]
if 'UB' in reg:
logger.info('UMI in config file')
ub = True
ub_seq_template = _accumulate_barcode('UB', reg)
ub_template = ':UB_' + ub_seq_template[1]
read_template += ub_template
ub_tag = ub_seq_template[0]
ub_len = ub_seq_template[2]
read_template += ':{name}'
read_template += '\n{seq}\n+\n{qual}\n'
return ReadInfo(cb=cb, cb_tag=cb_tag, cb_len=cb_len,
ub=ub, ub_tag=ub_tag, ub_len=ub_len,
read_template=read_template)
def _accumulate_barcode(barcode, seq):
barcode_num = [sub_str[0] for sub_str in
seq.split('?P<' + re.escape(barcode))][1:]
status = '>' in barcode_num
barcode_num = ['0' if x == '>' else x for x in barcode_num]
barcode_num = sorted(barcode_num, key=int)
if status:
barcode_num[0] = ''
barcode_seq = [barcode + num for num in barcode_num]
barcode_template = ['{' + tag + '}' for tag in barcode_seq]
barcode_template = '-'.join(barcode_template)
str_split = 'P<' + barcode + '[0-9]*>.{'
barcode_len = [sub_str for sub_str in re.split(str_split, seq)][1:]
barcode_len = [int(re.findall(r'(\d+)', barcode_i)[0])
for barcode_i in barcode_len]
return barcode_seq, barcode_template, barcode_len
def _format_read(chunk, fastq_file, cb_count_file, read_regex_list,
read_template, cb_tag, ub_len, barcode_filter_dict):
reads = []
num_read = len(chunk)
num_read_pass = num_read_barcode = num_read_polyt = 0
num_regex = len(read_regex_list)
barcode_counter = collections.defaultdict(
partial(np.zeros, shape=(ub_len[0] + 1), dtype=np.uint32))
ignore_read = False
for read_i in chunk:
read_dict_list = []
for i, regex_i in enumerate(read_regex_list):
read_match = regex_i.match(read_i[i])
if not read_match:
ignore_read = True
break
read_dict_list.append(read_match.groupdict())
if ignore_read:
ignore_read = False
continue
read1_dict = read_dict_list[0]
if num_regex > 1:
for regex_id in range(1, num_regex):
read1_dict.update(read_dict_list[regex_id])
cb = [barcode_filter_dict[tag][read1_dict[tag]]
if tag in barcode_filter_dict.keys() else read1_dict[tag]
for tag in cb_tag]
if all(cb):
cb = '-'.join(cb)
num_read_barcode += 1
else:
ignore_read = True
ub = read1_dict['UB']
try:
poly_t = read1_dict['polyT']
if not barcode_filter_dict['polyT'][poly_t]:
ignore_read = True
else:
num_read_polyt += 1
except KeyError:
pass
if ignore_read:
ignore_read = False
continue
num_read_pass += 1
if len(read1_dict['seq']) >= 1:
read1_dict = read_template.format_map(read1_dict)
reads.append(read1_dict)
barcode_counter[cb] += [x == 'T' for x in 'T' + ub]
with gzip.open(fastq_file, 'ab') as fastq_hd:
for read in reads:
fastq_hd.write(bytes(read, 'utf8'))
df = pd.DataFrame.from_dict(barcode_counter, orient='index')
if df.shape[0] > 0:
df = df.sort_values(by=df.columns[0], ascending=False)
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count_file, sep='\t', mode='a', header=False)
return num_read_pass, num_read_barcode, num_read_polyt, num_read
def _construct_barcode_regex(bam):
read_mode = 'r' if bam.endswith('.sam') else 'rb'
bam_file = AlignmentFile(bam, mode=read_mode)
first_alignment = next(bam_file)
bam_file.close()
barcodes = set()
for barcode in ['CB_', 'UB_']:
if barcode in first_alignment.qname:
barcodes.add(barcode)
barcode_parser = '.*'
if 'CB_' in barcodes:
barcode_parser += ':CB_(?P<CB>[A-Z\-]+)'
if 'UB_' in barcodes:
barcode_parser += ':UB_(?P<UB>[A-Z\-]+)'
if barcode_parser == '.*':
logger.error('Error: no cell barcodes and UMIs.')
sys.exit(-1)
barcode_parser += ':*'
barcode_parser = re.compile(barcode_parser)
match = barcode_parser.match(first_alignment.qname)
cb = _extract_tag(match, 'CB')
return barcode_parser, cb, read_mode
def _extract_tag(match, tag):
try:
tag = match.group(tag)
except IndexError:
tag = None
return tag
def count_feature(*cb, bam, molecular_info_h5, gtf, cb_count, feature_tag='XT:Z',
expect_cell=False, force_cell=False, all_cell=False,
depth_threshold=1, cell_barcode_whitelist=None):
"""
Count the number of reads/UMIs mapped to each gene
:param bam: the input sam/bam file
:param molecular_info_h5: output the molecular info
:param cb: the input cell barcode files, can be empty or None
:param cell_barcode_whitelist: a file contain the selected cell barcodes
:param gtf: a GTF file
:param cb_count: a file containing the number of reads mapped to each cell barcode,
output from format_fastq
:param feature_tag: the tag representing genes in the input bam file
:param depth_threshold: only considering UMIs that have at least
depth_threshold reads support
:param expect_cell: the expected number of cells in the bam file
:param force_cell: force to return the number of cells set by expect_cell
:param all_cell: keep all cell barcodes - can be very slow
"""
barcode_parser, first_cb, read_mode = _construct_barcode_regex(bam)
num_cb = len(first_cb.split('-'))
num_cb_file = len(cb)
if 0 == num_cb_file:
cb = [None] * num_cb
elif num_cb != num_cb_file:
logger.error(f'Error: the number of input cell barcodes files {num_cb_file} '
f'is different from the number of cell barcodes {num_cb} '
f'detected in the bam file')
if num_cb > num_cb_file:
cb = cb + [None] * (num_cb - num_cb_file)
else:
cb = cb[:num_cb]
# TODO: no cell barcodes detected
correct_cb_fun, cb_list, cb_remove = _construct_cb_filter(
cb_count, cb, expect_cell, force_cell, all_cell, cell_barcode_whitelist)
gene_map_dict = read_gene_map_from_gtf(gtf)
logger.info('Counting molecular info')
time_start_count = time.time()
sam_file = AlignmentFile(bam, mode=read_mode)
_count_feature_partial = partial(_count_feature,
gene_map_dict=gene_map_dict,
barcode_parser=barcode_parser,
correct_cb_fun=correct_cb_fun,
sam_file=sam_file,
feature_tag=feature_tag)
track = sam_file.fetch(until_eof=True)
map_info, read_in_cell, molecular_info = _count_feature_partial(track)
time_count = time.time() - time_start_count
logger.info(f'Counting molecular info done - {time_count/3600.0:.3f} hours, '
f'{int(3600.0 * map_info["num_alignment"]/time_count):,d} '
f'alignments/hour\n')
# TODO: still output results
if len(molecular_info) == 0:
logger.error('Error: no reads mapped to features.')
sys.exit(-1)
name = ['cell',
'gene',
'umi',
'depth',
]
logger.info('Converting to a dataframe')
convert_time = time.time()
molecular_info = pd.Series(molecular_info).reset_index()
molecular_info.columns = name
for col in name[:3]:
molecular_info.loc[:, col] = molecular_info[col].astype('category')
convert_time = time.time() - convert_time
logger.info(f'Converting to a dataframe done, '
f'taking {convert_time/60.0:.3f} minutes\n')
molecular_info.columns = name
if num_cb > 1 and cb_list:
molecular_info = molecular_info.loc[molecular_info['cell'].isin(cb_list), :]
if cb_remove:
molecular_info = molecular_info.loc[~molecular_info['cell'].isin(cb_remove), :]
molecular_info = molecular_info.loc[molecular_info['depth'] >= 0.95, :]
molecular_info['depth'] = \
np.floor(molecular_info['depth'].values + 0.5).astype('uint32')
molecular_info = molecular_info.sort_values(name[:3])
molecular_info = molecular_info.reset_index(drop=True)
map_info = | pd.Series(map_info) | pandas.Series |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual( | ct('100s') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
import pandas as pd
import numpy as np
import os
import glob
import shutil
import json
import statistics
from PIL import Image
import random
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import jaccard_score
class AdjacencyMatrices():
def __init__(self) -> None:
self.filename = '/home/agun/mimic/dataset/VG/xray_coco_test.json'
self.outputdir = "/home/agun/mimic/dataset/VG/"
self.diseaselist = ['lung opacity', 'pleural effusion', 'atelectasis', 'enlarged cardiac silhouette',
'pulmonary edema/hazy opacity', 'pneumothorax', 'consolidation', 'fluid overload/heart failure', 'pneumonia']
self.organs = ["right lung", "right apical zone", "right upper lung zone", "right mid lung zone",
"right lower lung zone", "right hilar structures", "right costophrenic angle", "left lung", "left apical zone",
"left upper lung zone", "left mid lung zone", "left lower lung zone", "left hilar structures",
"left costophrenic angle", "mediastinum", "upper mediastinum", "cardiac silhouette", "trachea"]
print("Loading json data ...")
f = open(str(self.filename),)
self.data = json.load(f)
self.data_size = len(self.data)
print(self.data_size)
print("Done loading json data ...")
'''
The Similarity measure between each pair of anatomy objects A and B
Jaccard similarity measure is used to measure the similarity between
each object, by measuring the average similarity over every disease class
'''
def anatomy(self):
error = 1e-9
anatomy_len = len(self.organs)
row = self.organs
column = self.organs
adj_matrix = []
for ind, B in enumerate(row):
print("Processing {} from row {}".format(B, str(ind)))
rows = np.zeros([len(self.organs)])
for inde, A in enumerate(column):
# print("Processing {} from column {}".format(A, str(inde)))
AnB_count = 0
B_count = 0
row_counter = Counter()
column_counter = Counter()
a_val = []
b_val = []
p_anb = 0
for img in self.data:
ids = [self.organs[int(obj['category_id'])] for obj in img['annotations']]
aa = []
bb = []
if set(ids) == set(self.organs):
for relation in img['annotations']:
if int(relation['category_id']) == ind:
bb = relation['attributes']
for relations in img['annotations']:
if int(relations['category_id']) == inde:
aa = relations['attributes']
if np.count_nonzero(np.array(aa)) > 0 or np.count_nonzero(np.array(bb)) > 0:
b_val.append(bb)
a_val.append(aa)
else:
continue
df_A = pd.DataFrame(a_val, columns=self.diseaselist)
df_B = pd.DataFrame(b_val, columns=self.diseaselist)
assert len(b_val) == len(a_val)
if not df_A.empty:
jaccard_list = []
for disease in self.diseaselist:
jaccard = jaccard_score(df_B[disease], df_A[disease], average='macro')
jaccard_list.append(jaccard)
p_anb = statistics.mean(jaccard_list)
if ind == inde:
p_anb = 1
if p_anb > 0.5:
p_anb = 1
else:
p_anb = 0
rows[inde] = p_anb
adj_matrix.append(rows.tolist())
df = pd.DataFrame(adj_matrix, columns=self.organs)
# print(df)
filename = os.path.join(self.outputdir, 'anatomy_matrix.csv')
df.to_csv(filename, sep='\t', index=False)
return df
'''
The Conditional Probability of A (disease row) given B (disease Column)
P(A|B) = P(AnB)/P(B)
'''
def findings(self):
filename = os.path.join(self.outputdir, 'findings_matrix.csv')
error = 1e-9
row = self.diseaselist
column = self.diseaselist
adj_matrix = []
for ind, B in enumerate(row):
print("Processing {} from row {}".format(B, str(ind)))
rows = np.zeros([len(self.diseaselist)])
for inde, A in enumerate(column):
# print("Processing {} from column {}".format(A, str(inde)))
AnB_count = 0
B_count = 0
for img in self.data:
for relation in img['annotations']:
if relation['attributes'][ind] == 1:
B_count += 1
if (relation['attributes'][inde] == 1) and (relation['attributes'][ind] == 1):
AnB_count += 1
p_anb = AnB_count/self.data_size
p_b = B_count/self.data_size
a_given_b = p_anb / (p_b + error)
if a_given_b > 0.4:
a_given_b = 1
else:
a_given_b = 0
rows[inde] = a_given_b
adj_matrix.append(rows.tolist())
print(adj_matrix)
df = | pd.DataFrame(adj_matrix, columns=self.diseaselist) | pandas.DataFrame |
"""Unit tests for cartoframes.data.utils"""
import unittest
import pandas as pd
from shapely.geometry import Point
from shapely.geos import lgeos
from geopandas.geoseries import GeoSeries
from cartoframes.data import Dataset
from cartoframes.auth import Credentials
from cartoframes.data.utils import compute_query, compute_geodataframe, \
decode_geometry, detect_encoding_type, ENC_SHAPELY, \
ENC_WKB, ENC_WKB_HEX, ENC_WKB_BHEX, ENC_WKT, ENC_EWKT
from cartoframes import context
from ..mocks.context_mock import ContextMock
class TestDataUtils(unittest.TestCase):
"""Tests for functions in data.utils module"""
def setUp(self):
self.credentials = Credentials(username='', api_key='1234')
self.geom = [
'010100000000000000000000000000000000000000',
'010100000000000000000024400000000000002e40',
'010100000000000000000034400000000000003e40'
]
self.lng = [0, 10, 20]
self.lat = [0, 15, 30]
self.geometry = GeoSeries([
Point([0, 0]),
Point([10, 15]),
Point([20, 30])
], name='geometry')
self.msg = 'No geographic data found. '
'If a geometry exists, change the column name '
'(geometry, the_geom, wkt_geometry, wkb_geometry, geom, wkt, wkb) '
'or ensure it is a DataFrame with a valid geometry. '
'If there are latitude/longitude columns, rename to '
'(latitude, lat), (longitude, lng, lon, long).'
self._context_mock = ContextMock()
# Mock create_context method
self.original_create_context = context.create_context
context.create_context = lambda c: self._context_mock
def tearDown(self):
context.create_context = self.original_create_context
def test_compute_query(self):
"""data.utils.compute_query"""
ds = Dataset('table_name', schema='schema', credentials=self.credentials)
query = compute_query(ds._strategy)
self.assertEqual(query, 'SELECT * FROM "schema"."table_name"')
def test_compute_query_default_schema(self):
"""data.utils.compute_query"""
ds = Dataset('table_name', credentials=self.credentials)
query = compute_query(ds._strategy)
self.assertEqual(query, 'SELECT * FROM "public"."table_name"')
def test_compute_geodataframe_geometry(self):
ds = Dataset(pd.DataFrame({'geometry': self.geom}))
gdf = compute_geodataframe(ds)
self.assertEqual(str(gdf.geometry), str(self.geometry))
def test_compute_geodataframe_the_geom(self):
ds = Dataset(pd.DataFrame({'the_geom': self.geom}))
gdf = compute_geodataframe(ds)
self.assertEqual(str(gdf.geometry), str(self.geometry))
def test_compute_geodataframe_wkt_geometry(self):
ds = Dataset(pd.DataFrame({'wkt_geometry': self.geom}))
gdf = compute_geodataframe(ds)
self.assertEqual(str(gdf.geometry), str(self.geometry))
def test_compute_geodataframe_wkb_geometry(self):
ds = Dataset(pd.DataFrame({'wkb_geometry': self.geom}))
gdf = compute_geodataframe(ds)
self.assertEqual(str(gdf.geometry), str(self.geometry))
def test_compute_geodataframe_geom(self):
ds = Dataset( | pd.DataFrame({'geom': self.geom}) | pandas.DataFrame |
# user define imports
from my_package.analysis_info import AnalysisInfo, DataInfo, ResultsInfo
from my_package.data_cleaner import DataCleaner
from my_package import visualizer as visualizer
# python imports
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
class DataProcessor:
def __init__(self):
return
@staticmethod
def data_cleanup(df):
return DataCleaner.perform_cleanup(df)
@staticmethod
def train_test_split(dataset, x_name, y_name, test_size):
X = dataset[x_name].values.reshape(-1, 1)
y = dataset[y_name].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
return X_train, X_test, y_train, y_test
@staticmethod
def population_visitors(df, data_map, config):
df_selected = df[['population', 'visitor']]
df_selected.loc[:, 'population'] = pd.to_numeric(df_selected.loc[:, 'population'])
df_selected.loc[:, 'visitor'] = pd.to_numeric(df_selected.loc[:, 'visitor'])
df_clean = df_selected.dropna()
file_name = "distribution_All_MuseumVisitors_Function_population_visitors.png"
silent_mode_enabled = config.silent_mode_enabled
visualizer.plot_data_distribution(df_clean['visitor'], file_name, silent_mode_enabled)
file_name = "distribution_All_CityPopulation_Function_population_visitors.png"
silent_mode_enabled = config.silent_mode_enabled
visualizer.plot_data_distribution(df_clean['population'], file_name, silent_mode_enabled)
labels = {"x": ["population", "City Population"], "y": ["visitor", "Museum Visitors"]}
visualizer.plot_data(df_clean, labels, silent_mode_enabled)
x_population = df_clean['population'].to_numpy()
y_visitor = df_clean['visitor'].to_numpy()
visualizer.quantile_quantile_plot(y_visitor, silent_mode_enabled)
visualizer.quantile_quantile_plot(x_population, silent_mode_enabled)
X_train, X_test, y_train, y_test = DataProcessor.train_test_split(df_clean, "population", "visitor", 0.2)
x_data_info = {"values": x_population.tolist(), "label": "City Population", "train":X_train, "test":X_test}
y_data_info = {"values": y_visitor.tolist(), "label": "Museum Visitors", "train":y_train, "test":y_test}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def population_visitors_sum(df, data_map):
df_clean = df[data_map.keys()]
grouped_df = df_clean.groupby(["city"])
number_cities = grouped_df.ngroups
print("number of cities: ", number_cities)
number_features = 2
index = -1
train_id_info = np.zeros((number_cities, number_features), dtype=int)
for city_data in grouped_df:
city_info = city_data[1]
index = index + 1
for data in city_info.iterrows():
str_population = data[1]['population']
if str_population:
col = 0
population = float(data[1]['population'])
train_id_info[index, col] = population
col = col + 1
visitor = float(data[1]['visitor'])
train_id_info[index, col] = train_id_info[index, col] + visitor
x_population = []
y_visitor = []
for data in train_id_info:
if data[0] != 0:
# population
x_population.append(data[0])
# visitor
y_visitor.append(data[1])
x_data_info = {"values": x_population, "label": "City Population"}
y_data_info = {"values": y_visitor, "label": "Museum Visitors"}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def population_visitors_max(df, data_map):
df_clean = df[data_map.keys()]
grouped_df = df_clean.groupby(["city"])
number_cities = grouped_df.ngroups
print("number of cities: ", number_cities)
number_features = 2
index = -1
train_id_info = np.zeros((number_cities, number_features), dtype=int)
for city_data in grouped_df:
city_info = city_data[1]
index = index + 1
for data in city_info.iterrows():
str_population = data[1]['population']
if str_population:
col = 0
population = float(data[1]['population'])
train_id_info[index, col] = population
col = col + 1
visitor = float(data[1]['visitor'])
train_id_info[index, col] = max(train_id_info[index, col], visitor)
x_population = []
y_visitor = []
for data in train_id_info:
if data[0] != 0:
# population
x_population.append(data[0])
# visitor
y_visitor.append(data[1])
x_data_info = {"values": x_population, "label": "City Population"}
y_data_info = {"values": y_visitor, "label": "Museum Visitors"}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def city_visitor_museum_visitors(df, data_map, config):
df_selected = df[['city_visitor', 'visitor']]
df_selected.loc[:, 'city_visitor'] = pd.to_numeric(df_selected.loc[:, 'city_visitor'])
df_selected.loc[:, 'visitor'] = pd.to_numeric(df_selected.loc[:, 'visitor'])
df_clean = df_selected.dropna()
file_name = "distribution_All_MuseumVisitors_Function_city_visitor_museum_visitors.png"
silent_mode_enabled = config.silent_mode_enabled
visualizer.plot_data_distribution(df_clean['visitor'], file_name, silent_mode_enabled)
file_name = "distribution_All_CityVisitors_Function_city_visitor_museum_visitors.png"
silent_mode_enabled = config.silent_mode_enabled
visualizer.plot_data_distribution(df_clean['city_visitor'], file_name, silent_mode_enabled)
labels = {"x": ["city_visitor", "City Visitors"], "y": ["visitor", "Museum Visitors"]}
visualizer.plot_data(df_clean, labels, silent_mode_enabled)
x_city_visitor = df_clean['city_visitor'].to_numpy()
y_visitor = df_clean['visitor'].to_numpy()
X_train, X_test, y_train, y_test = DataProcessor.train_test_split(df_clean, "city_visitor", "visitor", 0.2)
x_data_info = {"values": x_city_visitor.tolist(), "label": "City Visitors", "train":X_train, "test":X_test}
y_data_info = {"values": y_visitor.tolist(), "label": "Museum Visitors", "train":y_train, "test":y_test}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def city_visitor_museum_visitors_sum(df, data_map):
df_clean = df[data_map.keys()]
grouped_df = df_clean.groupby(["city"])
number_cities = grouped_df.ngroups
print("number of cities: ", number_cities)
number_features = 2
index = -1
train_id_info = np.zeros((number_cities, number_features), dtype=int)
for city_data in grouped_df:
train_id_label_info = 0
city_info = city_data[1]
total_visitors = 0
index = index + 1
for data in city_info.iterrows():
str_city_visitor = data[1]['city_visitor']
if str_city_visitor:
col = 0
city_visitor = float(data[1]['city_visitor'])
train_id_info[index, col] = city_visitor
col = col + 1
visitor = float(data[1]['visitor'])
train_id_info[index, col] = train_id_info[index, col] + visitor
x_city_visitor = []
y_visitor = []
for data in train_id_info:
if data[0] != 0:
# population
x_city_visitor.append(data[0])
# visitor
y_visitor.append(data[1])
x_data_info = {"values": x_city_visitor, "label": "City Visitors"}
y_data_info = {"values": y_visitor, "label": "Museum Visitors"}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def city_visitor_museum_visitors_max(df, data_map):
df_clean = df[data_map.keys()]
grouped_df = df_clean.groupby(["city"])
number_cities = grouped_df.ngroups
print("number of cities: ", number_cities)
number_features = 2
index = -1
train_id_info = np.zeros((number_cities, number_features), dtype=int)
for city_data in grouped_df:
city_info = city_data[1]
index = index + 1
for data in city_info.iterrows():
str_city_visitor = data[1]['city_visitor']
if str_city_visitor:
col = 0
city_visitor = float(data[1]['city_visitor'])
train_id_info[index, col] = city_visitor
col = col + 1
visitor = float(data[1]['visitor'])
train_id_info[index, col] = max(train_id_info[index, col], visitor)
x_city_visitor = []
y_visitor = []
for data in train_id_info:
if data[0] != 0:
# population
x_city_visitor.append(data[0])
# visitor
y_visitor.append(data[1])
x_data_info = {"values": x_city_visitor, "label": "City Visitors"}
y_data_info = {"values": y_visitor, "label": "Museum Visitors"}
return DataInfo(x_data_info=x_data_info, y_data_info=y_data_info)
@staticmethod
def multiple_linear_data(dataset, data_map, config):
#todo: this function should be refactor,
# new machine _learning_component is needed
dataset = dataset[data_map.keys()]
dataset.loc[:, 'population'] = | pd.to_numeric(dataset.loc[:, 'population']) | pandas.to_numeric |
"""
Miscellaneous functions useful for Threat Hunting and cybersecurity data analytics
"""
from __future__ import division
from builtins import input
import getpass
import math
from jellyfish import levenshtein_distance, damerau_levenshtein_distance, hamming_distance, jaro_similarity, jaro_winkler_similarity
import sys
import platform
import multiprocessing
import re
import pandas as pd
import numpy as np
from pandas.api.types import is_list_like
from math import trunc
from scipy.stats import chisquare
__all__ = ['entropy', 'entropy_per_byte', 'promptCreds', 'edit_distance']
def entropy(string):
'''
Calculates the Shannon entropy of a string.
string: A string for which to compute the entropy.
'''
# get probability of chars in string
prob = [ string.count(c) / len(string) for c in dict.fromkeys(list(string)) ]
# calculate the entropy
entropy = - sum([ p * math.log(p) / math.log(2.0) for p in prob ])
return entropy
def entropy_per_byte(string):
'''
Calculates the Shannon entropy of a string, divided by the total bytes
in the string. This is an attempt to normalize entropy values between
strings of different lengths.
string: A string for which to compute the entropy per byte
'''
e = entropy(string)
return e / len(string)
def promptCreds(uprompt="Username: ", pprompt="Password: "):
'''
Prompt the user for login credentials for some service. This is a
helpful convenience when using things like Jupyter notebook, where it
may not always be obvious how to collect input from the user. The function
returns a (username, password) tuple.
uprompt: A string containing the username prompt. Default is "Username: ".
pprompt: A string containing the password prompt. Default is "Password: ".
'''
u = input(uprompt)
p = getpass.getpass(pprompt)
return (u,p)
def edit_distance(str1, str2, method="damerau-levenshtein"):
'''
Calculate the edit distance between 'str1' and 'str2' using any of a
number of algorithms.
'str1', 'str2': Input strings
'method': The algorithm to use.
Available algorithms:
* levenshtein
* damerau-levenshtein (DEFAULT)
* hamming
* jaro
* jaro-winkler
Return values:
"levenshtein", "damerau-levenshtein" and "hamming" return integers
"jaro" and "jaro-winkler" return floats in the range of 0.0 (completely
different) to 1.0 (identical strings).
'''
algos = {
"levenshtein":levenshtein_distance,
"damerau-levenshtein":damerau_levenshtein_distance,
"hamming":hamming_distance,
"jaro":jaro_similarity,
"jaro-winkler":jaro_winkler_similarity
}
if not method in list(algos.keys()):
raise ValueError("Unsupported algorithm type: %s" % method)
if str1 is None or str2 is None or not isinstance(str1, str) or not isinstance(str2, str):
raise TypeError("Arguments must be strings.")
distance_function = algos[method]
# All the jellyfish distance functions expect unicode, which is the default
# for Python3. If we're running in Python2, we need to convert them.
python_version = sys.version_info
if python_version.major == 2:
str1 = unicode(str1)
str2 = unicode(str2)
return distance_function(str1, str2)
def benfords(numbers):
'''
Examine the distribution of the first digits in a given corpus of numbers to see
if they correspond to Benford's Law using a chi square test.
Benford's Law, also known as the "first digit law" or the "law of anomalous numbers"
states that there is a specific distribution pattern of the first digits of certain
groups of numbers. See https://en.wikipedia.org/wiki/Benford%27s_law for more
info.
:param numbers: The set of numbers to check against Benford's Law
:type numbers: A list-like object (list, tuple, set, Pandas DataFrame or Series)
containing floats or integers
:Return Value:
The function returns three values in a tuple (chi2, p, counts):
* The 'chi2' value is a float in the range 0..1 that describes how well the observed
distribution of first digits matched the predictions of Benford's Law. Lower is
better.
* The 'p' value is the probability that the computed 'chi2' is significant (i.e., it
tells you whether the chi2 value can be trusted). Its range is also 0..1, but in
this case, higher is better. Generally speaking, if the p-value is >= 0.95 then
the chi2 value is considered significant.
* 'counts' is a Pandas series where the indices are the possible first digits 1-9 and
the values are the observed distributions of those digits. If the observed distributions
didn't match up with Benford's law, the counts may help you identify the anomalous values.
'''
def _first_digit(i: float):
# This doesn't apply to zeros!
if i == 0:
return np.nan
# Make negative numbers positive
if i < 0:
i = abs(i)
# If the number is between 0 and 1, multiply by 10 until it becomes > 1
# so the repeated divisions will work
elif i < 1:
while i < 1:
i *= 10
while i >= 10:
i //= 10
return trunc(i)
_BENFORDS = [
0.301, # 1
0.176, # 2
0.125, # 3
0.097, # 4
0.079, # 5
0.067, # 6
0.058, # 7
0.051, # 8
0.046 # 9
]
if not | is_list_like(numbers) | pandas.api.types.is_list_like |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal dispatcher for training loops."""
import collections
import contextlib
import os.path
import pprint
import time
from typing import Any, Callable, Dict, List, Optional
from absl import flags
from absl import logging
import pandas as pd
import tensorflow as tf
from tensorflow_federated.python.research.utils import adapters
from tensorflow_federated.python.research.utils import checkpoint_manager
from tensorflow_federated.python.research.utils import metrics_manager
from tensorflow_federated.python.research.utils import utils_impl
# Defining training loop flags
with utils_impl.record_hparam_flags():
# Training rounds
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
# Root output directory.
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_string(
'experiment_name', None, 'The name of this experiment. Will be append to '
'--root_output_dir to separate experiment results.')
# Checkpoint and evaluation flags.
flags.DEFINE_integer('rounds_per_eval', 1,
'How often to evaluate the global model.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
flags.DEFINE_integer(
'rounds_per_profile', 0,
'(Experimental) How often to run the experimental TF profiler, if >0.')
FLAGS = flags.FLAGS
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
logging.info('Skipping creation of directory [%s], already exists', path)
def _setup_outputs(root_output_dir, experiment_name, hparam_dict):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = checkpoint_manager.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
metrics_mngr = metrics_manager.ScalarMetricsManager(results_dir)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
create_if_not_exists(summary_logdir)
summary_writer = tf.compat.v2.summary.create_file_writer(summary_logdir)
hparam_dict['metrics_file'] = metrics_mngr.metrics_filename
hparams_file = os.path.join(results_dir, 'hparams.csv')
utils_impl.atomic_write_to_csv( | pd.Series(hparam_dict) | pandas.Series |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserrved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
| assert_frame_equal(unstacked, expected) | pandas.util.testing.assert_frame_equal |
import pandas
from my_lambdata.my_mod import enlarge
df = | pandas.DataFrame({"x":[1,2,3], "y":[4,5,6]}) | pandas.DataFrame |
# feature selection
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
from sklearn.feature_selection import f_regression
np.seterr(divide='ignore', invalid='ignore') # hide error warning for vif
from sklearn.feature_selection import f_regression, RFE
from sklearn.linear_model import LinearRegression
from typing import List, Tuple, Union
from .build import run_regression
def guess_date_column(df:pd.DataFrame) -> None:
guesses = ['date', 'Date', 'day', 'Day', 'week', 'Week', 'Month', 'month']
for x in guesses:
if x in df.columns:
return x
return None
def guess_y_column(df:pd.DataFrame) -> None:
guesses = ['revenue', 'Revenue', 'sales', 'Sales', 'conversions', 'Conversions', 'Purchases', 'purchases']
for x in guesses:
if x in df.columns:
return x
return None
def add_X_labels(X_labels:List[str], add_cols:List[str]) -> List[str]:
for x in add_cols:
if x not in X_labels:
X_labels.append(x)
return X_labels
def del_X_labels(X_labels:List[str], del_cols:List[str]) -> List[str]:
for x in del_cols:
if x in X_labels:
X_labels.remove(x)
return X_labels
def get_all_X_labels(df:pd.DataFrame, y_label:str, date_label:str=None) -> List[str]:
X_labels = list(df.columns)
X_labels.remove(y_label)
if date_label:
X_labels.remove(date_label)
return X_labels
def get_cols_containing(df:pd.DataFrame, containing:str) -> List[str]:
return [x for x in list(df.columns) if containing in x]
def y_variable_correlation(df:pd.DataFrame, y_label:str, X_labels:List[str], min_corr:float=0.3) -> Tuple[List, pd.DataFrame]:
# 1.0 = Perfect
# 0.7 = Strong
# 0.5 = Moderate
# 0.3 = Weak
# 0 = None
all_variables = X_labels.copy()
all_variables.extend([y_label])
corr = df[all_variables].corr()
corr_df = pd.DataFrame({'corr':abs(corr[y_label].drop(y_label))})
corr_df['corr_keep'] = corr_df['corr'] > min_corr
corr_keep = list(corr_df[corr_df['corr_keep']==True].index.values)
return corr_keep, corr_df
def variance_inflation_factor(df:pd.DataFrame, X_labels:List[str], max_vif:int=5) -> Tuple[List, pd.DataFrame]:
# Variance Inflation Factor (VIF)
# tests for colinearity: A VIF of over 10 for some feature indicates that over 90%
# of the variance in that feature is explained by the remaining features. Over 100
# indicates over 99%. Best practice is to keep variables with a VIF less than 5.
X = df[X_labels]
X_np = np.array(X)
vif_results = [(X.columns[i], vif(X_np, i)) for i in range(X_np.shape[1])]
vif_df = | pd.DataFrame(vif_results) | pandas.DataFrame |
# import start
import ast
import asyncio
import calendar
import platform
import subprocess as sp
import time
import traceback
import xml.etree.ElementTree as Et
from collections import defaultdict
from datetime import datetime
import math
import numpy as np
import pandas as pd
from Utility.CDPConfigValues import CDPConfigValues
from Utility.Utilities import Utilities
from Utility.WebConstants import WebConstants
from WebConnection.WebConnection import WebConnection
# import end
## Function to reverse a string
#def reverse(string):
# string = string[::-1]
# return string
class Preprocessor:
""" Preprocessor class is used for preparing the extracted data to be fed to the training algorithm
for further processing.
"""
def __init__(self, project, previous_preprocessed_df=None, preprocessed=None):
"""
:param timestamp_column: Contains the committer timestamp
:type timestamp_column: str
:param email_column: Contains the committer timestamp
:type email_column: str
:param project: project key to be processed
:type project: str
:param project_name: project name to be processed
:type project_name: str
:param web_constants: Constants load from file
:type web_constants: class WebConstants
:param base_timestamp: Instantiating committer timestamp
:type base_timestamp: str
:param developer_stats_df: creating dataframe variable for developer stats
:type developer_stats_df: pandas dataframe
:param developer_sub_module_stats_df: creating dataframe variable for developer sub module stats
:type developer_sub_module_stats_df: pandas dataframe
"""
self.timestamp_column = "COMMITTER_TIMESTAMP"
self.email_column = "COMMITTER_EMAIL"
self.project = project
self.project_name = CDPConfigValues.configFetcher.get('name', project)
self.web_constants = WebConstants(project)
self.base_timestamp = ""
self.developer_stats_df = ""
self.developer_sub_module_stats_df = ""
if preprocessed is None:
if previous_preprocessed_df is None:
self.file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
self.github_data_dump_df = pd.read_csv(
f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}")
self.pre_processed_file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
CDPConfigValues.create_directory(self.pre_processed_file_path)
self.stats_dataframe = pd.DataFrame()
self.sub_module_list = list()
else:
self.file_path = f"{CDPConfigValues.schedule_file_path}/{self.project_name}"
self.github_data_dump_df = pd.DataFrame(previous_preprocessed_df)
self.github_data_dump_df = self.github_data_dump_df.apply(
lambda x: x.str.strip() if x.dtype == "object" else x)
self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
lambda x: pd.Timestamp(x, tz="UTC"))
self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
lambda x: | pd.Timestamp(x) | pandas.Timestamp |
# coding: utf-8
# # Numpy Introduction
# ## numpy arrays
# In[91]:
import numpy as np
arr = np.array([1,3,4,5,6])
arr
# In[8]:
arr.shape
# In[9]:
arr.dtype
# In[10]:
arr = np.array([1,'st','er',3])
arr.dtype
# In[5]:
np.sum(arr)
# ### Creating arrays
# In[11]:
arr = np.array([[1,2,3],[2,4,6],[8,8,8]])
arr.shape
# In[12]:
arr
# In[13]:
arr = np.zeros((2,4))
arr
# In[14]:
arr = np.ones((2,4))
arr
# In[15]:
arr = np.identity(3)
arr
# In[16]:
arr = np.random.randn(3,4)
arr
# In[17]:
from io import BytesIO
b = BytesIO(b"2,23,33\n32,42,63.4\n35,77,12")
arr = np.genfromtxt(b, delimiter=",")
arr
# ### Accessing array elements
# #### Simple indexing
# In[18]:
arr[1]
# In[19]:
arr = np.arange(12).reshape(2,2,3)
arr
# In[20]:
arr[0]
# In[21]:
arr = np.arange(10)
arr[5:]
# In[22]:
arr[5:8]
# In[23]:
arr[:-5]
# In[24]:
arr = np.arange(12).reshape(2,2,3)
arr
# In[25]:
arr[1:2]
# In[26]:
arr = np.arange(27).reshape(3,3,3)
arr
# In[27]:
arr[:,:,2]
# In[28]:
arr[...,2]
# #### Advanced Indexing
# In[29]:
arr = np.arange(9).reshape(3,3)
arr
# In[30]:
arr[[0,1,2],[1,0,0]]
# ##### Boolean Indexing
# In[31]:
cities = np.array(["delhi","banglaore","mumbai","chennai","bhopal"])
city_data = np.random.randn(5,3)
city_data
# In[32]:
city_data[cities =="delhi"]
# In[33]:
city_data[city_data >0]
# In[34]:
city_data[city_data >0] = 0
city_data
# #### Operations on arrays
# In[35]:
arr = np.arange(15).reshape(3,5)
arr
# In[36]:
arr + 5
# In[37]:
arr * 2
# In[38]:
arr1 = np.arange(15).reshape(5,3)
arr2 = np.arange(5).reshape(5,1)
arr2 + arr1
# In[39]:
arr1
# In[40]:
arr2
# In[41]:
arr1 = np.random.randn(5,3)
arr1
# In[42]:
np.modf(arr1)
# #### Linear algebra using numpy
# In[43]:
A = np.array([[1,2,3],[4,5,6],[7,8,9]])
B = np.array([[9,8,7],[6,5,4],[1,2,3]])
A.dot(B)
# In[44]:
A = np.arange(15).reshape(3,5)
A.T
# In[45]:
np.linalg.svd(A)
# In[46]:
a = np.array([[7,5,-3], [3,-5,2],[5,3,-7]])
b = np.array([16,-8,0])
x = np.linalg.solve(a, b)
x
# In[47]:
np.allclose(np.dot(a, x), b)
# # Pandas
# ## Data frames
# In[48]:
import pandas as pd
d = [{'city':'Delhi',"data":1000},
{'city':'Banglaore',"data":2000},
{'city':'Mumbai',"data":1000}]
pd.DataFrame(d)
# In[49]:
df = pd.DataFrame(d)
# ### Reading in data
# In[92]:
city_data = | pd.read_csv(filepath_or_buffer='simplemaps-worldcities-basic.csv') | pandas.read_csv |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: test.py
@time: 2019-05-15 15:09
"""
import pandas as pd
if __name__ == '__main__':
mode = 1
if mode == 1:
df = pd.read_excel('zy_all.xlsx', converters={'出险人客户号': str})
df1 = pd.read_csv('../data/zy_all.csv')
df1['出险人客户号_完整'] = df['出险人客户号']
df1.to_excel('zy_all_t.xlsx')
if mode == 0:
df6 = | pd.read_excel('/Users/luoyonggui/Documents/datasets/work/3/82200946506.xlsx', converters={'出险人客户号': str}) | pandas.read_excel |
"""
use cross validation to plot mean ROC curve, show std
ref:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html#sphx-glr-auto-examples-model-selection-plot-roc-crossval-py
Note that you have to tune the parameters yourself
"""
from scipy import interp
import argparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# import xgboost as xgb
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression,RidgeClassifier,SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
# from mlxtend.classifier import StackingCVClassifier
# import umap
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,precision_recall_curve
from sklearn.datasets import load_iris
# from mlxtend.classifier import StackingCVClassifier
# from mlxtend.feature_selection import ColumnSelector
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
import warnings
warnings.filterwarnings('ignore')
from sklearn.exceptions import ConvergenceWarning
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=ConvergenceWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR,LinearSVC
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier,RadiusNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
# from xgboost import XGBClassifier
def sklearn_RF(par=False):
est = RandomForestClassifier(n_estimators=1000,random_state=0,warm_start=False,n_jobs=-1,class_weight={1:4,0:1})
if par:
est = RandomForestClassifier(**par)
myDict = {}
return est, myDict
def plot_top_features(reg,X,y,output):
current_feature_df = pd.DataFrame()
current_feature_df['features'] = X.columns.tolist()
reg.fit(X,y)
try:
current_feature_df['score'] = list(reg.feature_importances_)
except:
try:
current_feature_df['score'] = list(reg.coef_)
except:
current_feature_df['score'] = list(reg.coef_[0])
current_feature_df = current_feature_df.sort_values('score',ascending=False)
plt.figure(figsize=(len(current_feature_df['features']*2),8))
sns.barplot(x=current_feature_df['features'],y=current_feature_df['score'] )
plt.xticks(rotation=90)
plt.xlabel("")
plt.ylabel("Feature importance")
plt.savefig("%s_feature_importance.pdf"%(output), bbox_inches='tight')
def simple_CV_evaluation(model,params,X,y):
outer = StratifiedKFold(n_splits=3,shuffle=False)
my_pred=[]
my_true=[]
best_features = X.columns.tolist()
auPRC_list = []
auROC_list = []
for train_index, test_index in outer.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
# print (list(set(X_train.index.tolist()).intersection(X_test.index.tolist())))
current_model = dp(model)
current_model.fit(X_train[best_features].values,y_train)
pred_y = current_model.predict_proba(X_test[best_features].values)
pred_y = [x[1] for x in pred_y]
y_test = y_test.tolist()
auROC = roc_auc_score(y_test,pred_y)
auPRC = average_precision_score(y_test,pred_y)
my_pred += pred_y
my_true += y_test
print ("auPRC: %s. auROC: %s"%(auPRC,auROC))
auPRC_list.append(auPRC)
auROC_list.append(auROC)
df = pd.DataFrame()
df['true']=my_true
df['pred']=my_pred
return df,auROC_list,auPRC_list
def plot_auROC_multi(df,color_dict):
sns.set_style("white")
plt.figure()
for s,d in df.groupby('label'):
plot_df = pd.DataFrame()
x_predict,y_predict,_ = roc_curve(d['true'],d['pred'])
auc = roc_auc_score(d['true'],d['pred'])
print (auc)
plot_df['x'] = x_predict
plot_df['y'] = y_predict
sns.lineplot(data=plot_df,x="x",y="y",ci=0,label="%s AUC:%.2f"%(s,auc),color=color_dict[s])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best',title="")
# plt.savefig("auROC.png")
plt.savefig("auROC.pdf", bbox_inches='tight')
plt.close()
def plot_auPRC_multi(df,color_dict):
sns.set_style("white")
plt.figure()
for s,d in df.groupby('label'):
plot_df = pd.DataFrame()
y_predict,x_predict,_ = precision_recall_curve(d['true'],d['pred'])
auc = average_precision_score(d['true'],d['pred'])
print (auc)
plot_df['x'] = x_predict
plot_df['y'] = y_predict
sns.lineplot(data=plot_df,x="x",y="y",ci=0,label="%s AUC:%.2f"%(s,auc),color=color_dict[s])
# plt.plot([0, 1], [0, 1], 'k--')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall curve')
plt.legend(loc='best',title="")
# plt.savefig("auPRC.png")
plt.savefig("auPRC.pdf", bbox_inches='tight')
plt.close()
def define_high_low(x,mu,sigma):
t = 1
low = mu-t*sigma
high = mu+t*sigma
high2= mu+t*sigma
# print (low,high)
if low <= x <= high:
return 0
if x > high2:
return 1
return -1
def boxplot_paired_t_test(a,b,color_dict,ylabel,output):
sns.set_style("whitegrid")
df = pd.DataFrame()
df['All_variants'] = a
df['GWAS_only'] = b
myMin = df.min().min()
myMax = df.max().max()
plot_df = | pd.melt(df) | pandas.melt |
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), | concat([df, df2]) | pandas.concat |
import argparse
import re
import itertools
import functools
import operator
import os
import glob
import pandas as pd
from scipy.stats import gmean
trace_file_pat = (
re.compile(r'^CPU (?P<index>\d+) runs (?P<tracename>[-./\w\d]+)$'),
lambda match: os.path.basename(match['tracename']),
functools.partial(functools.reduce, operator.concat)
)
cpu_stats_pat = (
re.compile(r'^CPU (?P<cpu>\d+) cumulative IPC: \d+\.?\d* instructions: (?P<instructions>\d+) cycles: (?P<cycles>\d+)$'),
operator.methodcaller('groupdict',0),
lambda results : pd.DataFrame.from_records(results, index=['cpu']).astype('int64')
)
cache_stats_pat = (
re.compile(r'^(?P<name>\S+) (?P<type>LOAD|RFO|PREFETCH|TRANSLATION)\s+ACCESS:\s+\d+ HIT:\s+(?P<hit>\d+) MISS:\s+(?P<miss>\d+)$'),
operator.methodcaller('groupdict',0),
lambda results : | pd.DataFrame.from_records(results) | pandas.DataFrame.from_records |
"""
caproj.datagen
~~~~~~~~~~~~~~
This module contains functions for generating the interval metrics used in modeling
for each unique capital project
**Module variables:**
.. autosummary::
endstate_columns
endstate_column_rename_dict
info_columns
info_column_rename_dict
**Module functions:**
.. autosummary::
print_record_project_count
generate_interval_data
print_interval_dict
"""
import os
import pandas as pd
#: List of column names containing info for each project's end-state
endstate_columns = [
"Date_Reported_As_Of",
"Change_Years",
"PID",
"Current_Phase",
"Budget_Forecast",
"Forecast_Completion",
"PID_Index",
]
#: Dictionary for mapping members of ``endstate_columns`` to new column names
endstate_column_rename_dict = {
"Date_Reported_As_Of": "Final_Change_Date",
"Current_Phase": "Phase_End",
"Budget_Forecast": "Budget_End",
"Forecast_Completion": "Schedule_End",
"PID_Index": "Number_Changes",
"Change_Years": "Final_Change_Years",
}
#: List of column names containing descriptive info for each project
info_columns = [
"PID",
"Project_Name",
"Description",
"Category",
"Borough",
"Managing_Agency",
"Client_Agency",
"Current_Phase",
"Current_Project_Years",
"Current_Project_Year",
"Design_Start",
"Original_Budget",
"Original_Schedule",
]
#: Dictionary for mapping members of ``info_columns`` to new column names
info_column_rename_dict = {
"Current_Phase": "Phase_Start",
"Original_Budget": "Budget_Start",
"Original_Schedule": "Schedule_Start",
}
def print_record_project_count(dataframe, dataset="full"):
"""Prints summary of records and unique projects in dataframe
:param dataframe: pd.DataFrame object for the version of the NYC capital
projects data you wish to summarize
:param dataset: string, accepts 'full', 'all', 'training', or 'test'
(default 'full')
:return: prints to standard output, no objects returned
"""
if dataset == "full":
print(
"For the ORIGINAL cleansed data, containing all available NYC capital "
"projects change records:\n"
)
elif dataset == "all":
print(
"For the data containing start and end data for all available "
"NYC capital projects for the ENTIRE INTERVAL of changes "
"covered in the ORIGINAL data:\n"
)
else:
print(
"For the final {} data, containing the {} split of 3-year "
"project data used in this analysis:\n".format(
dataset.upper(), dataset
)
)
# entries
print(f"\tNumber of dataset records: {len(dataframe)}")
# num projects
print(
f"\tNumber of unique projects in dataset: {dataframe['PID'].nunique()}\n"
)
# define the functions used for generating our interval dataframe
def ensure_datetime_and_sort(df):
"""Ensures datetime columns are formatted correctly and changes are sorted
:param df: pd.DataFrame of the cleaned capital projects change records data
:return: Original pd.DataFrame with datetime columns formatted and records
sorted
"""
datetime_cols = [
"Date_Reported_As_Of",
"Design_Start",
"Original_Schedule",
"Forecast_Completion",
]
for col in datetime_cols:
df[col] = | pd.to_datetime(df[col]) | pandas.to_datetime |
#%%
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import traceback
url = "https://www.ceniniger.org/presidentielle"
communes = pd.read_csv("../data/communes.csv")
#%%
def parse_results_table(results_page):
results_table = results_page.find(id="resultat-grid_").find(id="tbody").find_all("tr")
data = []
for row in results_table:
cols = row.find_all('td')
cols = [col.text.strip() for col in cols]
data.append(cols[2:])
out = | pd.DataFrame(data) | pandas.DataFrame |
import requests
import json
import pandas as pd
#initializing variables and data structures
teamDict = {1: "ARS", 2: "AVL", 3: "BRE", 4: "BRI", 5: "BUR", 6: "CHE", 7: "CRY", 8: "EVE", 9: "LEE", 10: "LEI", 11: "LIV", 12: "MCI", 13: "MUN", 14: "NEW", 15: "NOR", 16: "SOU", 17: "TOT", 18: "WAT", 19: "WHU", 20: "WOL"}
positionDict = {1: "GKP", 2: "DEF", 3: "MID", 4: "FWD"}
playerColumns = ["Index", "ID", "Name", "Game Name", "Team", "Position", "Current Price"]
seasonDataColumns = ["Index", "Season Points", "Season Minutes", "Season I", "Season C", "Season T", "Season Bonus", "Season Bonus Points", "Season Beginning Price", "Season End Price", "Season Goals", "Season Assists", "Season YC", "Season RC", "Season Saves", "Season Penalty Saves", "Season OG", "Season Penalty Misses", "Season CS", "Season GC"]
df = | pd.DataFrame(columns=playerColumns) | pandas.DataFrame |
"""
====================
The qc.passqc module
====================
The qc.passqc module contains functions for determining which NPs pass
a set of quality control conditions.
"""
import pandas as pd
def get_reference(condition_ref_string, stats_df):
"""
If condition_ref_string matches a column in stats_df, return that
column. If it doesn't match a column, try to convert it to a float.
:param str condition_ref_string: left or right part of a condition \
from a QC conditions file
:param stats_df: QC statistics table
:type stats_df: :class:`~pandas.DataFrame`
:returns: Either a column of stats_df, a float or a string.
"""
if condition_ref_string in stats_df.columns:
return stats_df.loc[:, condition_ref_string]
try:
return float(condition_ref_string)
except ValueError:
return condition_ref_string
def get_references(left_str, right_str, stats_df):
"""
Parse the left and right references in one line of a QC conditions file.
e.g. "left > right" or "left less_than 5.0"
:param str left_str: Left part of a condition \
from a QC conditions file
:param str right_str: Right part of a condition \
from a QC conditions file
:param stats_df: QC statistics table
:type stats_df: :class:`~pandas.DataFrame`
:returns: left reference, right reference (at least one must be a column of \
stats_df. The other can be another column, a float or a string)
"""
left = get_reference(left_str, stats_df)
right = get_reference(right_str, stats_df)
if not (isinstance(left, pd.Series) or isinstance(right, pd.Series)):
raise QcParamError(
"""Neither {} nor {} matches a column in the stats file.
Current options are: \n\t'{}'""".format(
left_str, right_str,
"'\n\t'".join(stats_df.columns)))
return left, right
def comparison_from_operator(operator, left, right):
"""
Perform a comparison between left and right values in a QC
conditions file.
:param str operator: Comparison to carry out.
:param left: Either a series of QC values or a value to compare QC values to.
:param right: Either a series of QC values or a value to compare QC values to.
:returns: :class:`~pandas.Series` indicating which samples pass the condition.
"""
if operator in ['=', '==', 'eq', 'equals']:
comparison = (left == right)
elif operator in ['>', 'gt', 'greater_than']:
comparison = (left > right)
elif operator in ['>=', 'gte', 'greater_than_or_equal_to']:
comparison = (left >= right)
elif operator in ['<', 'lt', 'less_than']:
comparison = (left < right)
elif operator in ['<=', 'lte', 'less_than_or_equal_to']:
comparison = (left <= right)
elif operator in ['!=', 'neq', 'not_equal_to']:
comparison = (left != right)
else:
raise QcParamError(
'Operator {} not recognized'.format(operator))
return comparison
class QcParamError(Exception):
"""
Exception to be raised if the QC Parameters file is malformed.
"""
def do_comparison(left_str, operator, right_str, stats_df):
"""
Given the condition in a QC Parameters file, and a QC statistics table,
calculate which samples pass the condition.
:param str left_str: Left part of the condition (e.g. "mapped_reads")
:param str operator: How to compare left and right (e.g. greater_than)
:param str right_str: Right part of the condition (e.g. "150000")
:param stats_df: QC statistics table
:type stats_df: :class:`~pandas.DataFrame`
:returns: :class:`~pandas.Series` indicating which samples pass the condition.
"""
left, right = get_references(left_str, right_str, stats_df)
comparison = comparison_from_operator(operator, left, right)
if not isinstance(comparison, pd.Series):
raise QcParamError('Comparison did not return a series object')
return comparison
def parse_conditions_file(conditions_file, stats_df):
"""
Iterate through lines of a conditions file and perform
the indicated comparison for each line.
:param file conditions_file: Open file buffer containing conditions.
:param stats_df: QC statistics table
:type stats_df: :class:`~pandas.DataFrame`
:returns: List of :class:`~pandas.Series` each indicating whether each \
NP passed each quality check.
"""
conditions = []
for line_no, line in enumerate(conditions_file, 1):
fields = line.split()
if (line[0] == '#') or (not fields):
continue
left_str, operator, right_str = fields
try:
this_comparison = do_comparison(left_str, operator, right_str, stats_df)
except QcParamError as err_msg:
raise QcParamError(
'Error in QC conditions file line {}: {}'.format(
line_no, err_msg))
conditions.append(this_comparison)
return conditions
def samples_passing_qc(conditions_file_path, stats_df_path):
"""
Read a QC conditions file and a QC stats file, calculating which
samples pass all specified QC checks.
:param str conditions_file_path: Path to QC conditions file.
:param stats_df_path: Path to QC statistics table
:returns: :class:`~pandas.Series` of samples that pass all specified \
QC checks.
"""
stats_df = pd.read_csv(stats_df_path, delim_whitespace=True)
with open(conditions_file_path, 'r') as conditions_file:
conditions = parse_conditions_file(conditions_file, stats_df)
sample_passes_qc = | pd.concat(conditions, axis=1) | pandas.concat |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
import pandas.core.ops as ops
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
ids=["add", "mul"],
)
def test_add_mul(dtype, opname, exp):
a = pd.array([0, 1, None, 3, 4], dtype=dtype)
b = pd.array([1, 2, 3, None, 5], dtype=dtype)
# array / array
expected = pd.array(exp, dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
op = getattr(ops, "r" + opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_sub(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a - b
expected = pd.array([1, 1, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_div(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a / b
expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398, GH#22793
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = FloatingArray(
np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"),
np.array([False, False, False, True]),
)
if negative:
expected *= -1
tm.assert_extension_array_equal(result, expected)
def test_floordiv(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a // b
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
expected = pd.array([0, 2, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_mod(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a % b
expected = pd.array([0, 0, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar():
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**np.nan
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
np.array([False, False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0**a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1**a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA**a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
np.array([False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array():
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None])
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None])
result = a**b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = pd.array([np.nan, np.nan], dtype="Int64")
result = np.array([1.0, 2.0]) ** arr
expected = pd.array([1.0, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("other", [0, 0.5])
def test_numpy_zero_dim_ndarray(other):
arr = pd.array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
msg = "|".join(
[
r"can only perform ops with numeric values",
r"IntegerArray cannot perform the operation mod",
r"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"ufunc '.*' not supported for the input types, and the inputs could not",
"ufunc '.*' did not contain a loop with signature matching types",
"Addition/subtraction of integers and integer-arrays with Timestamp",
]
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
str_ser = pd.Series("foo", index=s.index)
# with pytest.raises(TypeError, match=msg):
if all_arithmetic_operators in [
"__mul__",
"__rmul__",
]: # (data[~data.isna()] >= 0).all():
res = ops(str_ser)
expected = pd.Series(["foo" * x for x in data], index=s.index)
tm.assert_series_equal(res, expected)
else:
with pytest.raises(TypeError, match=msg):
ops(str_ser)
msg = "|".join(
[
"can only perform ops with numeric values",
"cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *",
"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"cannot subtract DatetimeArray from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
# -----------------------------------------------------------------------------
# TODO test unsigned overflow
def test_arith_coerce_scalar(data, all_arithmetic_operators):
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series(data)
other = 0.01
result = op(s, other)
expected = op(s.astype(float), other)
expected = expected.astype("Float64")
# rmod results in NaN that wasn't NA in original nullable Series -> unmask it
if all_arithmetic_operators == "__rmod__":
mask = (s == 0).fillna(False).to_numpy(bool)
expected.array._mask[mask] = False
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype == "Float64"
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = | pd.Series([2, np.nan, np.nan], dtype="Int64") | pandas.Series |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.