id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value) |
---|---|---|
3303918
|
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import sys
def main():
print("Simple F5 Vulnerablity Scanner by @TheCyberViking and Anon-Researcher")
print("This is for scanning f5 BIG-IP aka CVE-2020-5902")
print("This is in attempt to scan without compromise")
print("")
ip = input("Please Enter the IP you wana scan: ")
targetURL=("https://" + ip +"/tmui/login.jsp")
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
response = requests.get(targetURL, verify=False, timeout=2)
print("Scanning ", targetURL)
if response.status_code == 200:
sys.stdout.write("\033[1;31m")
print('The IP is Vulnerable')
elif response.status_code == 404:
sys.stdout.write("\033[0;32m")
print('The IP Does not Seem to be Vulnerable')
main()
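# A hedged hardening sketch (an addition, not part of the original scanner): a
# small wrapper around the same GET used in main() that returns None instead of
# raising when the host is unreachable or the request times out.
def safe_get(url):
    try:
        return requests.get(url, verify=False, timeout=2)
    except requests.exceptions.RequestException as exc:
        print("Could not reach", url, "-", exc)
        return None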
|
StarcoderdataPython
|
3380619
|
import numpy as np
import matplotlib.pyplot as plt
import imageio
data = np.loadtxt("adveccion.dat")
print(np.shape(data))
n_times = np.shape(data)[0]
x = np.linspace(0.0, 1.0, np.shape(data)[1])
t = np.linspace(0.0, 2.0, np.shape(data)[0])
for i in range(n_times):
print(i, n_times)
filename = "snap_{}.png".format(i)
fig = plt.figure(figsize=(3,3))
plt.plot(x, data[i,:])
plt.title("Tiempo: {:.2f} segundos".format(t[i]))
plt.xlabel("Posicion [metros]")
plt.ylabel("U")
plt.ylim(-0.05,0.05)
plt.xlim(0.0, 1.0)
plt.grid()
plt.savefig(filename, bbox_inches="tight")
plt.close(fig)
with imageio.get_writer('movie.gif', mode='I') as writer:
for i in range(n_times):
print(i, n_times)
filename = "snap_{}.png".format(i)
image = imageio.imread(filename)
writer.append_data(image)
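# Optional cleanup sketch (an addition, not in the original script): remove the
# intermediate PNG snapshots once movie.gif has been written.
import os
for i in range(n_times):
    os.remove("snap_{}.png".format(i))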
|
StarcoderdataPython
|
3340338
|
# Copyright (c) 2019 Beta Five Ltd
#
# SPDX-License-Identifier: Apache-2.0
#
"""Example code using the subtest decorator."""
import os
import sys
import unittest
# Expect to find betatest in the working directory when running these examples
# pylint: disable=wrong-import-position
sys.path.insert(0, os.getcwd())
from betatest.amtest import AMTestRunner
from betatest.subtest import subtest
class TestPasses(unittest.TestCase):
@subtest
def phase_1(self):
self.assertEqual(1, 1)
@subtest
def phase_2(self, color):
self.assertIsNotNone(color)
def test_all(self):
self.phase_1()
self.phase_2(color='red')
self.phase_2(color='green')
class TestFails(unittest.TestCase):
@subtest
def phase_1(self):
self.assertEqual(1, 0)
@subtest
def phase_2(self):
self.assertEqual(2, 2)
@subtest
def phase_3(self, color):
self.assertEqual(color, 'red')
def test_all(self):
self.phase_1()
self.phase_2()
self.phase_3(color='red')
self.phase_3(color='green')
# Allow a '--pass' argument to disable tests that will fail. This is useful for
# running the example in CI.
if len(sys.argv) > 1 and sys.argv[1] == '--pass':
del TestFails
del sys.argv[1]
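# Example invocations (assuming this file is saved as subtest_example.py):
#   python subtest_example.py          # runs both TestPasses and TestFails
#   python subtest_example.py --pass   # drops TestFails, which is useful in CI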
if __name__ == '__main__':
unittest.main(testRunner=AMTestRunner)
|
StarcoderdataPython
|
3349694
|
from ssh2net.core.cisco_iosxe.driver import IOSXEDriver
my_device = {"setup_host": "172.18.0.11", "auth_user": "vrnetlab", "auth_password": "<PASSWORD>"}
iosxe_driver = IOSXEDriver
with IOSXEDriver(**my_device) as conn:
output = conn.send_command("show version")
# send_inputs returns a list of results; print the zeroth result
print(output[0])
conn.send_config_set(["interface loopback123", "description ssh2net was here"])
output = conn.send_command("show run int loopback123")
print(output[0])
conn.send_config_set("no interface loopback123")
|
StarcoderdataPython
|
63233
|
import logging
from .utils.plugins import Plugins, Proxy
from .utils.decos import GenLimiter
from typing import Iterator
class ProxyQuery(Plugins):
"""Handles the querying and operations of plugins"""
@GenLimiter
def exec_iter_plugin(self, method_name: str, sort_asc_fails: bool = True, *args, **kwargs) -> Iterator[Proxy]:
"""Executes a given method in all plugins that return an iterable, then returns an iterable that loops through
each plugins iterable"""
if sort_asc_fails:
self.plugins.sort(key=lambda plugin: plugin.fails)
for plugin in self.plugins:
try:
method = getattr(plugin, method_name)
return_iter = method(*args, **kwargs)
for value in return_iter:
yield value
except Exception:
logging.info(f"FreeProxyScraper plugin \"{plugin.plugin_name}\" has crashed")
plugin.report_fail()
continue
@GenLimiter
def find_proxies(self, test: bool = True) -> Iterator[Proxy]:
"""Uses all plugins to search for proxies"""
proxies = self.exec_iter_plugin("find", True)
if test:
proxies = self.test_proxies(proxies)
return proxies
@GenLimiter
def find_filter(self, country: str = None, ping: int = None,
min_anon_level: int = 0, test: bool = True) -> Iterator[Proxy]:
"""Uses all plugins to finds proxies that meet certain values"""
proxies = self.exec_iter_plugin("find_filter", True, country, ping, min_anon_level)
if test:
proxies = self.test_proxies(proxies)
return proxies
def test_proxies(self, proxies: Iterator[Proxy]) -> Iterator[Proxy]:
"""Takes a iterator of Proxy and returns a generator that skips over every plugin that failed the test"""
for proxy in proxies:
if proxy.test():
yield proxy
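# A hypothetical usage sketch (any constructor arguments ProxyQuery inherits from
# the Plugins base class are assumed and not shown here):
# query = ProxyQuery()
# for proxy in query.find_proxies(test=True):
#     print(proxy)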
|
StarcoderdataPython
|
1786968
|
<filename>app/api/api_interface.py
from abc import ABC, abstractmethod
class ApiInterface(ABC):
"""
This interface is dedicated to modules processing image dataset annotations.
Every annotation reader object that implements these functions should work properly within the app.
An object implementing this interface should be passed to the DataManager object as an init parameter (the same way it is currently done with CocoApi).
"""
@abstractmethod
def load_dataset(self, dataset):
"""
Function for changing dataset, which means switching input annotations file/files.
Parameters
------------
dataset : str
New dataset name
"""
@abstractmethod
def get_image_ids_from_categories(self, cats_list):
"""
Get image ids that contain objects of all given categories
Parameters
------------
cats_list : list of str
List of object categories
Returns
----------
list of int
List of image ids containing objects of given categories
"""
@abstractmethod
def get_images(self, img_ids):
"""
Gets images metadata for given images
Parameters
------------
img_ids : list of int
List of image ids
Returns
----------
list of dicts
List of images metadata dicts (see below) for given image ids
IMAGES METADATA DICTS
----------------------
An image metadata dict contains data regarding the image itself.
REQUIRED PARAMETERS:
{
'file_name': str,
'url': str,
'height': int,
'width': int,
'id': int
}
EXAMPLE:
{
'file_name': '000000532481.jpg',
'coco_url': 'http://images.cocodataset.org/val2017/000000532481.jpg',
'height': 426,
'width': 640,
'id': 532481
}
"""
@abstractmethod
def get_annotations(self, img_ids):
"""
Gets image annotations (objects data) for given images
Parameters
------------
img_ids : list of int
List of image ids
Returns
----------
dict of int : list of dicts
Dict, where:
- key : image id
- value : list of ANNOTATION DICTS (see below)
ANNOTATION DICTS
-----------------
Annotation dict contains data regarding one object on specific image.
REQUIRED PARAMETERS:
{
'segmentation': list of float
'image_id': int
'bbox': list of float (len=4)
'category_id': int,
'category_name': str,
'id': int
}
EXAMPLE:
{
'segmentation': [253.85, 187.23, ...],
'image_id': 532481,
'bbox': [250.82, 168.26, 70.11, 64.88],
'category_id': 1,
'category_name': 'person',
'id': 508910
}
"""
@abstractmethod
def get_captions(self, img_ids):
"""
Gets image captions for given images
Parameters
------------
img_ids : list of int
List of image ids
Returns
----------
dict of int : str
Dict, where:
- key : image id
- value : image caption
"""
@abstractmethod
def get_dataset(self):
"""
Get current dataset name
Returns:
str
"""
@abstractmethod
def get_available_datasets(self):
"""
Get list of all available datasets
Returns:
list of str
"""
@abstractmethod
def get_all_categories(self):
"""
Get list of all available categories
Returns:
list of str
"""
|
StarcoderdataPython
|
4841984
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
When I began working with my dataset, the original prediction task was to tell if
someone was making more or less than 50K as their annual income. With only a
little time to work with the data, the challenge was to use the information to
predict something new, so I thought insight into whether the values indicated
a Male or Female would be an interesting Machine Learning model. I was
shocked that the accuracy was pretty good for the information that was
fed to the model. I did a few workarounds and made new features that allowed
the data submissions to read each other in a more cohesive manner. I found the
best model was working with the XGBClassifier. It gave me the best scores off the
bat with my training and validation sets and worked great on my Test set as well.
"""
),
],
)
layout = dbc.Row([column1])
column2 = dbc.Col(
[
])
|
StarcoderdataPython
|
35305
|
<filename>mrobpy/examples/vis_utils.py
import numpy as np
import pandas as pd
import mrob
from test_utils import get_mc
from sys import platform
import matplotlib
if platform == "darwin":
matplotlib.use('PS')
import matplotlib.pyplot as plt
# Here the Cholesky decomposition for a singular covariance matrix is implemented
def cholesky(sigma):
# obtaining map M between original and truncated matrix
condition = ~(np.all(sigma == 0, axis=1) & np.all(sigma == 0, axis=0))
m = [int(x) for x in condition]
counter = 0
res = []
for el in m:
if el > 0:
res.append(counter)
counter +=1
else:
res.append(None)
M = []
for i in range(6):
tmp = []
for j in range(6):
tmp.append([res[i],res[j]])
M.append(tmp)
M = np.array(M)
# obtaining matrix that is obtained by removing zero columns and rows
block = (sigma[condition,:])[:,condition]
# applying regular cholesky decomposition
L = np.linalg.cholesky(block)
# mapping block decomposition into original matrix
LL = np.zeros_like(sigma)
for i in range(LL.shape[0]):
for j in range(LL.shape[1]):
if all(M[i,j] != None):
k = M[i,j][0]
l = M[i,j][1]
LL[i,j] = L[k,l]
# returning resulting factor
return LL
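# A small self-check sketch for the truncated Cholesky above (the test matrix is
# an assumption chosen for illustration): with two identically zero rows and
# columns, the factor LL should still reproduce sigma via LL @ LL.T.
if __name__ == "__main__":
    _sigma_test = np.diag([1.0, 0.0, 4.0, 9.0, 0.0, 16.0])
    _LL = cholesky(_sigma_test)
    assert np.allclose(_LL @ _LL.T, _sigma_test)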
def get_axis_points(T, sigma, N, K = 1, index = -1, A = None):
if A is None:
A = cholesky(sigma)
points = np.zeros((N,6))
points[:,index] = np.linspace(-K, K, num = N)
points_img = (A @ points.T).T
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
pts = np.array([x.t() for x in propagated])
pts = pts.reshape((-1,3))
pts = np.vstack((pts,np.array([np.nan,np.nan,np.nan]).reshape(-1,3)))
return pts
def get_circumference(T,sigma,N,K=1, index_1=-1, index_2=-1, A=None):
if A is None:
A = cholesky(sigma)
points = np.zeros((N,6))
points[:,index_1] = K*np.cos(np.linspace(0,2*np.pi, num = N))
points[:,index_2] = K*np.sin(np.linspace(0,2*np.pi, num = N))
points_img = (A @ points.T).T
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
pts = np.array([x.t() for x in propagated])
pts = pts.reshape((-1,3))
pts = np.vstack((pts,np.array([np.nan,np.nan,np.nan]).reshape(-1,3)))
return pts
def sigma_visualize_3d(T, sigma, N=100, K=1):
colors = list(matplotlib.colors.CSS4_COLORS.keys())
A = cholesky(sigma)
axes = {
'yaw': get_axis_points(T,sigma,N,K,0,A),
'pitch': get_axis_points(T,sigma,N,K,1,A),
'roll': get_axis_points(T,sigma,N,K,2,A),
'x': get_axis_points(T,sigma,N,K,3,A),
'y': get_axis_points(T,sigma,N,K,4,A),
'z': get_axis_points(T,sigma,N,K,5,A)
}
circumferences = {
'yaw vs pitch' : get_circumference(T,sigma,N,K,0,1,A),
'yaw vs roll' : get_circumference(T,sigma,N,K,0,2,A),
'yaw vs x' : get_circumference(T,sigma,N,K,0,3,A),
'yaw vs y' : get_circumference(T,sigma,N,K,0,4,A),
'yaw vs z' : get_circumference(T,sigma,N,K,0,5,A),
'pitch vs roll' : get_circumference(T,sigma,N,K,1,2,A),
'pitch vs x' : get_circumference(T,sigma,N,K,1,3,A),
'pitch vs y' : get_circumference(T,sigma,N,K,1,4,A),
'pitch vs z' : get_circumference(T,sigma,N,K,1,5,A),
'roll vs x' : get_circumference(T,sigma,N,K,2,3,A),
'roll vs y' : get_circumference(T,sigma,N,K,2,4,A),
'roll vs z' : get_circumference(T,sigma,N,K,2,5,A),
'x vs y' : get_circumference(T,sigma,N,K,3,4,A),
'x vs z' : get_circumference(T,sigma,N,K,3,5,A),
'y vs z' : get_circumference(T,sigma,N,K,4,5,A),
}
return axes, circumferences
def sigma_visualize(T, sigma, N=100, K=[1,1], label="", color=None, ax = None):
colors = list(matplotlib.colors.CSS4_COLORS.keys())
if color is None:
color = colors[np.random.randint(0, len(colors))]
if ax is None:
ax = matplotlib.pyplot
ax.plot(T.t()[0], T.t()[1],'x',color=color)
ax.annotate(label, (T.t()[0], T.t()[1]))
A = cholesky(sigma)
for k in set(K):
# plotting yaw & x plane
labels = ['+yaw','-yaw','+x','-x']
points = []
points.append([0,0,k,0,0,0])
points.append([0,0,-k,0,0,0])
points.append([0,0,0,k,0,0])
points.append([0,0,0,-k,0,0])
for i in range(N+1):
points.append([0,0,k*np.cos(2*np.pi/N*i), k*np.sin(2*np.pi/N*i),0,0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[4:,0],poses[4:,1], label="{}-sigma yaw & x".format(k), color=color)
for i in range(len(labels)):
# ax.annotate(labels[i],xy = (poses[i,0],poses[i,1]), xytext = (poses[i,0]+0.01,poses[i,1]+0.01))
ax.plot(poses[i,0],poses[i,1],'x',color=color)
# plotting x & y plane
labels = ['+x','-x','+y','-y']
points = []
points.append([0,0,0,k,0,0])
points.append([0,0,0,-k,0,0])
points.append([0,0,0,0,k,0])
points.append([0,0,0,0,-k,0])
for i in range(N+1):
points.append([0,0,0,k*np.cos(2*np.pi/N*i), k*np.sin(2*np.pi/N*i),0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[4:,0],poses[4:,1], label="{}-sigma x & y".format(k), color=color)
for i in range(len(labels)):
# ax.annotate(labels[i],xy = (poses[i,0],poses[i,1]), xytext = (poses[i,0]+0.01,poses[i,1]+0.01))
ax.plot(poses[i,0],poses[i,1],'x',color=color)
# plotting yaw & y plane
labels = ['+yaw','-yaw','+y','-y']
points = []
points.append([0,0,k,0,0,0])
points.append([0,0,-k,0,0,0])
points.append([0,0,0,0,k,0])
points.append([0,0,0,0,-k,0])
for i in range(N+1):
points.append([0,0,k*np.cos(2*np.pi/N*i),0, k*np.sin(2*np.pi/N*i),0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[4:,0],poses[4:,1], label="{}-sigma yaw & y".format(k),color=color)
for i in range(len(labels)):
# ax.annotate(labels[i],xy = (poses[i,0],poses[i,1]), xytext = (poses[i,0]+0.01,poses[i,1]+0.01))
ax.plot(poses[i,0],poses[i,1],'x',color=color)
# plotting yaw axis of ellipsoid
points = []
for i in range(N+1):
points.append([0,0,k - i*(2*k)/N, 0,0,0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[:,0],poses[:,1],color=color)
# plotting x axis
points = []
for i in range(N+1):
points.append([0,0,0,k - i*(2*k)/N,0,0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[:,0],poses[:,1],color=color)
# plotting y axis
points = []
for i in range(N+1):
points.append([0,0,0,0,k - i*(2*k)/N, 0])
points = np.array(points)
points_img = np.dot(A,points.transpose()).transpose()
propagated = []
for i in range(len(points_img)):
tmp = mrob.geometry.SE3(T)
tmp.update_lhs(points_img[i])
propagated.append(tmp)
poses = np.array([x.t() for x in propagated])
poses = poses.reshape((-1,3))
ax.plot(poses[:,0],poses[:,1],color=color)
def ellipsoid_wireframe_df(T,sigma,N = 100, K = 1):
axes, circumferences = sigma_visualize_3d(T=T,sigma=sigma,N = N, K = K)
df = pd.DataFrame(columns=['x','y','z'])
for key,val in axes.items():
tmp = pd.DataFrame(val,columns=['x','y','z'])
tmp['label'] = key
df = pd.concat([df,tmp])
for key,val in circumferences.items():
tmp = pd.DataFrame(val,columns=['x','y','z'])
tmp['label'] = key
df = pd.concat([df,tmp])
return df
def mc_pointcloud_df(T, sigma, mean=np.zeros(6),N=100):
poses, xi = get_mc(T=T, sigma=sigma, mean=mean,N=N)
particles = pd.DataFrame(poses, columns=['x','y','z'])
return particles
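# A hypothetical usage sketch (the pose value and the exact form accepted by
# mrob.geometry.SE3 inside the helpers are assumptions):
# T = np.eye(4)
# sigma = np.diag([0.01, 0.01, 0.02, 0.1, 0.1, 0.2])
# df = ellipsoid_wireframe_df(T, sigma, N=50, K=1)  # wireframe points for a 3D plot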
|
StarcoderdataPython
|
41767
|
# Notes from this experiment:
# 1. adapt() is way slower than np.unique -- takes forever for 1M, hangs for 10M
# 2. TF returns error if adapt is inside tf.function. adapt uses graph inside anyway
# 3. OOM in batch mode during sparse_to_dense despite setting sparse in keras
# 4. Mini-batch works but 15x(g)/20x slower than sklearn
# 5. Always replace NaNs in string cols as np.nan is float
# 6. Full graph mode lazily triggers all models together -- produces OOM
# 7. Partial graph mode sequentially executes graph-models
# TODO1: all sparse intermediates, including the outputs
# TODO2: Tune mini-batch size for best performance
import sys
import time
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
import pandas as pd
import math
import warnings
import os
# Force to CPU (default is GPU)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
warnings.filterwarnings('ignore') #cleaner, but not recommended
def readNprep(nRows):
# Read the 1M or the 10M dataset
if nRows == 1:
print("Reading file: criteo_day21_1M")
criteo = pd.read_csv("~/datasets/criteo_day21_1M", delimiter=",", header=None)
else:
print("Reading file: criteo_day21_10M")
criteo = pd.read_csv("~/datasets/criteo_day21_10M", delimiter=",", header=None)
print(criteo.head())
# Replace NaNs with 0 for numeric and empty string for categorical
criteo = criteo.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna(''))
# Pandas infers the type of the first 14 columns as float and int.
# SystemDS reads those as STRINGS and applies passthrough FT on those.
# For a fair comparison, convert those here to str and later back to float
pt = [*range(0,14)]
criteo[pt] = criteo[pt].astype(str)
#print(criteo.info())
return criteo
def getCategoricalLayer(X, name, useNumpy):
# NaN handling. np.nan is a float, which leads to ValueError for str cols
X[name].fillna('', inplace=True)
if useNumpy:
vocab = np.unique(X[name].astype(np.string_))
onehot = layers.StringLookup(vocabulary=vocab, output_mode="multi_hot", num_oov_indices=0, sparse=True)
# adapt is not required if vocabulary is passed
else:
onehot = layers.StringLookup(output_mode="multi_hot", num_oov_indices=0)
df2tf = tf.convert_to_tensor(np.array(X[name], dtype=np.string_))
onehot.adapt(df2tf)
#print("#uniques in col ", name, " is ", onehot.vocabulary_size())
return onehot
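# A toy illustration of the two paths above (the column values are made up):
# passing a precomputed vocabulary skips the slow adapt() pass noted in point 1
# of the header comments.
#   vocab = np.unique(np.array(["a", "b", "a"], dtype=np.string_))
#   layer = layers.StringLookup(vocabulary=vocab, output_mode="multi_hot", num_oov_indices=0)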
def getLayers(X):
# Pass-through transformation -- convert to float
pt = [*range(0,14)]
X[pt] = X[pt].astype(np.float64)
# Build a dictionary with symbolic input tensors w/ proper dtype
inputs = {}
for name, column in X.items():
dtype = column.dtype
if dtype == object:
dtype = tf.string
else:
dtype = tf.float64
inputs[name] = tf.keras.Input(shape=(1,), dtype=dtype, sparse=True)
# Separate out the numeric inputs
numeric = {name:input for name,input in inputs.items()
if input.dtype==tf.float64}
# Concatenate the numeric inputs together and
# add to the list of layers as is
prepro = [layers.Concatenate()(list(numeric.values()))]
# Recode and dummycode the string inputs
for name, input in inputs.items():
if input.dtype == tf.float64:
continue
onehot = getCategoricalLayer(X, name, True) #use np.unique
encoded = onehot(input)
# Append to the same list
prepro.append(encoded)
# Concatenate all the preprocessed inputs together,
# and build a model to apply batch wise later
cat_layers = layers.Concatenate()(prepro)
print(cat_layers)
model_prep = tf.keras.Model(inputs, cat_layers)
return model_prep
def lazyGraphTransform(X, model, n, isSmall):
# This method builds a graph of all the mini-batch transformations
# by pushing the loop-slicing logic inside a tf.function.
# However, lazily triggering all the models produce OOM
X_dict = {name: tf.convert_to_tensor(np.array(value)) for name, value in X.items()}
res = batchTransform(X_dict, model, X.shape[0], isSmall)
@tf.function
def batchTransform(X, model_prep, n, isSmall):
# Batch-wise transform to avoid OOM
# 10k/1.5k: best performance within memory budget
batch_size = 10000 if isSmall==1 else 1500
beg = 0
allRes = []
while beg < n:
end = beg + batch_size
if end > n:
end = n
batch_dict = {name: X[name][beg:end] for name, value in X.items()}
X_batch = model_prep(batch_dict)
print(X_batch[:1, :]) #print the placeholder
allRes.append(X_batch)
if end == n:
break
else:
beg = end
out = tf.stack(allRes, axis=0) #fix rank
print(out.shape)
return out
def batchGraphTransform(X, model, n, isSmall):
# Batch-wise eager transform to avoid OOM
# 10k/1.5k: best performance within memory budget
batch_size = 10000 if isSmall==1 else 1500
beg = 0
while beg < n:
end = beg + batch_size
if end > n:
end = n
batch_dict = {name: np.array(value)[beg:end] for name, value in X.items()}
X_batch = transform(batch_dict, model)
# Don't stack the results to avoid OOM
print(X_batch[:1, :]) #print first 1 row
if end == n:
break
else:
beg = end
@tf.function
def transform(X_dict, model_prep):
X_prep = model_prep(X_dict)
#print(X_prep[:5, :]) #print to verify lazy execution
return X_prep
isSmall = int(sys.argv[1]) #1M vs 10M subset of Criteo
X = readNprep(isSmall)
t1 = time.time()
model = getLayers(X)
# Lazy transform triggers all models together -- produces OOM
#res = lazyGraphTransform(X, model, X.shape[0], isSmall)
# Partially lazy mode keeps the slice-loop outside of tf.function
batchGraphTransform(X, model, X.shape[0], isSmall)
print("Elapsed time for transformations using tf-keras = %s sec" % (time.time() - t1))
#np.savetxt("X_prep_sk.csv", X_prep, fmt='%1.2f', delimiter=',') #dense
#sp.sparse.save_npz("X_prep_sk.npz", X_prep) #sparse
|
StarcoderdataPython
|
3331752
|
# mapping from itemid -> treasure chest subtype, 0 is normal, 1 is small, 2 is boss, 3 is goddess chest
tboxSubtypes = [
0x00,
0x00,
0x01,
0x01,
0x01,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x02,
0x02,
0x02,
0x00,
0x02,
0x02,
0x02,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x02,
0x02,
0x02,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x01,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x00,
0x00,
0x03,
0x00,
0x00,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x03,
0x03,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x03,
0x03,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
]
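# Quick sanity-check sketch (item id 25 is just an illustrative index): per the
# mapping comment at the top, subtype 0x02 marks a boss chest.
assert tboxSubtypes[25] == 0x02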
|
StarcoderdataPython
|
4834
|
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
def open_macs_db(db_filename):
global macs_db
if not os.path.exists(db_filename):
print("MAC addresses database not found")
sys.exit(1)
macs_db = sqlite3.connect(db_filename)
def fetch_MACs(number):
return [x[0].encode('ascii', 'ignore') for x in macs_db.execute("select mac from macs where status = 0 order by rowid asc limit ?", (number,)).fetchall()]
def set_mac_status(mac, wmac, status):
macs_db.execute("update macs set status = ?, last_touch = strftime('%s','now'), wmac = ? where mac = ?", (status, wmac, mac))
macs_db.commit()
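# A hedged sketch of the macs table implied by the queries above (the column
# types are assumptions; only the column names and the status=0 "unused"
# convention come from the code):
#   CREATE TABLE macs (mac TEXT, wmac TEXT, status INTEGER DEFAULT 0, last_touch INTEGER);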
def print_exception(e):
print ('Exception: {}, on line {}'.format(e, sys.exc_info()[-1].tb_lineno))
def erase_flash(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_erases = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'Chip erase completed successfully' in nextline:
sys.stdout.write('Board erased OK on port %s\n' % port)
num_erases += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_erases != 1:
working_threads[port] = None
def read_wlan_mac(port, command):
global working_threads
global wmacs
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
mac_read = False
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'MAC: ' in nextline:
wmacs[port] = nextline[5:-1].replace(":", "-").upper()
sys.stdout.write('MAC address %s read OK on port %s\n' % (nextline[5:-1], port))
mac_read = True
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or not mac_read:
working_threads[port] = None
def set_vdd_sdio_voltage(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'VDD_SDIO setting complete' in nextline:
sys.stdout.write('Board VDD_SDIO Voltage configured OK on port %s\n' % port)
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0:
working_threads[port] = None
def flash_firmware(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_hashes = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'at 0x00001000' in nextline:
sys.stdout.write('Bootloader programmed OK on port %s\n' % port)
elif 'at 0x00008000' in nextline:
sys.stdout.write('Partition table programmed OK on port %s\n' % port)
elif 'at 0x00010000' in nextline:
sys.stdout.write('Application programmed OK on port %s\n' % port)
elif 'Hash of data verified' in nextline:
num_hashes += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_hashes != 3:
working_threads[port] = None
def run_initial_test(port, board):
global working_threads
if board == 'LoPy':
import run_initial_lopy_test as run_test
elif board == 'LoPy4':
import run_initial_lopy4_test as run_test
elif board == 'SiPy':
import run_initial_sipy_test as run_test
else:
import run_initial_wipy_test as run_test
try:
if not run_test.test_board(port):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def flash_lpwan_mac(port, mac):
import flash_lpwan_mac
global working_threads
try:
if not flash_lpwan_mac.program_board(port, mac):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_final_test(port, board, mac):
if board == 'LoPy':
import run_final_lopy_test as run_test
elif board == 'LoPy4':
import run_final_lopy4_test as run_test
else:
import run_final_sipy_test as run_test
try:
if not run_test.test_board(port, mac, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_qa_test(port, board):
global working_threads
if board == 'LoPy':
import run_qa_lopy_test as run_test
elif board == 'LoPy4':
import run_qa_lopy4_test as run_test
elif board == 'SiPy':
import run_qa_sipy_test as run_test
else:
import run_qa_wipy_test as run_test
try:
if not run_test.test_board(port, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def main():
cmd_parser = argparse.ArgumentParser(description='Flash the ESP32 and optionally run a small test on it.')
cmd_parser.add_argument('--esptool', default=None, help='the path to the esptool')
cmd_parser.add_argument('--espefuse', default=None, help='the path to the espefuse')
cmd_parser.add_argument('--boot', default=None, help='the path to the bootloader binary')
cmd_parser.add_argument('--table', default=None, help='the path to the partitions table')
cmd_parser.add_argument('--app', default=None, help='the path to the application binary')
cmd_parser.add_argument('--macs', default="macs.db", help='the path to the MAC addresses database')
cmd_parser.add_argument('--ports', default=['/dev/ttyUSB0'], nargs='+', help="the serial ports of the ESP32's to program")
cmd_parser.add_argument('--erase', default=None, help='set to True to erase the boards first')
cmd_parser.add_argument('--qa', action='store_true', help='just do some quality assurance test')
cmd_parser.add_argument('--board', default='LoPy', help='identifies the board to be flashed and tested')
cmd_parser.add_argument('--revision', default='1', help='identifies the hardware revision')
cmd_args = cmd_parser.parse_args()
global working_threads
global wmacs
output = ""
ret = 0
global_ret = 0
if cmd_args.qa:
raw_input("Please reset all the boards, wait until the LED starts blinking and then press enter...")
time.sleep(2.5) # wait for the board to reset
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_qa_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
print("Failed QA test on board connected to %s" % port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("QA test succeeded on all boards:-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the QA test!")
print("=============================================================")
global_ret = 1
else:
print("Reading the WLAN MAC address...")
try:
for port in cmd_args.ports:
cmd = ['python', 'esptool.py', '--port', port, 'read_mac']
working_threads[port] = threading.Thread(target=read_wlan_mac, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error reading the WLAN MAC on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("WLAN MAC address reading succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: WLAN MAC address reading failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
if int(cmd_args.revision) > 1:
# program the efuse bits to set the VDD_SDIO voltage to 1.8V
try:
print('Configuring the VDD_SDIO voltage...')
for port in cmd_args.ports:
cmd = ['python', cmd_args.espefuse, '--port', port, '--do-not-confirm', 'set_flash_voltage', '1.8V']
working_threads[port] = threading.Thread(target=set_vdd_sdio_voltage, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error setting the VDD_SDIO voltage on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("VDD_SDIO voltage setting succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: VDD_SDIO voltage setting failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
if cmd_args.erase:
try:
print('Erasing flash memory... (will take a few seconds)')
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'erase_flash']
working_threads[port] = threading.Thread(target=erase_flash, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error erasing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch erasing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch erasing failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
try:
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
open_macs_db(cmd_args.macs)
macs_list = fetch_MACs(len(cmd_args.ports))
if len(macs_list) < len(cmd_args.ports):
print("No enough remaining MAC addresses to use")
sys.exit(1)
mac_per_port = {}
i = 0
for port in cmd_args.ports:
mac_per_port[port] = macs_list[i]
i += 1
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', cmd_args.boot,
'0x8000', cmd_args.table, '0x10000', cmd_args.app]
working_threads[port] = threading.Thread(target=flash_firmware, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programming board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
else:
print("Board on port %s programmed OK" % port)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch firmware programming failed on some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please place all boards into run mode, RESET them and then \n press enter to continue with the testing process...")
time.sleep(5.0) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_initial_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error testing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
elif cmd_args.board == 'WiPy':
print("Batch test OK on port %s, firmware version %s" % (port, fw_version.number))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), ' ', 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch testing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch testing failed in some boards!")
print("=============================================================")
global_ret = 1
# only do the MAC programming and MAC verification for the LoPy, SiPy and LoPy4
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
print("Waiting before programming the LPWAN MAC address...")
time.sleep(3.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
set_mac_status(mac_per_port[port], "", DB_MAC_LOCK) # mark them as locked, so if the script fails and doesn't get to save, they wont be accidentally reused
working_threads[port] = threading.Thread(target=flash_lpwan_mac, args=(port, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programing MAC address on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch MAC programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch MAC programming failed in some boards!")
print("=============================================================")
global_ret = 1
print("Waiting for the board(s) to reboot...")
time.sleep(4.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_final_test, args=(port, cmd_args.board, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
print("Error performing MAC address test on port %s" % port)
else:
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_OK)
print("Final test OK on port %s, firmware version %s, MAC address %s" % (port, fw_version.number, mac_per_port[port]))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), '%s' % (mac_per_port[port]), 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Final test succeeded on all boards :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the final test!")
print("=============================================================")
global_ret = 1
macs_db.close()
sys.exit(global_ret)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3281665
|
import asyncio
import numpy as np
import ucp.utils
async def worker(rank, eps, args):
futures = []
# Send my rank to all others
for ep in eps.values():
futures.append(ep.send(np.array([rank], dtype="u4")))
# Recv from all others
recv_list = []
for ep in eps.values():
recv_list.append(np.empty(1, dtype="u4"))
futures.append(ep.recv(recv_list[-1]))
await asyncio.gather(*futures)
# We expect to get the sum of all ranks excluding ours
expect = sum(range(len(eps) + 1)) - rank
got = np.concatenate(recv_list).sum()
assert expect == got
def test_all_comm(n_workers=4):
ucp.utils.run_on_local_network(n_workers, worker)
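# Allow running this check directly as a script as well (an addition, not part
# of the original test module):
if __name__ == "__main__":
    test_all_comm()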
|
StarcoderdataPython
|
107093
|
<reponame>devolksbank/AWS-IoT-Fresh-Cloud-Coffee
import boto3
import logging
from time import time
from boto3.dynamodb.conditions import Key
statusTableName = "<insert-device-status-table-here>"
deviceIdTableName = "deviceIds"
snsTopic = "<insert-topic-arn-here>"
logger = logging.getLogger()
ddb = boto3.resource('dynamodb')
def sendPushNotification(message):
sns = boto3.client('sns')
return sns.publish(TopicArn=snsTopic,Message=message, MessageStructure='string')
def getDeviceStatus(deviceId):
statusTable = ddb.Table(statusTableName)
result = statusTable.query(KeyConditionExpression=Key('deviceID').eq(deviceId), ScanIndexForward=False)["Items"]
return(result[0]["status"] if result else "Working")
def getDeviceId(serialNumber):
table = ddb.Table(deviceIdTableName)
result = table.get_item(Key = {'iot-serialNumber':serialNumber})
if 'Item' not in result:
logger.error("Device " + serialNumber + " could not be found.")
return
return(result['Item']['deviceId'])
def markAsDefect(deviceId):
table = ddb.Table(statusTableName)
item = {"deviceID":deviceId,
"timestamp":round(time()*1000),
"status":"Defect"}
return(table.put_item(Item=item))
def lambda_handler(event, context):
serialNumber = event['serialNumber']
deviceId = getDeviceId(serialNumber)
deviceStatus = getDeviceStatus(deviceId)
markAsDefect(deviceId)
if("Defect" not in deviceStatus):
sendPushNotification("Device " + str(deviceId) + " has been marked as defect.")
|
StarcoderdataPython
|
3353011
|
"""Define tests for the "Node" object."""
import tempfile
from unittest.mock import MagicMock, PropertyMock, mock_open
import aiohttp
import pytest
import smb
from pyairvisual import CloudAPI
from pyairvisual.errors import NodeProError
from pyairvisual.node import NodeSamba
from tests.async_mock import patch
from tests.common import (
TEST_API_KEY,
TEST_NODE_ID,
TEST_NODE_IP_ADDRESS,
TEST_NODE_PASSWORD,
load_fixture,
)
@pytest.mark.asyncio
async def test_node_by_id(aresponses):
"""Test getting a node's info by its ID from the cloud API."""
aresponses.add(
"www.airvisual.com",
"/api/v2/node/12345",
"get",
aresponses.Response(
text=load_fixture("node_by_id_response.json"),
headers={"Content-Type": "application/json"},
status=200,
),
)
async with aiohttp.ClientSession() as session:
cloud_api = CloudAPI(TEST_API_KEY, session=session)
data = await cloud_api.node.get_by_node_id(TEST_NODE_ID)
assert data["current"]["tp"] == 2.3
assert data["current"]["hm"] == 73
assert data["current"]["p2"] == 35
assert data["current"]["co"] == 479
@pytest.mark.asyncio
async def test_node_by_samba_connect_errors():
"""Test various errors arising during connection."""
node = NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD)
with patch(
"smb.SMBConnection.SMBConnection.connect",
side_effect=smb.base.NotReadyError,
):
with pytest.raises(NodeProError) as err:
await node.async_connect()
assert "The Node/Pro unit returned an error: " in str(err)
with patch(
"smb.SMBConnection.SMBConnection.connect",
side_effect=smb.base.SMBTimeout,
):
with pytest.raises(NodeProError) as err:
await node.async_connect()
assert "Timed out while talking to the Node/Pro unit" in str(err)
with patch(
"smb.SMBConnection.SMBConnection.connect",
side_effect=ConnectionRefusedError,
):
with pytest.raises(NodeProError) as err:
await node.async_connect()
assert "Couldn't find a Node/Pro unit at IP address: 192.168.1.100" in str(err)
with patch(
"smb.SMBConnection.SMBConnection.connect",
return_value=False,
):
with pytest.raises(NodeProError) as err:
await node.async_connect()
assert "No data or results returned" in str(err)
@pytest.mark.asyncio
async def test_node_by_samba_dict_response():
"""Test getting a node's info over the local network (via Samba) – dict variant."""
# Mock the tempfile that current measurements get loaded into:
measurements_response = load_fixture("node_measurements_samba_dict_response.json")
mock_measurements_tmp_file = MagicMock()
mock_measurements_tmp_file.read.return_value = measurements_response.encode()
# Mock the history file that SMBConnection returns:
mock_history_tmp_file = MagicMock()
type(mock_history_tmp_file).name = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock the tempfile that history data gets loaded into:
mock_history_file = MagicMock()
type(mock_history_file).filename = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock opening the history file into a CSV reader:
mop = mock_open(read_data=load_fixture("node_history_samba_response.txt"))
mop.return_value.__iter__ = lambda self: self
mop.return_value.__next__ = lambda self: next(iter(self.readline, ""))
with patch.object(
tempfile,
"NamedTemporaryFile",
side_effect=[mock_measurements_tmp_file, mock_history_tmp_file],
), patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath", return_value=[mock_history_file]
), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
), patch(
"smb.SMBConnection.SMBConnection.close"
), patch(
"builtins.open", mop
):
async with NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD) as node:
measurements = await node.async_get_latest_measurements()
history = await node.async_get_history()
assert measurements["last_measurement_timestamp"] == 1584204767
assert measurements["measurements"]["co2"] == "442"
assert measurements["measurements"]["humidity"] == "35"
assert measurements["measurements"]["pm0_1"] == "3"
assert measurements["measurements"]["pm1_0"] == "4"
assert measurements["measurements"]["aqi_cn"] == "6"
assert measurements["measurements"]["aqi_us"] == "17"
assert measurements["measurements"]["pm2_5"] == "4.0"
assert measurements["measurements"]["temperature_C"] == "19.3"
assert measurements["measurements"]["temperature_F"] == "66.8"
assert measurements["measurements"]["voc"] == "-1"
assert len(history["measurements"]) == 7
assert history["trends"] == {
"aqi_cn": "decreasing",
"aqi_us": "decreasing",
"co2": "decreasing",
"humidity": "increasing",
"pm0_1": "decreasing",
"pm1_0": "decreasing",
"pm2_5": "decreasing",
"voc": "flat",
}
@pytest.mark.asyncio
async def test_node_by_samba_fewer_trend_measurements():
"""Test getting a node's trends with a configured number of measurements."""
# Mock the tempfile that current measurements get loaded into:
measurements_response = load_fixture("node_measurements_samba_list_response.json")
mock_measurements_tmp_file = MagicMock()
mock_measurements_tmp_file.read.return_value = measurements_response.encode()
# Mock the history file that SMBConnection returns:
mock_history_tmp_file = MagicMock()
type(mock_history_tmp_file).name = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock the tempfile that history data gets loaded into:
mock_history_file = MagicMock()
type(mock_history_file).filename = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock opening the history file into a CSV reader:
mop = mock_open(read_data=load_fixture("node_history_samba_response.txt"))
mop.return_value.__iter__ = lambda self: self
mop.return_value.__next__ = lambda self: next(iter(self.readline, ""))
with patch.object(
tempfile,
"NamedTemporaryFile",
side_effect=[mock_measurements_tmp_file, mock_history_tmp_file],
), patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath", return_value=[mock_history_file]
), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
), patch(
"smb.SMBConnection.SMBConnection.close"
), patch(
"builtins.open", mop
):
async with NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD) as node:
history = await node.async_get_history(measurements_to_use=3)
assert history["trends"] == {
"aqi_cn": "flat",
"aqi_us": "flat",
"co2": "decreasing",
"humidity": "decreasing",
"pm0_1": "flat",
"pm1_0": "decreasing",
"pm2_5": "flat",
"voc": "flat",
}
@pytest.mark.asyncio
async def test_node_by_samba_get_file_errors():
"""Test various errors arising while getting a file via Samba."""
node = NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD)
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
side_effect=smb.base.NotConnectedError,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_latest_measurements()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
side_effect=smb.base.SMBTimeout,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_latest_measurements()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
side_effect=smb.smb_structs.UnsupportedFeature,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_latest_measurements()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
side_effect=smb.smb_structs.ProtocolError,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_latest_measurements()
@pytest.mark.asyncio
async def test_node_by_samba_history_errors():
"""Test various errors arising while getting history."""
node = NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD)
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath",
side_effect=smb.base.NotConnectedError,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_history()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath",
side_effect=smb.base.SMBTimeout,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_history()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath",
side_effect=smb.smb_structs.UnsupportedFeature,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_history()
with patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath",
side_effect=smb.smb_structs.ProtocolError,
):
with pytest.raises(NodeProError):
await node.async_connect()
_ = await node.async_get_history()
@pytest.mark.asyncio
async def test_node_by_samba_list_response():
"""Test getting a node's info over the local network (via Samba) – list variant."""
# Mock the tempfile that current measurements get loaded into:
measurements_response = load_fixture("node_measurements_samba_list_response.json")
mock_measurements_tmp_file = MagicMock()
mock_measurements_tmp_file.read.return_value = measurements_response.encode()
# Mock the history file that SMBConnection returns:
mock_history_tmp_file = MagicMock()
type(mock_history_tmp_file).name = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock the tempfile that history data gets loaded into:
mock_history_file = MagicMock()
type(mock_history_file).filename = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock opening the history file into a CSV reader:
mop = mock_open(read_data=load_fixture("node_history_samba_response.txt"))
mop.return_value.__iter__ = lambda self: self
mop.return_value.__next__ = lambda self: next(iter(self.readline, ""))
with patch.object(
tempfile,
"NamedTemporaryFile",
side_effect=[mock_measurements_tmp_file, mock_history_tmp_file],
), patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath", return_value=[mock_history_file]
), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
), patch(
"smb.SMBConnection.SMBConnection.close"
), patch(
"builtins.open", mop
):
async with NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD) as node:
measurements = await node.async_get_latest_measurements()
history = await node.async_get_history()
assert measurements["last_measurement_timestamp"] == 1584204767
assert measurements["measurements"]["co2"] == "442"
assert measurements["measurements"]["humidity"] == "35"
assert measurements["measurements"]["pm0_1"] == "3"
assert measurements["measurements"]["pm1_0"] == "4"
assert measurements["measurements"]["aqi_cn"] == "6"
assert measurements["measurements"]["aqi_us"] == "17"
assert measurements["measurements"]["pm2_5"] == "4.0"
assert measurements["measurements"]["temperature_C"] == "19.3"
assert measurements["measurements"]["temperature_F"] == "66.8"
assert measurements["measurements"]["voc"] == "-1"
assert len(history["measurements"]) == 7
assert history["trends"] == {
"aqi_cn": "decreasing",
"aqi_us": "decreasing",
"co2": "decreasing",
"humidity": "increasing",
"pm0_1": "decreasing",
"pm1_0": "decreasing",
"pm2_5": "decreasing",
"voc": "flat",
}
@pytest.mark.asyncio
async def test_node_by_samba_no_history_files():
"""Test the Node/Pro not having any history files where expected."""
# Mock the tempfile that current measurements get loaded into:
measurements_response = load_fixture("node_measurements_samba_list_response.json")
mock_measurements_tmp_file = MagicMock()
mock_measurements_tmp_file.read.return_value = measurements_response.encode()
# Mock the history file that SMBConnection returns:
mock_history_tmp_file = MagicMock()
with patch.object(
tempfile,
"NamedTemporaryFile",
side_effect=[mock_measurements_tmp_file, mock_history_tmp_file],
), patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
), patch(
"smb.SMBConnection.SMBConnection.listPath", return_value=[]
), patch(
"smb.SMBConnection.SMBConnection.close"
):
with pytest.raises(NodeProError):
async with NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD) as node:
await node.async_get_history()
@pytest.mark.asyncio
async def test_node_by_samba_no_sensor_life_data():
"""Test a proper response when no sensor life values are returned."""
# Mock the tempfile that current measurements get loaded into:
measurements_response = load_fixture(
"node_measurements_samba_no_sensor_life_response.json"
)
mock_measurements_tmp_file = MagicMock()
mock_measurements_tmp_file.read.return_value = measurements_response.encode()
# Mock the history file that SMBConnection returns:
mock_history_tmp_file = MagicMock()
type(mock_history_tmp_file).name = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock the tempfile that history data gets loaded into:
mock_history_file = MagicMock()
type(mock_history_file).filename = PropertyMock(
return_value="202003_AirVisual_values.txt"
)
# Mock opening the history file into a CSV reader:
mop = mock_open(read_data=load_fixture("node_history_samba_response.txt"))
mop.return_value.__iter__ = lambda self: self
mop.return_value.__next__ = lambda self: next(iter(self.readline, ""))
with patch.object(
tempfile,
"NamedTemporaryFile",
side_effect=[mock_measurements_tmp_file, mock_history_tmp_file],
), patch("smb.SMBConnection.SMBConnection.connect"), patch(
"smb.SMBConnection.SMBConnection.listPath", return_value=[mock_history_file]
), patch(
"smb.SMBConnection.SMBConnection.retrieveFile",
), patch(
"smb.SMBConnection.SMBConnection.close"
), patch(
"builtins.open", mop
):
async with NodeSamba(TEST_NODE_IP_ADDRESS, TEST_NODE_PASSWORD) as node:
measurements = await node.async_get_latest_measurements()
assert measurements["status"]["sensor_life"] == {}
|
StarcoderdataPython
|
4814733
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Pre-renders templates (for both collaborative filtering and content-based use)
for all products and saves them in the database.
**Command-line parameters**
*environment*
The intended environment, as defined in mongoid.yml.
*product_ids*
A list of comma-separated product ids to be processed.
If not informed, all products in the product_product_strengths_window will be considered.
If --all, all products (unfiltered) will be considered.
**Examples of usage**
``python consolidate_product_templates.py development``
``python consolidate_product_templates.py development 1111,22,333,444``
"""
import sys
import traceback
from time import time
from barbante.maintenance.template_consolidation import consolidate_product_templates
import barbante.utils.logging as barbante_logging
from barbante.context.context_manager import new_context
from barbante.context import init_session
log = barbante_logging.get_logger(__name__)
def main(argv):
if len(argv) < 1:
msg = "You must specify the environment"
log.error(msg)
return {"success": False, "message": msg}
    session = None
    timestamp = None
    try:
        # command-line arguments
        env = argv[0]
        session = init_session(env)
product_ids = None
if len(argv) >= 2:
product_ids = argv[1]
if product_ids != "--all":
product_ids = argv[1].split(",")
timestamp = session.get_present_date()
start = time()
latest_run = session.data_proxy.fetch_latest_batch_info_product_template_consolidation()
if latest_run:
if latest_run.get("status") == "running":
msg = "An old consolidation batch is still running. Won't start another one."
log.info(msg)
return {"success": False, "message": msg}
session.data_proxy.save_timestamp_product_template_consolidation(
status="running", timestamp=timestamp)
consolidate_product_templates(session, product_ids)
session.data_proxy.ensure_indexes_cache()
elapsed_time = time() - start
session.data_proxy.save_timestamp_product_template_consolidation(
status="success", timestamp=timestamp, elapsed_time=elapsed_time)
return {"success": True}
    except Exception:
        log.exception('Exception on {0}:'.format(__name__))
        # Only record the failure if the session and timestamp were created
        # before the exception was raised.
        if session is not None and timestamp is not None:
            session.data_proxy.save_timestamp_product_template_consolidation(
                status="failed", timestamp=timestamp)
        return {"success": False, "message": traceback.format_exc()}
if __name__ == '__main__':
with new_context():
print(main(sys.argv[1:]))
|
StarcoderdataPython
|
1707198
|
<reponame>dimartinot/P3-Collaborative<gh_stars>1-10
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 256 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 3e-3 # for soft update of target parameters
LR_ACTOR = 1e-3 # learning rate of the actor
LR_CRITIC = 3e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
EPSILON = 1.0 # explore->exploit noise process added to act step
EPSILON_DECAY = 0.999 # decay rate for noise process
UPDATE_NUM = 3 # How many times a learning phase is run
MIN_EP_FOR_LEARNING = 300 # Minimum episode to run learning procedure
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MADDPG(object):
'''Wrapper class for multi-agents DDPG'''
def __init__(self, state_size, action_size, num_agents, random_seed):
self.state_size = state_size
self.action_size = action_size
self.num_agents = num_agents
self.full_action_size = self.action_size*self.num_agents
# Common replay buffer for both agents: sampling will be done through MADDPG, that will then transmit to agents the sampled experiences
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed) # Replay memory
# Array of agents
self.maddpg_agents = [
Agent(state_size, action_size, state_size*num_agents, action_size*num_agents, num_agents, random_seed)
for _ in range(num_agents)
] #create agents
self.episodes_before_training = MIN_EP_FOR_LEARNING
self.random_seed = random_seed
def reset(self):
for agent in self.maddpg_agents:
agent.reset()
def step(self, states, actions, rewards, next_states, dones, i_episode):
"""Save experience in replay memory, and use random sample from buffer to learn."""
full_states = np.reshape(states, newshape=(-1))
full_next_states = np.reshape(next_states, newshape=(-1))
# Save experience / reward
# "state", "action", "full_state","full_action","reward", "next_state","next_full_state","done"
self.memory.add(states, actions, full_states, rewards, next_states, full_next_states, dones)
# Learn, if enough samples are available in memory
if len(self.memory) > BATCH_SIZE and i_episode > self.episodes_before_training:
for _ in range(UPDATE_NUM): #learn multiple times at every step
for agent_no in range(self.num_agents):
experiences = self.memory.sample()
self.learn(experiences, agent_no)
self.soft_update_all()
def soft_update_all(self):
#soft update all the agents
for agent in self.maddpg_agents:
agent.soft_update_all()
def learn(self, experiences, agent_no, gamma=GAMMA):
#for learning MADDPG
states, actions, full_states, rewards, next_states, next_full_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models of each agent
target_next_actions = torch.zeros(states.shape[:2] + (self.action_size,), dtype=torch.float, device=device)
for agent_id, agent in enumerate(self.maddpg_agents):
agent_next_state = next_states[:,agent_id,:]
target_next_actions[:,agent_id,:] = agent.actor_target.forward(agent_next_state)
target_next_actions = target_next_actions.view(-1, self.full_action_size)
# Retrieves current agent
agent = self.maddpg_agents[agent_no]
agent_state = states[:,agent_no,:]
# Update current agent action with its actor local action prediction
actor_full_actions = actions.clone()
actor_full_actions[:,agent_no,:] = agent.actor_local.forward(agent_state)
actor_full_actions = actor_full_actions.view(-1, self.full_action_size)
# Reshape actions
full_actions = actions.view(-1,self.full_action_size)
rewards = rewards[:,agent_no].view(-1,1)
dones = dones[:,agent_no].view(-1,1)
experiences = (full_states, actor_full_actions, full_actions, rewards, \
dones, next_full_states, target_next_actions)
agent.learn(experiences, gamma)
def act(self, full_states, i_episode, add_noise=True):
# all actions between -1 and 1
actions = []
for agent_id, agent in enumerate(self.maddpg_agents):
action = agent.act(np.reshape(full_states[agent_id,:], newshape=(1,-1)), i_episode, add_noise)
action = np.reshape(action, newshape=(1,-1))
actions.append(action)
actions = np.concatenate(actions, axis=0)
return actions
def save_maddpg(self):
for agent_id, agent in enumerate(self.maddpg_agents):
torch.save(agent.actor_local.state_dict(), 'checkpoint_actor_local_' + str(agent_id) + '.pth')
torch.save(agent.critic_local.state_dict(), 'checkpoint_critic_local_' + str(agent_id) + '.pth')
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, per_agent_state_size, per_agent_action_size,
full_state_size, full_action_size, num_agents,
random_seed):
"""Initialize an Agent object.
Params
======
per_agent_state_size (int): dimension of each state for one agent
per_agent_action_size (int): dimension of each action for one action
full_state_size (int): dimension of each state for all agent
full_action_size (int): dimension of each action for all agent
num_agents (int) : number of agents
random_seed (int): random seed
"""
self.per_agent_state_size = per_agent_state_size
self.per_agent_action_size = per_agent_action_size
self.seed = random.seed(random_seed)
self.epsilon = EPSILON
self.num_agents = num_agents
# Initializes actor's local and target network + uniformise parameters between networks
self.actor_local = Actor(per_agent_state_size, per_agent_action_size, random_seed).to(device)
self.actor_target = Actor(per_agent_state_size, per_agent_action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
self.hard_update(self.actor_target, self.actor_local)
# Critic Network (w/ Target Network)
self.critic_local = Critic(full_state_size, full_action_size, random_seed).to(device)
self.critic_target = Critic(full_state_size, full_action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
self.hard_update(self.critic_target, self.critic_local)
def act(self, state, i_episode, add_noise=True):
"""Returns actions for given state as per current policy."""
if self.epsilon > 0.1:
self.epsilon = EPSILON_DECAY**(i_episode-MIN_EP_FOR_LEARNING)
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.epsilon*0.5*np.random.standard_normal(self.per_agent_action_size)
return np.clip(action, -1, 1)
def reset(self):
pass
def soft_update_all(self):
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
full_states, actor_full_actions, full_actions, rewards, \
dones, next_full_states, target_next_actions = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from MADDPG class
Q_targets_next = self.critic_target(next_full_states, target_next_actions)
# Compute Q targets for current states (y_i)
sum_rewards = rewards.sum(1, keepdim=True)
Q_targets = sum_rewards + (gamma * Q_targets_next * (1 - dones.max(1, keepdim=True)[0]))
# Compute critic loss
#actions = actions.view(actions.shape[0], -1)
Q_expected = self.critic_local(full_states, full_actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
actor_loss = -self.critic_local(full_states, actor_full_actions).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
def hard_update(self, target, source):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(source_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "full_state","reward", "next_state","next_full_state","done"])
self.seed = random.seed(seed)
def add(self, state, action, full_state, reward, next_state, next_full_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, full_state, reward, next_state, next_full_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.array([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.array([e.action for e in experiences if e is not None])).float().to(device)
full_states = torch.from_numpy(np.vstack([e.full_state for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.array([e.next_state for e in experiences if e is not None])).float().to(device)
next_full_states = torch.from_numpy(np.vstack([e.next_full_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, full_states, rewards, next_states, next_full_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
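
# A hedged smoke-test sketch (not part of the original project): it exercises
# MADDPG.act() on random observations. The sizes below (2 agents, 24-dim
# observations, 2-dim actions, as in the Unity Tennis task) are assumptions
# for illustration only.
if __name__ == "__main__":
    num_agents, state_size, action_size = 2, 24, 2
    maddpg = MADDPG(state_size, action_size, num_agents, random_seed=0)
    observations = np.random.randn(num_agents, state_size)
    actions = maddpg.act(observations, i_episode=1, add_noise=True)
    print(actions.shape)  # expected: (2, 2)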
|
StarcoderdataPython
|
4829457
|
<filename>qutip/tomography.py
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['qpt_plot', 'qpt_plot_combined', 'qpt']
from qutip.tensor import tensor
from qutip.superoperator import spre, spost, mat2vec, vec2mat
from numpy import hstack, real, imag
import scipy.linalg as la
from qutip.visualization import matrix_histogram, matrix_histogram_complex
try:
import matplotlib.pyplot as plt
except:
pass
def _index_permutations(size_list, perm=[]):
"""
Generate a list with all index permutations.
Parameters
----------
size_list : list
A list that contains the sizes for each composite system.
perm : list
A list of permutations
Returns
-------
perm_idx : list
List containing index permutations.
"""
if len(size_list) == 0:
yield perm
else:
for n in range(size_list[0]):
for ip in _index_permutations(size_list[1:], perm + [n]):
yield ip
def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None):
"""
Visualize the quantum process tomography chi matrix. Plot the real and
imaginary parts separately.
Parameters
----------
chi : array
Input QPT chi matrix.
lbls_list : list
List of labels for QPT plot axes.
title : string
Plot title.
fig : figure instance
User defined figure instance used for generating QPT plot.
axes : list of figure axis instance
User defined figure axis instance (list of two axes) used for
generating QPT plot.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
"""
if axes is None or len(axes) != 2:
if fig is None:
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(1, 2, 1, projection='3d', position=[0, 0, 1, 1])
ax2 = fig.add_subplot(1, 2, 2, projection='3d', position=[0, 0, 1, 1])
axes = [ax1, ax2]
xlabels = []
for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
xlabels.append("".join([lbls_list[k][inds[k]]
for k in range(len(lbls_list))]))
matrix_histogram(real(chi), xlabels, xlabels,
title=r"real($\chi$)", limits=[-1, 1], ax=axes[0])
matrix_histogram(imag(chi), xlabels, xlabels,
title=r"imag($\chi$)", limits=[-1, 1], ax=axes[1])
if title and fig:
fig.suptitle(title)
return fig, axes
def qpt_plot_combined(chi, lbls_list, title=None,
fig=None, ax=None, figsize=(8, 6),
threshold=None):
"""
Visualize the quantum process tomography chi matrix. Plot bars with
height and color corresponding to the absolute value and phase,
respectively.
Parameters
----------
chi : array
Input QPT chi matrix.
lbls_list : list
List of labels for QPT plot axes.
title : string
Plot title.
fig : figure instance
User defined figure instance used for generating QPT plot.
ax : figure axis instance
User defined figure axis instance used for generating QPT plot
(alternative to the fig argument).
threshold: float (None)
Threshold for when bars of smaller height should be transparent. If
not set, all bars are colored according to the color map.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
"""
if ax is None:
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1, projection='3d', position=[0, 0, 1, 1])
xlabels = []
for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
xlabels.append("".join(
[lbls_list[k][inds[k]] for k in range(len(lbls_list))]))
if not title:
title = r"$\chi$"
matrix_histogram_complex(chi, xlabels, xlabels, title=title, ax=ax,
threshold=threshold)
return fig, ax
def qpt(U, op_basis_list):
"""
Calculate the quantum process tomography chi matrix for a given (possibly
nonunitary) transformation matrix U, which transforms a density matrix in
vector form according to:
vec(rho) = U * vec(rho0)
or
rho = vec2mat(U * mat2vec(rho0))
U can be calculated for an open quantum system using the QuTiP propagator
function.
Parameters
----------
U : Qobj
Transformation operator. Can be calculated using QuTiP propagator
function.
op_basis_list : list
A list of Qobj's representing the basis states.
Returns
-------
chi : array
QPT chi matrix
"""
E_ops = []
# loop over all index permutations
for inds in _index_permutations([len(ops) for ops in op_basis_list]):
# loop over all composite systems
E_op_list = [op_basis_list[k][inds[k]] for k in range(len(
op_basis_list))]
E_ops.append(tensor(E_op_list))
EE_ops = [spre(E1) * spost(E2.dag()) for E1 in E_ops for E2 in E_ops]
M = hstack([mat2vec(EE.full()) for EE in EE_ops])
Uvec = mat2vec(U.full())
chi_vec = la.solve(M, Uvec)
return vec2mat(chi_vec)
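
# A minimal usage sketch (not part of the original QuTiP module): it computes
# the chi matrix of an ideal single-qubit X gate in the Pauli operator basis.
# The choice of gate and basis here is an illustrative assumption only.
if __name__ == "__main__":
    from qutip.operators import qeye, sigmax, sigmay, sigmaz

    op_basis = [[qeye(2), sigmax(), sigmay(), sigmaz()]]
    # Superoperator of the map rho -> X rho X^dagger
    U_x = spre(sigmax()) * spost(sigmax().dag())
    chi = qpt(U_x, op_basis)
    print(real(chi))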
|
StarcoderdataPython
|
1670594
|
<gh_stars>0
#define a dictionary data structure
#dictionaries have key: value for the elements
example_dict = {
'class' : 'Astr 119',
'prof' : 'Brant',
'awesomeness' : 10
}
print(type(example_dict)) #gives data type
#get a value via key
course = example_dict['class'] #access a value with ['key'] and assign it to a variable
print(course) #print 'Astr 119'
#change a value via key
example_dict['awesomeness'] += 1 #increment by 1
#since the value is an int,
#arithmetic operators like += work on it
#print the dictionary
print(example_dict)
#print dictionary element by element
for x in example_dict.keys(): #only interested in keys
print(x,example_dict[x]) #loop through them
#also print values x is paired with
|
StarcoderdataPython
|
3300129
|
class Solution:
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
ans = []
def backtrack(nums, subset, ans):
if len(subset) == len(nums):
ans.append(subset)
return
for i in range(len(nums)):
# cannot reuse same element
# input does not contain duplicates, but the same number cannot appear twice
if nums[i] in subset:
continue
backtrack(nums, subset + [nums[i]], ans)
backtrack(nums, [], ans)
return ans
solver = Solution()
ans = solver.permute([1,2,3])
print(ans)
|
StarcoderdataPython
|
1767243
|
from django import forms
from django.contrib.auth import get_user_model
from .send import send_templated_mail
from . import models
class MailForm(forms.Form):
template_name = forms.CharField()
users = forms.ModelMultipleChoiceField(
queryset=get_user_model().objects.all(),
widget=forms.CheckboxSelectMultiple
)
def clean_template_name(self):
template_name = self.cleaned_data['template_name']
try:
models.EmailTemplate.objects.get(template_name=template_name)
except models.EmailTemplate.DoesNotExist:
raise forms.ValidationError("Template with the name {} not found".format(template_name))
return template_name
def save(self):
template_name = self.cleaned_data['template_name']
users = self.cleaned_data['users']
count = 0
for user in users:
send_templated_mail(
template_name,
[user.email],
context = dict(
user = user
),
)
count += 1
return count
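
# A hedged usage sketch (not part of the original app): builds and saves the
# form programmatically, e.g. from a management command or the Django shell.
# The default template name "welcome" is an illustrative assumption.
def send_template_to_all_users(template_name="welcome"):
    form = MailForm(data={
        "template_name": template_name,
        "users": list(get_user_model().objects.values_list("pk", flat=True)),
    })
    if not form.is_valid():
        raise ValueError(form.errors)
    return form.save()  # number of emails sent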
|
StarcoderdataPython
|
3354856
|
<gh_stars>1-10
"""
Copy the output to clipboard as a list or single string.
this is a handy way of pasting your newly found subdomains
into other tools that does not accept file input.
"""
import pyperclip
def clipboard_output(subdomain_list, output_style):
"""Copy to clipboard, 's' for string and 'l' for list."""
if output_style == 's':
pyperclip.copy(' '.join(subdomain_list))
elif output_style == 'l':
pyperclip.copy('\n'.join(subdomain_list))
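
# A hedged usage sketch (not part of the original helper): copies three example
# hostnames to the clipboard as a newline-separated list.
if __name__ == "__main__":
    clipboard_output(["a.example.com", "b.example.com", "c.example.com"], "l")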
|
StarcoderdataPython
|
39038
|
<reponame>guillaume-martin/exercism<filename>python/rna-transcription/rna_transcription.py
def to_rna(dna_strand):
pairs = {'G':'C','C':'G','T':'A','A':'U'}
return ''.join(pairs[n] for n in dna_strand)
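
# A hedged usage sketch (not part of the exercism solution): each DNA
# nucleotide maps to its RNA complement (G->C, C->G, T->A, A->U).
if __name__ == "__main__":
    assert to_rna("ACGTGGTCTTAA") == "UGCACCAGAAUU"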
|
StarcoderdataPython
|
162339
|
from abc import ABCMeta, abstractmethod
from liteflow.core.builders import WorkflowBuilder
class Workflow(metaclass=ABCMeta):
@property
@abstractmethod
def id(self):
return None
@property
@abstractmethod
def version(self):
return 1
@abstractmethod
def build(self, builder: WorkflowBuilder):
raise NotImplementedError
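
# A hedged sketch (not part of the original module): a minimal concrete
# workflow that fills in the abstract members. The build() body is left empty
# because the concrete step-registration calls depend on liteflow's builder
# API, which is not shown here.
class HelloWorkflow(Workflow):
    @property
    def id(self):
        return "hello-workflow"

    @property
    def version(self):
        return 1

    def build(self, builder: WorkflowBuilder):
        # A real workflow would register its steps on the builder here.
        pass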
|
StarcoderdataPython
|
4812000
|
# Military enlistment program
from datetime import date
atual = date.today().year
ano = int(input('Enter your year of birth: '))
idade = atual - ano
print(f'You are {idade} years old')
if idade < 18:
    falta = 18 - idade
    print('You are not old enough to enlist yet')
    print(f'{falta} year(s) to go')
elif idade == 18:
    print('It is time to enlist')
else:
    passou = idade - 18
    print('The time to enlist has already passed')
    print(f'It passed {passou} year(s) ago')
|
StarcoderdataPython
|
3248079
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["SmartCapabilities"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class SmartCapabilities:
"""
SmartCapabilities
Codes that define what the server is capable of.
Status: draft - Version: 4.0.1
Copyright None
http://terminology.hl7.org/CodeSystem/smart-capabilities
"""
launch_ehr = CodeSystemConcept(
{
"code": "launch-ehr",
"definition": "support for SMART’s EHR Launch mode.",
"display": "EHR Launch Mode",
}
)
"""
EHR Launch Mode
support for SMART’s EHR Launch mode.
"""
launch_standalone = CodeSystemConcept(
{
"code": "launch-standalone",
"definition": "support for SMART’s Standalone Launch mode.",
"display": "Standalone Launch Mode",
}
)
"""
Standalone Launch Mode
support for SMART’s Standalone Launch mode.
"""
client_public = CodeSystemConcept(
{
"code": "client-public",
"definition": "support for SMART’s public client profile (no client authentication).",
"display": "Public Client Profile",
}
)
"""
Public Client Profile
support for SMART’s public client profile (no client authentication).
"""
client_confidential_symmetric = CodeSystemConcept(
{
"code": "client-confidential-symmetric",
"definition": "support for SMART’s confidential client profile (symmetric client secret authentication).",
"display": "Confidential Client Profile",
}
)
"""
Confidential Client Profile
support for SMART’s confidential client profile (symmetric client secret authentication).
"""
sso_openid_connect = CodeSystemConcept(
{
"code": "sso-openid-connect",
"definition": "support for SMART’s OpenID Connect profile.",
"display": "Supports OpenID Connect",
}
)
"""
Supports OpenID Connect
support for SMART’s OpenID Connect profile.
"""
context_passthrough_banner = CodeSystemConcept(
{
"code": "context-passthrough-banner",
"definition": "support for “need patient banner” launch context (conveyed via need_patient_banner token parameter).",
"display": 'Allows "Need Patient Banner"',
}
)
"""
Allows "Need Patient Banner"
support for “need patient banner” launch context (conveyed via need_patient_banner token parameter).
"""
context_passthrough_style = CodeSystemConcept(
{
"code": "context-passthrough-style",
"definition": "support for “SMART style URL” launch context (conveyed via smart_style_url token parameter).",
"display": 'Allows "Smart Style Style"',
}
)
"""
Allows "Smart Style Style"
support for “SMART style URL” launch context (conveyed via smart_style_url token parameter).
"""
context_ehr_patient = CodeSystemConcept(
{
"code": "context-ehr-patient",
"definition": "support for patient-level launch context (requested by launch/patient scope, conveyed via patient token parameter).",
"display": 'Allows "Patient Level Launch Context (EHR)"',
}
)
"""
Allows "Patient Level Launch Context (EHR)"
support for patient-level launch context (requested by launch/patient scope, conveyed via patient token parameter).
"""
context_ehr_encounter = CodeSystemConcept(
{
"code": "context-ehr-encounter",
"definition": "support for encounter-level launch context (requested by launch/encounter scope, conveyed via encounter token parameter).",
"display": 'Allows "Encounter Level Launch Context (EHR)"',
}
)
"""
Allows "Encounter Level Launch Context (EHR)"
support for encounter-level launch context (requested by launch/encounter scope, conveyed via encounter token parameter).
"""
context_standalone_patient = CodeSystemConcept(
{
"code": "context-standalone-patient",
"definition": "support for patient-level launch context (requested by launch/patient scope, conveyed via patient token parameter).",
"display": 'Allows "Patient Level Launch Context (STANDALONE)"',
}
)
"""
Allows "Patient Level Launch Context (STANDALONE)"
support for patient-level launch context (requested by launch/patient scope, conveyed via patient token parameter).
"""
context_standalone_encounter = CodeSystemConcept(
{
"code": "context-standalone-encounter",
"definition": "support for encounter-level launch context (requested by launch/encounter scope, conveyed via encounter token parameter).",
"display": 'Allows "Encounter Level Launch Context (STANDALONE)"',
}
)
"""
Allows "Encounter Level Launch Context (STANDALONE)"
support for encounter-level launch context (requested by launch/encounter scope, conveyed via encounter token parameter).
"""
permission_offline = CodeSystemConcept(
{
"code": "permission-offline",
"definition": "support for refresh tokens (requested by offline_access scope).",
"display": "Supports Refresh Token",
}
)
"""
Supports Refresh Token
support for refresh tokens (requested by offline_access scope).
"""
permission_patient = CodeSystemConcept(
{
"code": "permission-patient",
"definition": "support for patient-level scopes (e.g. patient/Observation.read).",
"display": "Supports Patient Level Scopes",
}
)
"""
Supports Patient Level Scopes
support for patient-level scopes (e.g. patient/Observation.read).
"""
permission_user = CodeSystemConcept(
{
"code": "permission-user",
"definition": "support for user-level scopes (e.g. user/Appointment.read).",
"display": "Supports User Level Scopes",
}
)
"""
Supports User Level Scopes
support for user-level scopes (e.g. user/Appointment.read).
"""
class Meta:
resource = _resource
|
StarcoderdataPython
|
10498
|
<reponame>gchiesa/cfmacro<filename>cfmacro/_resources/examples/lambda.py
# -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
}
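
# A hedged local-invocation sketch (not part of the original example). The
# event shape mimics a CloudFormation macro request; the requestId and the
# empty fragment below are placeholder assumptions for illustration only.
if __name__ == "__main__":
    sample_event = {
        "requestId": "00000000-0000-0000-0000-000000000000",
        "templateParameterValues": {},
        "fragment": {"Resources": {}},
    }
    print(lambda_handler(sample_event, context=None))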
|
StarcoderdataPython
|
83080
|
<filename>test/unit/test_config.py
import os
import unittest
import yaml
import dbt.config
if os.name == 'nt':
TMPDIR = 'c:/Windows/TEMP'
else:
TMPDIR = '/tmp'
class ConfigTest(unittest.TestCase):
def set_up_empty_config(self):
profiles_path = '{}/profiles.yml'.format(TMPDIR)
with open(profiles_path, 'w') as f:
f.write(yaml.dump({}))
def set_up_config_options(self, **kwargs):
profiles_path = '{}/profiles.yml'.format(TMPDIR)
config = {
'config': kwargs
}
with open(profiles_path, 'w') as f:
f.write(yaml.dump(config))
def tearDown(self):
profiles_path = '{}/profiles.yml'.format(TMPDIR)
try:
os.remove(profiles_path)
        except OSError:
pass
def test__implicit_opt_in(self):
self.set_up_empty_config()
config = dbt.config.read_config(TMPDIR)
self.assertTrue(dbt.config.send_anonymous_usage_stats(config))
def test__explicit_opt_out(self):
self.set_up_config_options(send_anonymous_usage_stats=False)
config = dbt.config.read_config(TMPDIR)
self.assertFalse(dbt.config.send_anonymous_usage_stats(config))
def test__explicit_opt_in(self):
self.set_up_config_options(send_anonymous_usage_stats=True)
config = dbt.config.read_config(TMPDIR)
self.assertTrue(dbt.config.send_anonymous_usage_stats(config))
def test__implicit_colors(self):
self.set_up_empty_config()
config = dbt.config.read_config(TMPDIR)
self.assertTrue(dbt.config.colorize_output(config))
    def test__explicit_no_colors(self):
self.set_up_config_options(use_colors=False)
config = dbt.config.read_config(TMPDIR)
self.assertFalse(dbt.config.colorize_output(config))
    def test__explicit_colors(self):
self.set_up_config_options(use_colors=True)
config = dbt.config.read_config(TMPDIR)
self.assertTrue(dbt.config.colorize_output(config))
|
StarcoderdataPython
|
3310147
|
<filename>python/maheen_code/pascal_3d.py
#
import os;
import scipy;
import mat4py;
from scipy import misc;
import scipy.io
import visualize;
import numpy as np;
import glob
import script_nearestNeigbourExperiment
import cPickle as pickle;
import matplotlib.pyplot as plt;
from pascal3d_db import Pascal3D, Pascal3D_Manipulator
import time;
def script_savePerClassPerDegreeHistograms():
train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
layers=['pool5','fc6','fc7'];
degrees=[0,45,90,135,180];
# degrees=[90];
# dirs=['train']
delta=5;
for file_pre in [train_pre,non_train_pre]:
for layer in layers:
curr_dir=os.path.join(file_pre+'_'+layer+'_all_azimuths');
for dir in dirs:
print dir
curr_file=os.path.join(curr_dir,dir+'_data.p');
[diffs_curr,dists_curr]=pickle.load(open(curr_file,'rb'));
for degree in degrees:
title=dir+' '+str(degree)+' delta '+str(delta)
out_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p')
print out_file
try:
hists,bins=getDistanceHistograms(diffs_curr,degree,delta=delta,normed=True,bins=10);
pickle.dump([hists,bins],open(out_file,'wb'));
out_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_non_compress_data.p')
hists,bins=getDistanceHistograms(diffs_curr,degree,dists_curr=dists_curr,delta=delta,normed=True,bins=10);
pickle.dump([hists,bins],open(out_file,'wb'));
except:
print 'error'
print out_file
# out_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_compress.png');
# # print out_file
# try:
# visualize.plotDistanceHistograms(diffs_curr,degree,out_file,title=title,delta=delta,dists_curr=None,bins=10,normed=True)
# out_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_non_compress.png');
# visualize.plotDistanceHistograms(diffs_curr,degree,out_file,title=title,delta=delta,dists_curr=dists_curr,bins=10,normed=True)
# except:
# print 'error'
# print out_file
def getDistanceHistograms(diffs_curr,degree,delta=0,dists_curr=None,bins=10,normed=False):
if dists_curr is None:
dists_curr=np.array(range(1,diffs_curr.shape[1]+1));
dists_curr=np.expand_dims(dists_curr,0);
dists_curr=np.repeat(dists_curr,diffs_curr.shape[0],0);
diffs=diffs_curr-degree;
diffs=abs(diffs);
idx=np.where(diffs<=delta)
dists=dists_curr[idx[0],idx[1]];
hist,bin_edges=np.histogram(dists, bins=bins, normed=normed)
return hist,bin_edges
def script_createHistDifferenceHTML():
out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
layers=['pool5','fc6','fc7'];
degrees=[0,45,90,135,180];
delta=5;
caption_text=['Trained','Not Trained'];
replace=[out_dir_meta+'/',''];
degree=90;
for layer in layers:
out_file_html=os.path.join(out_dir_meta,layer+'_all_azimuths'+'.html')
img_paths=[];
caption_paths=[];
for dir in dirs:
img_paths_row=[];
caption_paths_row=[];
for idx,file_pre in enumerate([train_pre,non_train_pre]):
curr_dir=os.path.join(file_pre+'_'+layer+'_all_azimuths');
im_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_compress.png');
img_paths_row.append(im_file.replace(replace[0],replace[1]));
caption_paths_row.append(caption_text[idx]+' '+layer+' '+dir);
img_paths.append(img_paths_row);
caption_paths.append(caption_paths_row);
visualize.writeHTML(out_file_html,img_paths,caption_paths,height=400,width=400);
print out_file_html
def script_createHistsWithSpecificAngleHtml():
out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
layers=['pool5','fc6','fc7'];
deg_to_see=0;
degree=90;
delta=5;
out_file_html=os.path.join('/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d','hist_angle_restrict_'+str(deg_to_see)+'_'+str(degree)+'_comparison_non_compress.html');
replace=['/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/',''];
img_paths=[];
captions=[];
for dir in dirs:
for layer in layers:
single_row=[];
single_row_caption=[];
for caption_curr,file_pre in [('Trained',train_pre),('Not trained',non_train_pre)]:
curr_dir=file_pre+'_'+layer+'_all_azimuths'
img_path=os.path.join(curr_dir,dir+'_'+str(deg_to_see)+'_'+str(degree)+'_'+str(delta)+'_non_compress.png');
img_path=img_path.replace(replace[0],replace[1]);
single_row.append(img_path);
single_row_caption.append(caption_curr+' '+dir+' '+layer);
img_paths.append(single_row);
captions.append(single_row_caption);
visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=400)
def script_createHistsWithSpecificAngle():
out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
layers=['pool5','fc6','fc7'];
deg_to_see=0;
degree=90;
delta=5;
for file_pre in [train_pre,non_train_pre]:
azimuth_file=file_pre+'_azimuths.p';
for dir in dirs:
# dir='car';
for layer in layers:
# ='fc7';
curr_dir=file_pre+'_'+layer+'_all_azimuths'
class_data_file=os.path.join(curr_dir,dir+'_data.p');
[img_paths,gt_labels,azimuths]=pickle.load(open(azimuth_file,'rb'));
[diffs_all,dists_all]=pickle.load(open(class_data_file,'rb'));
idx=np.array(gt_labels);
idx=np.where(idx==dirs.index(dir))[0];
azimuths_rel=np.array(azimuths);
azimuths_rel=azimuths_rel[idx];
idx_deg=np.where(azimuths_rel==deg_to_see)[0];
diffs_curr=diffs_all[idx_deg,:];
dists_curr=dists_all[idx_deg,:]
print diffs_curr.shape
print dists_curr.shape
out_file=os.path.join(curr_dir,dir+'_'+str(deg_to_see)+'_'+str(degree)+'_'+str(delta)+'_compress.png');
# # print out_file
title=dir+' with angle '+str(deg_to_see)+' with '+str(degree)+' difference'
visualize.plotDistanceHistograms(diffs_curr,degree,out_file,title=title,delta=delta,dists_curr=None,bins=10,normed=True)
hists,bin_edges=getDistanceHistograms(diffs_curr,degree,delta=delta,dists_curr=None,bins=10,normed=True);
pickle.dump([hists,bin_edges],open(out_file[:-2],'wb'));
out_file=os.path.join(curr_dir,dir+'_'+str(deg_to_see)+'_'+str(degree)+'_'+str(delta)+'_non_compress.png');
visualize.plotDistanceHistograms(diffs_curr,degree,out_file,title=title,delta=delta,dists_curr=dists_curr,bins=10,normed=True)
hists,bin_edges=getDistanceHistograms(diffs_curr,degree,delta=delta,dists_curr=dists_curr,bins=10,normed=True);
pickle.dump([hists,bin_edges],open(out_file[:-2],'wb'));
def script_createHistComparative():
out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
layers=['pool5','fc6','fc7'];
delta=5;
caption_text=['Trained','Not Trained'];
replace=[out_dir_meta+'/',''];
degree=90;
deg_to_see=0;
# train_files=[os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
# non_train_files=[os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
# for idx in range(len(train_files)):
combos=[(dir,layer) for dir in dirs for layer in layers];
out_file_html=os.path.join(out_dir_meta,'hist_by_degree_'+str(degree)+'_comparisons_compress.html');
img_paths=[];
captions=[];
for dir,layer in combos:
file_train=os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
# train_files[idx];
file_non_train=os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
# non_train_files[idx];
hists_train,bins_train=pickle.load(open(file_train,'rb'));
hists_non_train,bins_non_train=pickle.load(open(file_non_train,'rb'));
mid_points_train=[bins_train[i]+bins_train[i+1]/float(2) for i in range(len(bins_train)-1)];
mid_points_non_train=[bins_non_train[i]+bins_non_train[i+1]/float(2) for i in range(len(bins_non_train)-1)];
# dir=file_train[file_train.rindex('/')+1:];
# dir=dir[:dir.index('_')];
out_file_just_file=layer+'_'+dir+'_'+str(degree)+'_'+str(delta)+'.png'
out_file=os.path.join(out_dir_meta,out_file_just_file)
title=dir+' Comparison';
xlabel='Distance Rank';
ylabel='Frequency';
# print out_file
img_paths.append([out_file_just_file]);
captions.append([dir+' '+layer]);
visualize.plotSimple(zip([mid_points_train,mid_points_non_train],[hists_train,hists_non_train]),out_file,title=title,xlabel=xlabel,ylabel=ylabel,legend_entries=['Trained','Non Trained'],loc=0);
print out_file_html
visualize.writeHTML(out_file_html,img_paths,captions,width=400,height=400);
# return
# for layer in layers:
# out_file_html=os.path.join(out_dir_meta,layer+'_all_azimuths'+'.html')
# img_paths=[];
# caption_paths=[];
# for dir in dirs:
# img_paths_row=[];
# caption_paths_row=[];
# for idx,file_pre in enumerate([train_pre,non_train_pre]):
# curr_dir=os.path.join(file_pre+'_'+layer+'_all_azimuths');
# im_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_compress.png');
# img_paths_row.append(im_file.replace(replace[0],replace[1]));
# caption_paths_row.append(caption_text[idx]+' '+layer+' '+dir);
# img_paths.append(img_paths_row);
# caption_paths.append(caption_paths_row);
# visualize.writeHTML(out_file_html,img_paths,caption_paths,height=400,width=400);
# print out_file_html
def saveBBImages(path_to_im,path_to_anno,file_names,out_dir):
curr_dict={};
for idx_file_name,file_name in enumerate(file_names):
curr_dict=scipy.io.loadmat(os.path.join(path_to_anno,file_name+'.mat'),squeeze_me=True, struct_as_record=False);
im=misc.imread(os.path.join(path_to_im,file_name+'.jpg'));
objects=curr_dict['record'].objects
if not hasattr(objects, '__iter__'):
objects=[objects]
for idx,object_curr in enumerate(objects):
if object_curr.viewpoint != []:
curr_class= object_curr.__dict__['class']
bbs_curr= object_curr.bbox
bbs_curr=[b-1 for b in bbs_curr];
im_curr=im[bbs_curr[1]:bbs_curr[3],bbs_curr[0]:bbs_curr[2],:];
file_name_out=file_name[file_name.rindex('/')+1:]
out_file=os.path.join(out_dir,file_name_out+'_'+curr_class+'_'+str(idx)+'.jpg');
misc.imsave(out_file,im_curr);
def script_saveAzimuthInfo(file_name,path_to_anno):
test_set,_=pickle.load(open(file_name+'.p','rb'));
test_set=sorted(test_set,key=lambda x: x[0])
test_set=zip(*test_set);
img_paths=list(test_set[0]);
gt_labels=list(test_set[1]);
azimuths=[];
for idx_img_path,img_path in enumerate(img_paths):
if idx_img_path%10==0:
print idx_img_path,len(img_paths);
image_name=img_path[img_path.rindex('/')+1:-4];
image_name_split=image_name.split('_');
image_name_pre=image_name_split[0]+'_'+image_name_split[1];
pascal_class=image_name_split[2];
obj_index=int(image_name_split[3]);
mat_file=os.path.join(os.path.join(path_to_anno,pascal_class+'_pascal'),image_name_pre+'.mat')
try:
azimuth=getCoarseAzimuth(mat_file,obj_index)
except:
print 'error'
azimuth=-1;
azimuths.append(azimuth);
pickle.dump([img_paths,gt_labels,azimuths],open(file_name+'_azimuths.p','wb'));
def script_saveIndicesAll(file_name,layers):
test_set,_=pickle.load(open(file_name+'.p','rb'));
vals=np.load(file_name+'.npz');
test_set=sorted(test_set,key=lambda x: x[0])
test_set=zip(*test_set);
img_paths=list(test_set[0]);
gt_labels=list(test_set[1]);
numberOfN=None;
for layer in layers:
print layer
file_name_l=file_name+'_'+layer+'_distances_all';
# indices,conf_matrix=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,vals[layer],numberOfN=numberOfN,distance='cosine',algo='brute')
# pickle.dump([img_paths,gt_labels,indices,conf_matrix],open(file_name_l+'.p','wb'));
indices,distances=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,vals[layer],numberOfN=numberOfN,distance='cosine',algo='brute',distances_return=True)
pickle.dump([img_paths,gt_labels,indices,distances],open(file_name_l+'.p','wb'));
def script_populateDB(db_file,file_pre,trainFlag,layers,class_ids,caffe_model):
exception_bool_all=[];
mani=Pascal3D_Manipulator('sqlite:////'+db_file);
mani.openSession();
for layer in layers:
exception_bool=[];
file_name_l=file_pre+'_'+layer+'_distances_all.p';
print file_name_l
t=time.time();
[img_paths,gt_labels,indices,distances]=pickle.load(open(file_name_l,'rb'));
# img_paths=range(10000);
print time.time()-t
for idx in range(len(img_paths)):
if idx%100==0:
print idx
nn_timestamp=file_pre[file_pre.rindex('/')+1:]
# print 'idx',idx
# print 'img_paths[idx]',img_paths[idx]
# print 'layer',layer
# print 'nn_timestamp',nn_timestamp
# print 'class_ids[gt_labels[idx]]',class_ids[gt_labels[idx]]
# print 'gt_labels[idx]',gt_labels[idx]
# print 'caffe_model',caffe_model
# print 'indices[idx]',indices[idx].shape
# print 'distances[idx]',distances[idx].shape
# print 'trainFlag',trainFlag
# break;
# break;
# try:
mani.insert(idx,img_paths[idx],layer,nn_timestamp,class_ids[gt_labels[idx]],gt_labels[idx],caffe_model, neighbor_index=indices[idx],neighbor_distance=distances[idx],trainedClass=trainFlag)
# mani.insert();
# raise Exception('fun');
# except:
# exception_bool.append(idx);
# print idx,'EXCEPTION!'
exception_bool_all.append(exception_bool);
mani.closeSession();
return exception_bool_all
def getCoarseAzimuth(file_name,obj_index):
curr_dict=scipy.io.loadmat(file_name,squeeze_me=True, struct_as_record=False);
objects=curr_dict['record'].objects
if not hasattr(objects, '__iter__'):
objects=[objects]
assert len(objects)>obj_index;
obj=objects[obj_index];
assert 'viewpoint' in obj.__dict__.keys();
assert 'azimuth_coarse' in obj.viewpoint.__dict__.keys();
return obj.viewpoint.azimuth_coarse
def script_compareAzimuth():
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations/chair_pascal';
im_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
out_file_html='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB/chair_angle_check.html'
anno_files=[os.path.join(path_to_anno,file_name) for file_name in os.listdir(path_to_anno) if file_name.endswith('.mat')];
list_of_chairs=[];
for anno_file in anno_files:
just_file=anno_file[anno_file.rindex('/')+1:];
just_file=just_file[:-4];
curr_dict=scipy.io.loadmat(anno_file,squeeze_me=True, struct_as_record=False);
objects=curr_dict['record'].objects
if not hasattr(objects, '__iter__'):
objects=[objects]
for idx,obj in enumerate(objects):
if obj.__dict__['class']=='chair':
im_file=os.path.join(im_dir,just_file+'_chair_'+str(idx)+'.jpg');
list_of_chairs.append((im_file,obj.viewpoint.azimuth_coarse));
angles=list(zip(*list_of_chairs)[1]);
images=list(zip(*list_of_chairs)[0]);
angles=np.array(angles)
angles_uni=np.unique(angles);
col_im=[];
col_caption=[];
for angle_uni in angles_uni:
idx_uni=np.where(angles==angle_uni)[0];
row_im_curr=[];
row_caption_curr=[];
for idx_curr in range(min(5,len(idx_uni))):
idx_im=idx_uni[idx_curr]
image_just_name=images[idx_im]
image_just_name=image_just_name[image_just_name.rindex('/')+1:];
row_im_curr.append(image_just_name);
row_caption_curr.append(str(angle_uni));
col_im.append(row_im_curr);
col_caption.append(row_caption_curr);
print col_im[:5];
print col_caption[:5];
visualize.writeHTML(out_file_html,col_im,col_caption)
def script_createComparativeHtmls():
layers=['pool5','fc6','fc7'];
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations';
file_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
dirs=[dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
file_name='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
file_name_alt='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
replace_paths=['/disk2','../../../..']
out_file_pre='nn_performance_comparison_trained_notrained'
out_file_pre=os.path.join('/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d',out_file_pre);
for layer in layers:
file_name_l=file_name+'_'+layer;
[img_paths,gt_labels,indices,_]=pickle.load(open(file_name_l+'.p','rb'));
idx_sort_binned=script_nearestNeigbourExperiment.sortByPerformance(indices,gt_labels,1,perClass=True);
img_paths=[x.replace(replace_paths[0],replace_paths[1]) for x in img_paths];
im_paths,captions=visualize.createImageAndCaptionGrid(img_paths,gt_labels,indices,dirs)
file_name_l=file_name_alt+'_'+layer;
[img_paths_alt,gt_labels_alt,indices,_]=pickle.load(open(file_name_l+'.p','rb'));
img_paths_alt=[x.replace(replace_paths[0],replace_paths[1]) for x in img_paths_alt];
im_paths_alt,captions_alt=visualize.createImageAndCaptionGrid(img_paths,gt_labels,indices,dirs)
im_paths_alt=[im_paths_alt[img_paths_alt.index(curr_img_path)] for curr_img_path in img_paths];
captions_alt=[captions_alt[img_paths_alt.index(curr_img_path)] for curr_img_path in img_paths];
im_paths_big=[];
captions_big=[];
for idx_curr in idx_sort_binned:
im_paths_big.append(im_paths[idx_curr]);
im_paths_big.append(im_paths_alt[idx_curr]);
captions_big.append(captions[idx_curr]);
captions_big.append(captions_alt[idx_curr]);
visualize.writeHTML(out_file_pre+'_'+layer+'.html',im_paths_big,captions_big)
def script_visualizePerformanceDifference():
trained_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
notrained_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
out_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/nn_performance_comparison_trained_notrained.png';
out_file_diff='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/nn_performance_comparison_trained_notrained_diff.png';
file_pres=[trained_file,notrained_file]
layers=['pool5','fc6','fc7'];
legend_pres=['Trained','Unfamiliar'];
legend_entries=[];
for leg in legend_pres:
legend_entries.extend([leg+' '+layer for layer in layers]);
vecs_to_plot=[];
file_names=[file_pre+'_'+layer for file_pre in file_pres for layer in layers];
for idx,file_name in enumerate(file_names):
print file_name,legend_entries[idx];
[img_paths,gt_labels,indices,_]=pickle.load(open(file_name+'.p','rb'));
no_correct,_=script_nearestNeigbourExperiment.getNumberOfCorrectNNMatches(indices,gt_labels)
with open(file_name+'.txt','wb') as f:
for num in no_correct:
f.write(str(num)+' ');
f.write('\n');
# with open(file_name+'.txt','rb') as f:
# no_correct=f.readline();
# no_correct=[float(no_correct_curr) for no_correct_curr in no_correct.strip('\n').split()];
vecs_to_plot.append(no_correct);
# print legend_entries;
print vecs_to_plot;
# print len(vecs_to_plot);
# print len(legend_entries);
plt.figure();
plt.xlabel('Number of Nearest Neighbours K');
plt.ylabel('Accuracy');
plt.title('NN Accuracy DNN Features for Pascal Classes');
plt.xlim(0,6);
plt.ylim(min([min(vec) for vec in vecs_to_plot])-0.05,max([max(vec) for vec in vecs_to_plot])+0.05);
handles=[];
for vec in vecs_to_plot:
handle,=plt.plot(range(1,len(vec)+1),vec);
handles.append(handle);
plt.legend(handles, legend_entries,loc=2,prop={'size':10});
plt.savefig(out_file);
legend_entries=['Trained-Untrained '+layer for layer in layers];
diffs=[];
for idx in range(3):
a=vecs_to_plot[idx];
b=vecs_to_plot[idx+3];
diff=[a[idx_curr]-b[idx_curr] for idx_curr in range(len(a))];
diffs.append(diff);
vecs_to_plot=diffs;
plt.figure();
plt.xlabel('Number of Nearest Neighbours K');
plt.ylabel('Accuracy Difference');
plt.title('NN Accuracy DNN Features for Pascal Classes');
plt.xlim(0,6);
plt.ylim(min([min(vec) for vec in vecs_to_plot])-0.01,max([max(vec) for vec in vecs_to_plot])+0.01);
handles=[];
for vec in vecs_to_plot:
handle,=plt.plot(range(1,len(vec)+1),vec);
handles.append(handle);
plt.legend(handles, legend_entries,loc=2,prop={'size':10});
plt.savefig(out_file_diff);
def getAzimuthInfo(im_paths,gt_labels,indices,azimuth):
diffs_all=[];
dists_all=[];
for r in range(indices.shape[0]):
gt_label=gt_labels[r];
gt_azi=azimuth[r];
diffs=[];
dists=[];
for c in range(indices.shape[1]):
pred_idx=indices[r,c];
pred_label=gt_labels[pred_idx]
pred_azi=azimuth[pred_idx];
if pred_label==gt_label:
diff=abs(gt_azi-pred_azi);
if diff>180:
diff=(360-diff)%180;
diffs.append(diff);
dists.append(c);
diffs_all.append(diffs);
dists_all.append(dists);
return diffs_all,dists_all;
def getPerClassInfo(gt_labels,diffs_all,dists_all):
gt_labels=np.array(gt_labels);
gt_labels_uni=np.unique(gt_labels);
diffs_dists_by_label=[];
for gt_label in gt_labels_uni:
idx_gt=np.where(gt_labels==gt_label)[0];
diffs_curr=[diffs_all[idx] for idx in idx_gt];
dists_curr=[dists_all[idx] for idx in idx_gt];
diffs_curr=np.array(diffs_curr);
diffs_curr=diffs_curr[:,:-1];
dists_curr=np.array(dists_curr);
dists_curr=dists_curr[:,:-1];
curr_combo=(diffs_curr,dists_curr);
diffs_dists_by_label.append(curr_combo);
return diffs_dists_by_label,gt_labels_uni
def script_visualizePerClassAzimuthPerformance():
train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
file_pres=[train_file,non_train_file];
layers=['pool5','fc6','fc7'];
for file_name in file_pres:
for layer in layers:
in_file=file_name+'_'+layer+'_all_azimuths.p';
print in_file
t=time.time();
[img_paths,gt_labels,azimuths,diffs_all,dists_all]=pickle.load(open(in_file,'rb'));
t=time.time()-t;
# print t;
diffs_dists_by_label,gt_labels_uni=getPerClassInfo(gt_labels,diffs_all,dists_all);
out_dir=in_file[:-2];
if not os.path.exists(out_dir):
os.mkdir(out_dir);
for idx_gt_label,gt_label in enumerate(gt_labels_uni):
diffs_curr,dists_curr=diffs_dists_by_label[idx_gt_label];
out_file=os.path.join(out_dir,dirs[gt_label]+'_data.p');
pickle.dump([diffs_curr,dists_curr],open(out_file,'wb'));
title=dirs[gt_label]+' Distances Versus Viewpoint Difference'
xlabel='Distance'
ylabel='Viewpoint Difference in Degree'
out_file=os.path.join(out_dir,dirs[gt_label]+'_compress.png');
visualize.createScatterOfDiffsAndDistances(diffs_curr,title,xlabel,ylabel,out_file);
out_file=os.path.join(out_dir,dirs[gt_label]+'_non_compress.png');
visualize.createScatterOfDiffsAndDistances(diffs_curr,title,xlabel,ylabel,out_file,dists_curr);
def script_saveNNDistances(file_name,layers):
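# Load the saved test-set features and, for every layer, rerun the nearest-neighbour
# search with cosine distance (keeping the distances) and pickle the results per layer.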
test_set,_=pickle.load(open(file_name+'.p','rb'));
vals=np.load(file_name+'.npz');
test_set=sorted(test_set,key=lambda x: x[0])
test_set=zip(*test_set);
img_paths=list(test_set[0]);
gt_labels=list(test_set[1]);
numberOfN=None;
for layer in layers:
file_name_l=file_name+'_'+layer+'_all_distances';
indices,conf_matrix=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,vals[layer],numberOfN=numberOfN,distance='cosine',algo='brute',conf_matrix=False,distances=True)
pickle.dump([img_paths,gt_labels,indices,conf_matrix],open(file_name_l+'.p','wb'));
def script_testingDoNN():
img_paths=[];
numberOfSamples=100;
featureVectorLength=10;
numberOfClasses=5;
gt_labels=np.random.random_integers(0,numberOfClasses-1,(numberOfSamples,));
features_curr=np.random.random_integers(-100,100,(numberOfSamples,featureVectorLength));
features_curr=np.array(features_curr,dtype=float);
numberOfN=5;
indices=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,features_curr,numberOfN=numberOfN)
print indices.shape==(numberOfSamples,numberOfN);
numberOfN=None
indices=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,features_curr,numberOfN=numberOfN)
print indices.shape==(numberOfSamples,numberOfSamples);
indices,conf_matrix=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,features_curr,numberOfN=numberOfN,conf_matrix_return=True)
print conf_matrix.shape==(numberOfClasses,numberOfClasses)
indices,distances=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,features_curr,numberOfN=numberOfN,conf_matrix_return=False,distances_return=True)
print distances.shape==indices.shape==(numberOfSamples,numberOfSamples);
indices,conf_matrix,distances=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,features_curr,numberOfN=numberOfN,conf_matrix_return=True,distances_return=True)
print conf_matrix.shape==(numberOfClasses,numberOfClasses)
print distances.shape==indices.shape==(numberOfSamples,numberOfSamples);
plt.figure();
plt.plot(distances[0]);
plt.plot(distances[numberOfSamples-1]);
plt.savefig('/disk2/temp/checkNN.png');
plt.close();
def main():
train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
layers=['pool5','fc6','fc7'];
out_dir='/disk2/octoberExperiments/nn_pascal3d'
db_file=os.path.join(out_dir,'nn_pascal3d_new.db');
if not os.path.exists(out_dir):
os.mkdir(out_dir);
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations';
class_ids=[dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
file_pre=train_file
trainFlag=True
caffe_model='/home/maheenrashid/Downloads/caffe/caffe-rc2/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
# exceptions=script_populateDB(db_file,file_pre,trainFlag,layers,class_ids,caffe_model)
# for ex in exceptions:
# print len(ex)
# pickle.dump([exceptions],open(file_pre+'_db_exceptions.p','wb'));
file_pre=non_train_file
trainFlag=False
caffe_model='/disk2/octoberExperiments/nn_performance_without_pascal/snapshot_iter_450000.caffemodel'
script_populateDB(db_file,file_pre,trainFlag,layers,class_ids,caffe_model)
return
script_saveIndicesAll(train_file,layers);
script_saveIndicesAll(non_train_file,layers);
# script_testingDoNN();
return
script_createHistComparative();
# script_createHistsWithSpecificAngle()
return
script_createHistComparative()
# script_createHistDifferenceHTML()
# script_savePerClassPerDegreeHistograms()
return
script_visualizePerClassAzimuthPerformance();
return
train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
non_train_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
layers=['pool5','fc6','fc7'];
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'
for file_name in [train_file,non_train_file]:
[img_paths,gt_labels,azimuths]=pickle.load(open(file_name+'_azimuths.p','rb'));
for layer in layers:
print layer;
file_name_l=file_name+'_'+layer+'_all';
out_file=file_name_l+'_azimuths.p';
t=time.time()
[img_paths,gt_labels,indices,_]=pickle.load(open(file_name_l+'.p','rb'));
t=time.time()-t
print t
# raw_input();
diffs_all,dists_all=getAzimuthInfo(img_paths,gt_labels,indices,azimuths)
pickle.dump([img_paths,gt_labels,azimuths,diffs_all,dists_all],open(out_file,'wb'));
return
text_labels=[dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
for file_name in [train_file,non_train_file]:
[img_paths,gt_labels,azimuths]=pickle.load(open(file_name+'_azimuths.p','rb'));
for layer in layers:
print layer;
file_name_l=file_name+'_'+layer+'_all';
out_dir=file_name_l+'_azimuths';
if not os.path.exists(out_dir):
os.mkdir(out_dir);
t=time.time()
[img_paths,gt_labels,indices,_]=pickle.load(open(file_name_l+'.p','rb'));
t=time.time()-t
print t
# raw_input();
createAzimuthGraphs(img_paths,gt_labels,indices,azimuths,out_dir,text_labels)
for layer in layers:
print layer
out_file='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/azimuths_'+layer+'_all'+'_comparison.html';
rel_train='trained/20151027204114_'+layer+'_all'+'_azimuths'
rel_notrain='no_trained/20151027203547_'+layer+'_all'+'_azimuths';
out_dir='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547_'+layer+'_all'+'_azimuths'
im_paths=[[os.path.join(rel_train,file_curr),os.path.join(rel_notrain,file_curr)] for file_curr in os.listdir(out_dir) if file_curr.endswith('.jpg')];
captions=[['train','no_train']]*len(im_paths);
visualize.writeHTML(out_file,im_paths,captions,height=500,width=500)
# script_saveAzimuthInfo(train_file,path_to_anno);
# script_saveAzimuthInfo(non_train_file,path_to_anno);
# script_saveIndicesAll(train_file,layers)
# script_saveIndicesAll(non_train_file,layers)
return
out_dir='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
if not os.path.exists(out_dir):
os.mkdir(out_dir);
# out_dir=os.path.join(out_dir,'no_trained');
out_dir=os.path.join(out_dir,'trained');
if not os.path.exists(out_dir):
os.mkdir(out_dir);
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations';
file_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
dirs=[dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
test_set=[];
for dir_idx,dir in enumerate(dirs):
ims=[filename for filename in glob.glob(file_dir + '/*'+dir+'*.jpg')]
test_set.extend(zip(ims,[dir_idx]*len(ims)));
print len(test_set);
layers=['pool5','fc6','fc7'];
gpu_no=1
path_to_classify='..';
numberOfN=5
relativePaths=['/disk2','../../../../..'];
deployFile='/disk2/octoberExperiments/nn_performance_without_pascal/deploy.prototxt'
meanFile='/disk2/octoberExperiments/nn_performance_without_pascal/mean.npy'
modelFile='/disk2/octoberExperiments/nn_performance_without_pascal/snapshot_iter_450000.caffemodel'
# file_name=script_nearestNeigbourExperiment.runClassificationTestSet(test_set,out_dir,path_to_classify,gpu_no,layers,deployFile=deployFile,meanFile=meanFile,modelFile=modelFile,ext='jpg')
# file_name=script_nearestNeigbourExperiment.runClassificationTestSet(test_set,out_dir,path_to_classify,gpu_no,layers,ext='jpg')
file_name=os.path.join(out_dir,'20151027204114');
test_set,_=pickle.load(open(file_name+'.p','rb'));
vals=np.load(file_name+'.npz');
test_set=sorted(test_set,key=lambda x: x[0])
test_set=zip(*test_set);
img_paths=list(test_set[0]);
gt_labels=list(test_set[1]);
numberOfN=5;
# file_name_alt='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203004'
for layer in layers:
print layer
file_name_l=file_name+'_'+layer;
# indices,conf_matrix=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,vals[layer],numberOfN=numberOfN,distance='cosine',algo='brute')
# pickle.dump([img_paths,gt_labels,indices,conf_matrix],open(file_name_l+'.p','wb'));
[img_paths,gt_labels,indices,_]=pickle.load(open(file_name_l+'.p','rb'));
idx_sort_binned=script_nearestNeigbourExperiment.sortByPerformance(indices,gt_labels,0,perClass=True);
img_paths=[x.replace('/disk2','../../../../..') for x in img_paths];
im_paths,captions=visualize.createImageAndCaptionGrid(img_paths,gt_labels,indices,dirs)
im_paths=[im_paths[idx] for idx in idx_sort_binned];
captions=[captions[idx] for idx in idx_sort_binned];
visualize.writeHTML(file_name_l+'_sorted.html',im_paths,captions)
return
path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations';
path_to_im='/disk2/pascal_3d/PASCAL3D+_release1.0/Images';
dirs=[dir for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
out_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
if not os.path.exists(out_dir):
os.mkdir(out_dir);
for dir in dirs:
file_names=[os.path.join(dir,file_name)[:-4] for file_name in os.listdir(os.path.join(path_to_im,dir)) if file_name.endswith('.jpg')];
saveBBImages(path_to_im,path_to_anno,file_names,out_dir);
if __name__=='__main__':
main();
|
StarcoderdataPython
|
1643546
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Data Source
import yfinance as yf
import time, datetime, math
from datetime import datetime
import sqlite3
con = sqlite3.connect("DB/stocks.db")
#con.row_factory = sqlite3.Row
stocks = ['UBER']
#data = pd.read_sql_query("select DISTINCT symbol FROM stocks_hist",con)
#stocks = data['symbol']
#['UBER','FLT.V','TSLA','eark.ne','rci-b.to','SNDL','PLTR','PBR',"AAL",'A','AAL','AAP','AAPL','ABBV','ABC','ABMD','ABT','ACN','ADBE','ADI','ADM','ADP','ADSK','AEE','AEP','AES','AFL','AIG','AIZ','AJG','AKAM','ALB','ALGN','ALK','ALL','ALLE','ALXN','AMAT','AMCR','AMD','AME','AMGN','AMP','AMT','AMZN','ANET','ANSS','ANTM','AON','AOS','APA','APD','APH','APTV','ARE','ATO','ATVI','AVB','AVGO','AVY','AWK','AXP','AZO','BA','BAC','BAX','BBY','BDX','BEN','BF-B','BIIB','BIO','BK','BKNG','BKR','BLK','BLL','BMY','BR','BRK-B','BSX','BWA','BXP','C','CAG','CAH','CARR','CAT','CB','CBOE','CBRE','CCI','CCL','CDNS','CDW','CE','CERN','CF','CFG','CHD','CHRW','CHTR','CI','CINF','CL','CLX','CMA','CMCSA','CME']
#,'FLT.V','TSLA','eark.ne','rci-b.to','BTC-USD'
##AMD AND INTEL DOING BEST
#Interval required 5 minutes
StartBal = 1000
Nshares = 0
sl = StartBal
buy = 0
RSIL = 0
b = 0
tv = 0
olddata = 0
per=[]
for stock in stocks:
data = pd.read_sql_query("SELECT * FROM stocks_hist WHERE symbol='" + stock + "' ORDER BY Datetime DESC limit 100 ",con,index_col='Datetime')
data.sort_index()
#print(data)
#data = yf.download(tickers=stock, period='5d', interval='1m',progress=False)
#RSI CALC
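# RSI = 100 - 100 / (1 + RS), where RS is the 14-bar average gain divided by the
# 14-bar average loss (rolling means of the up/down moves computed below).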
data['Return'] = np.log(data['Close'] / data['Close'].shift(1) )
data['Movement'] = data['Close'] - data['Close'].shift(1)
data['up'] = np.where((data['Movement'] > 0) ,data['Movement'],0)
data['down'] = np.where((data['Movement'] < 0) ,data['Movement'],0)
window_length = 14
#calculate moving average of the last 14 days gains
up = data['up'].rolling(window_length).mean()
#calculate moving average of the last 14 days losses
down = data['down'].abs().rolling(window_length).mean()
RS = up / down
data['RSI'] = 100.0 - (100.0 / (1.0 + RS))
#Bollinger bands, 1 std and 2 std
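# Upper/Lower are the 20-bar moving average +/- 2 standard deviations; Upper1s/Lower1s
# use +/- 1 standard deviation. LBPer/UBPer express how far the close sits from the bands.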
data['MA20'] = data['Close'].rolling(window=20).mean()
data['20dSTD'] = data['Close'].rolling(window=20).std()
data['Upper'] = data['MA20'] + (data['20dSTD'] * 2)
data['Lower'] = data['MA20'] - (data['20dSTD'] * 2)
data['Upper1s'] = data['MA20'] + (data['20dSTD'] * 1)
data['Lower1s'] = data['MA20'] - (data['20dSTD'] * 1)
data['LBPer']=(data['Close']/data['Lower'])-1
data['UBPer']=(data['Upper']/data['Close'])-1
data['UBPer1s']=(data['Close']/data['Upper1s'])-1
data['AD'] = 0
#ADL Line
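# CMFV is the money-flow volume: ((close-low)-(high-close))/(high-low) * volume;
# AD is its 14-bar rolling sum, shifted by one bar so the current bar is excluded.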
data['CMFV'] = (((data['Close']-data['Low'])-(data['High']-data['Close']))/(data['High']-data['Low']))*data['Volume']
data['AD'] = data['CMFV'].rolling(14, min_periods=14).sum()
data['AD'] = data['AD'].shift(1)
#data.to_csv('csv/' + stock + '.csv')
#data = data[data.index.strftime('%Y-%m-%d') == '2021-02-17']
LastRSI = 0
LastLBPer = 10000000
LastUBPer = 10000000
LastClose =10000000
now = 0
#data=data.tail(n=1)
for index, row in data.iterrows():
timestr = '15:57:00'
now = index
if now != olddata:
current_time = now.strftime("%H:%M:%S")
ftr = [3600,60,1]
tv = sum([a*b for a,b in zip(ftr, map(int,timestr.split(':')))]) - sum([a*b for a,b in zip(ftr, map(int,current_time.split(':')))])
TStop = 0
if tv<0:
#determines if time is 3:57 and sells the position
TStop = 1
'''if buy == 0 and row['RSI'] < 10 and LastRSI < row['RSI'] and row['Close'] < row['Lower'] and LastClose < row['Close'] and row['AD'] < row['Close']:
Nshares = math.floor(StartBal / row['Close'])
StartBal -= row['Close'] * Nshares
buy = 1
b+=1
print(f"{index} - BOUGHT - at {row['Close']} - {Nshares} # of shares")
if buy == 1 and row['RSI'] > 70 and LastRSI > row['RSI'] and row['Close'] > LastUB and LastClose < row['Close'] and row['AD'] > row['Close']:
StartBal += row['Close'] * Nshares
Nshares = 0
buy = 0
print(f"{index} - SOLD - Balance {StartBal}")'''
if buy == 1 and row['RSI'] >= 80 and LastRSI > row['RSI'] and LastClose < row['Close'] and row['AD'] > row['Close']:
StartBal += row['Close'] * Nshares
Nshares = 0
buy = 0
per.append((((StartBal/sl)-1)*100))
#print(f"{index} - SOLD - Balance {StartBal} % Made {(((StartBal/sl)-1)*100)}")
'''if TStop == 1 and buy ==1:
StartBal += row['Close'] * Nshares
Nshares = 0
buy = 0
per.append((((StartBal/sl)-1)*100))
#print(f"{index} - SOLD - Balance {StartBal} % Made {(((StartBal/sl)-1)*100)}")'''
if buy == 0 and row['RSI'] <= 30 and LastRSI < row['RSI'] and LastClose < row['Close'] and row['AD'] < row['Close'] and row['Close']>LastLB and row['Close']>row['Lower'] and row['Close']<row['Lower1s']:
sl = StartBal
Nshares = math.floor(StartBal / row['Close'])
StartBal -= row['Close'] * Nshares
buy = 1
b+=1
#print(f"{index} - BOUGHT - at {row['Close']} - {Nshares} # of shares")
LastRSI = row['RSI']
LastClose = row['Close']
LastLB = row['Lower']
LastUB = row['Upper']
#print(f"stock {stock} time {now} - current price {LastClose} it's this close to low bol {row['UBPer']} beg {sl} end {StartBal} pending {buy} and tr {b}")
olddata = now
print(f"total transactions {b} and pending {buy}")
#time.sleep(1)
Lost = sum(map(lambda x: x< 0,per))
Won = sum(map(lambda x: x> 0,per))
perwon = (Won/len(per)*100)
print(perwon)
print(sum(per))
print(sl)
|
StarcoderdataPython
|
35037
|
<gh_stars>1-10
from setuptools import setup, find_packages
setup(
name = 'Flask-Digest',
version = '0.2.1',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/vctandrade/flask-digest',
description = 'A RESTful authentication service for Flask applications',
long_description = open('README.rst').read(),
license = 'MIT',
platforms = ['Platform Independent'],
install_requires = ['Flask >= 0.10.1'],
packages = find_packages(),
keywords = ['digest', 'authentication', 'flask'],
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation'
]
)
|
StarcoderdataPython
|
3304744
|
from ipykernel.kernelapp import IPKernelApp
from .kernel import YottaDBKernel
IPKernelApp.launch_instance(kernel_class=YottaDBKernel)
|
StarcoderdataPython
|
28715
|
import numpy as np
from napari_plugin_engine import napari_hook_implementation
from napari_tools_menu import register_function
from napari_time_slicer import time_slicer, slice_by_slice
import napari
from napari.types import ImageData, LabelsData
@napari_hook_implementation
def napari_experimental_provide_function():
return [
gaussian_blur,
threshold_otsu,
connected_component_labeling,
sobel_edge_detector,
binary_fill_holes,
seeded_watershed,
split_touching_objects,
euclidean_distance_map
]
@register_function(menu="Filtering / noise removal > Gaussian (n-mahotas)")
@time_slicer
def gaussian_blur(image:ImageData, sigma: float = 1, viewer: napari.Viewer = None) -> ImageData:
"""
Filters an image using a Gaussian kernel with a given sigma.
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.gaussian_filter
"""
import mahotas as mh
return mh.gaussian_filter(image, sigma)
def _8bit(image):
return (image / image.max() * 255).astype(np.uint8)
@register_function(menu="Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)")
@time_slicer
def threshold_otsu(image:ImageData, viewer: napari.Viewer = None) -> LabelsData:
"""
Thresholds an image using Otsu's technique
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.otsu
"""
import mahotas as mh
image_8bit = _8bit(image)
t = mh.otsu(image_8bit)
return image_8bit > t
@register_function(menu="Segmentation / labeling > Connected component labeling (n-mahotas)")
@time_slicer
def connected_component_labeling(binary_image: LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Label connected regions in a binary image
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.label
"""
import mahotas as mh
labeled, nr_objects = mh.label(binary_image)
return labeled
@register_function(menu="Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)")
@time_slicer
def sobel_edge_detector(image:ImageData, viewer: napari.Viewer = None) -> ImageData:
"""
Enhances edges using a sobel operator
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.sobel
"""
import mahotas as mh
return mh.sobel(image, just_filter=True)
@register_function(menu="Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)")
@slice_by_slice
@time_slicer
def binary_fill_holes(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Fill holes in a binary image
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.close_holes
"""
import mahotas as mh
return mh.close_holes(binary_image)
@register_function(menu="Segmentation / labeling > Seeded watershed (n-mahotas)")
@time_slicer
def seeded_watershed(image:ImageData, labeled_seeds:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Labels all pixels in an image by flooding intensity valleys in a given image starting from labeled region seeds.
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.cwatershed
"""
import mahotas as mh
labels = mh.cwatershed(image, labeled_seeds)
return labels
@register_function(menu="Measurement > Euclidean distance map (n-mahotas)")
@time_slicer
def euclidean_distance_map(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Draws a Euclidean distance map from a binary image. Non-zero values in the binary image will be
replaced by the distance to the nearest zero pixel.
See also
--------
..[0] https://en.wikipedia.org/wiki/Distance_transform
"""
import mahotas as mh
return mh.distance(binary_image)
def _sobel_3d(image):
from scipy import ndimage as ndi
kernel = np.asarray([
[
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], [
[0, 1, 0],
[1, -6, 1],
[0, 1, 0]
], [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
])
return ndi.convolve(image, kernel)
@register_function(menu="Segmentation post-processing > Split touching objects (n-mahotas)")
@time_slicer
def split_touching_objects(binary:LabelsData, sigma:float=3.5, viewer: napari.Viewer = None) -> LabelsData:
"""
Takes a binary image and draws cuts in the objects similar to the ImageJ watershed algorithm.
See also
--------
.. [0] https://imagej.nih.gov/ij/docs/menus/process.html#watershed
"""
import mahotas as mh
binary = _8bit(np.asarray(binary))
# typical scikit-image watershed recipe, implemented here with mahotas
distance = mh.distance(binary)
blurred_distance = mh.gaussian_filter(distance, sigma=sigma)
fp = np.ones((3,) * binary.ndim)
markers, num_labels = mh.label(mh.regmax(blurred_distance, Bc=fp))
labels = mh.cwatershed(-blurred_distance, markers)
# identify label-cutting edges
if len(binary.shape) == 2:
edges = mh.sobel(labels, just_filter=True)
edges2 = mh.sobel(binary, just_filter=True)
else: # assuming 3D
edges = _sobel_3d(labels)
edges2 = _sobel_3d(binary)
almost = np.logical_not(np.logical_xor(edges != 0, edges2 != 0)) * binary
return mh.open(almost) != 0
|
StarcoderdataPython
|
1699985
|
<reponame>Nurul-GC/powfu_file_organizer
"""
This script is to install the program in Windows machine.
Its function is to create a new Folder in C:\\Program Files\\ and copy the program.
After this, create a new RegeditKey to help users to use the program by clicking on the right button.
"""
import os
import shutil
import time
# todo: python 3.7 and 3.8 doesn't have this module, please update!
import winreg
folder_name = "PowFu - File Organizer"
# don't worry, python don't need to use this slash '\'
# because the compiler (on windows) convert this '/' to this '\'
installation_destination = "C:/Program Files/"
full = os.path.join(installation_destination, folder_name)
def copyFiles(file, my_path):
"""
This function copies a file into the installation folder.
Files that already exist in the destination are skipped.
Parameters :
file - The name of the file to be copied, as a string.
my_path - The folder containing the file, relative to the current working directory, as a string.
Return : This function does not return any value.
"""
source = os.path.join(os.getcwd(), my_path, file)
try:
if file not in os.listdir(full):
shutil.copy(source, full)
except BaseException as error:
print(f"Something went wrong with {error}")
def createFolder():
try:
if folder_name not in os.listdir(installation_destination):
os.makedirs(full)
else:
pass
except BaseException as error:
# be careful about the variable's name
print(f"Something went wrong with {error}")
def create_root_key():
try:
new_key = winreg.OpenKeyEx(winreg.HKEY_CLASSES_ROOT, "Directory/Background/shell", 0, winreg.KEY_ALL_ACCESS)
key = winreg.CreateKey(new_key, r"powfu")
winreg.SetValue(key, "", winreg.REG_SZ, "Organize with PowFu - File Organizer")
sub_key = winreg.CreateKey(key, r"command")
winreg.SetValue(sub_key, "", winreg.REG_SZ, os.path.join(full, "powfu.exe"))
winreg.CloseKey(new_key)
winreg.CloseKey(key)
winreg.CloseKey(sub_key)
except WindowsError as we:
# re-raise as a more descriptive error for the caller
raise SystemError("Oops! Something went wrong while creating the registry key")
if __name__ == '__main__':
try:
print("=====================================================================")
print("====== POWFU - FILE ORGANIZER ======")
print("====== Installing ======")
print("=====================================================================")
time.sleep(3)
createFolder()
copyFiles(os.path.join("powfu.exe"), "windows-executable")
copyFiles(os.path.join("READ_FIRST.txt"), os.getcwd())
copyFiles(os.path.join("uninstaller.bat"), "windows-executable")
create_root_key()
print("Installation Done successfully!")
time.sleep(2)
except:
print("Sorry we could not install.")
time.sleep(3)
|
StarcoderdataPython
|
121204
|
<gh_stars>1-10
from player_commands.bazaar import bazaar_cog
from player_commands.sky import sky_cog
from player_commands.wiki import wiki_cog
from player_commands.dungeons import dungeons_cog
from player_commands.kills import kills_cog
from player_commands.lowest_bin import lowest_bin_cog
from player_commands.skills import skills_cog
from player_commands.slayer import slayer_cog
from player_commands.invite import invite_cog
from player_commands.auction_house import auction_house_cog
from player_commands.missing import missing_cog
from player_commands.weights import weights_cog
from player_commands.leaderboard import leaderboard_cog
from player_commands.price_check import price_check_cog
from player_commands.minions import minions_cog
from player_commands.rank import rank_cog
from player_commands.guild_print import guild_print_cog
from player_commands.maxer import maxer_cog
from player_commands.set_prefix import set_prefix_cog
from player_commands.link_account import link_account_cog
from player_commands.help_command import help_cog
from player_commands.regenerate_leaderboard import regenerate_leaderboard_cog
#from player_commands._dev import _dev_cog
assistant_commands = [set_prefix_cog, link_account_cog, help_cog, regenerate_leaderboard_cog]
regular_commands = [sky_cog, wiki_cog, bazaar_cog,
dungeons_cog, kills_cog, lowest_bin_cog,
skills_cog, slayer_cog, invite_cog,
auction_house_cog, missing_cog, weights_cog,
leaderboard_cog, price_check_cog,
minions_cog, rank_cog, guild_print_cog,
maxer_cog]
player_commands = regular_commands+assistant_commands
|
StarcoderdataPython
|
1769661
|
<reponame>homeoffice-ys/EliteQuant_Python<filename>source/gui/ui_log_window.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtWidgets, QtGui
from ..event.event import GeneralEvent
class LogWindow(QtWidgets.QTableWidget):
msg_signal = QtCore.pyqtSignal(type(GeneralEvent()))
def __init__(self, lang_dict, parent=None):
super(LogWindow, self).__init__(parent)
self.header = [lang_dict['Time'],
lang_dict['Content']]
self.init_table()
self._lang_dict = lang_dict
self.msg_signal.connect(self.update_table)
def init_table(self):
col = len(self.header)
self.setColumnCount(col)
self.setHorizontalHeaderLabels(self.header)
self.setEditTriggers(self.NoEditTriggers)
self.verticalHeader().setVisible(False)
self.setAlternatingRowColors(True)
self.setSortingEnabled(False)
def update_table(self, general_event):
'''
Prepend a new row with the event's timestamp and content.
'''
self.insertRow(0)
self.setItem(0, 0, QtWidgets.QTableWidgetItem(general_event.timestamp))
self.setItem(0, 1, QtWidgets.QTableWidgetItem(general_event.content))
|
StarcoderdataPython
|
1624199
|
import unittest
import torch
from torch import nn
from ner.active_heuristic import (
Random,
Uncertantiy,
KNNEmbeddings,
)
from . import utils
class TestActiveHeuristics(unittest.TestCase):
'''
Test cases for active_heuristics
'''
def test_random(self):
'''
run test cases for the random heuristic
'''
# should not rely on vocabulary
heuristic = Random(vocab=None, tag_vocab=None)
dataset = utils.construct_sample_unlabeled_dataset()
# this is model independent, so setting the model
# to none should not change the output
result = heuristic.evaluate(
model=None,
dataset=dataset,
)
model_result = heuristic.evaluate(
model=utils.MockModel(),
dataset=dataset,
)
# result should be model agnostic
assert torch.all(result.eq(model_result))
# make sure all the results are equal, positive,
# between 0 and 1, and sum to 1
assert len(result) == len(dataset)
for i in range(1, len(result)):
assert result[i] >= 0.0
assert result[i] <= 1.0
assert result[i] == result[i - 1]
assert sum(result) == 1.0
def test_uncertain(self):
'''
test cases for the uncertainty-based heuristic
'''
model = utils.MockModel()
dataset = utils.construct_sample_unlabeled_dataset()
vocab = utils.build_sample_vocab()
tag_vocab = utils.build_sample_tag_vocab()
heuristic = Uncertantiy(vocab=vocab, tag_vocab=tag_vocab)
result = heuristic.evaluate(
model=model,
dataset=dataset
)
assert len(result) == len(dataset)
# all result here should be equal to model.random_val
assert torch.all(torch.eq(result, 1.0 / len(result)))
def test_knn(self):
'''
test case for KNN based heuristic
'''
model = utils.MockModel()
dataset = utils.construct_sample_unlabeled_dataset()
vocab = utils.build_sample_vocab()
tag_vocab = utils.build_sample_tag_vocab()
heuristic = KNNEmbeddings(vocab=vocab, tag_vocab=tag_vocab)
heuristic.prepare(
model,
dataset,
)
dataset.remove((0, utils.SAMPLE_DATASET[0][0]))
output = heuristic.evaluate_with_labeled(
model=model,
dataset=dataset,
labeled_indexes=[0],
labeled_points=[(0,) + utils.SAMPLE_DATASET[0]],
)
assert len(output) == len(dataset)
print(output)
|
StarcoderdataPython
|
1799792
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Mesh operations such as calculating volume and surface measures.
"""
import numpy as np
import math
def calculateSurface(mesh, vertGroups=None, faceMask=None):
"""
Calculate surface area of a mesh. Specify vertGroups or faceMask to
calculate area of a subset of the mesh and filter out other faces.
"""
if vertGroups is not None:
f_idx = mesh.getFacesForGroups(vertGroups)
fvert = mesh.fvert[f_idx]
elif faceMask is not None:
f_idx = np.argwhere(faceMask)[...,0]
fvert = mesh.fvert[f_idx]
else:
fvert = mesh.fvert
if mesh.vertsPerPrimitive == 4:
# Split quads in triangles (assumes clockwise ordering of verts)
t1 = fvert[:,[0,1,2]]
t2 = fvert[:,[2,3,0]]
v1 = mesh.coord[t1]
v2 = mesh.coord[t2]
l1 = _sideLengthsFromTris(v1)
l2 = _sideLengthsFromTris(v2)
l = np.vstack([l1,l2])
return _surfaceOfTris(l)
elif mesh.vertsPerPrimitive == 3:
v = mesh.coord[fvert]
l = _sideLengthsFromTris(v)
return _surfaceOfTris(l)
else:
raise RuntimeError("Only supports meshes with triangle or quad primitives.")
def calculateVolume(mesh, vertGroups=None, faceMask=None):
"""
Calculate the volume of a mesh.
Mesh is expected to be closed.
"""
if vertGroups is not None:
f_idx = mesh.getFacesForGroups(vertGroups)
fvert = mesh.fvert[f_idx]
elif faceMask is not None:
f_idx = np.argwhere(faceMask)[...,0]
fvert = mesh.fvert[f_idx]
else:
fvert = mesh.fvert
if mesh.vertsPerPrimitive == 4:
# Split quads in triangles (assumes clockwise ordering of verts)
t1 = fvert[:,[0,1,2]]
t2 = fvert[:,[2,3,0]]
v1 = mesh.coord[t1]
v2 = mesh.coord[t2]
v = np.vstack([v1,v2])
return _signedVolumeFromTris(v)
elif mesh.vertsPerPrimitive == 3:
v = mesh.coord[fvert]
return _signedVolumeFromTris(v)
else:
raise RuntimeError("Only supports meshes with triangle or quad primitives.")
def _sideLengthsFromTris(triVects):
"""
Calculate lengths of the sides of triangles specified by their vectors
in clockwise fashion.
triVects = [ [T1V1, T1V2, T1V3], [T2V1, T2V2, T2V3], ... ]
with Ti a triangle, Vi a triangle vector, defined in clockwise fashion
and each vector (TiVi) an array [x, y, z] with vector coordinates
Returns a list [ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ...]
with Ti a triangle (in the same order as in the input), and Li the length of
side i (a float)
"""
v = triVects
s = np.zeros(v.shape, dtype=np.float32)
# Get side vectors
s[:,0] = v[:,1] - v[:,0]
s[:,1] = v[:,2] - v[:,1]
s[:,2] = v[:,0] - v[:,2]
# Calculate lengths of sides
l = s[:,:,0]*s[:,:,0] + s[:,:,1]*s[:,:,1] + s[:,:,2]*s[:,:,2]
l = np.sqrt(l)
return l
def _surfaceOfTris(triSideLengths):
"""
Calculate total surface area of triangles with sides of specified lengths
triSideLengths should be an array of layout
[ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ... ]
with Ti a triangle, and Li the length of the ith side of the triangle
TiLi should be a float.
Returns a float representing the total surface area.
"""
l = triSideLengths
# Heron's formula
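# Heron's formula: area = sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a+b+c)/2; the product
# below is the equivalent 16*area^2 expansion, hence the division by 4 after the sqrt.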
o = ( l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] +l[:,1] -l[:,2]) * \
(-l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] -l[:,1] +l[:,2])
o = np.sqrt(o)/4
return np.sum(o)
def _signedVolumeFromTris(triVects):
"""
Calculate volume of a set of triangles by summing signed volumes of
tetrahedrons between those triangles and the origin.
"""
v = triVects
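# The signed volume of the tetrahedron (origin, v0, v1, v2) is det([v0, v1, v2]) / 6;
# the six triple products below expand that 3x3 determinant explicitly.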
v321 = v[:,2,0] * v[:,1,1] * v[:,0,2]
v231 = v[:,1,0] * v[:,2,1] * v[:,0,2]
v312 = v[:,2,0] * v[:,0,1] * v[:,1,2]
v132 = v[:,0,0] * v[:,2,1] * v[:,1,2]
v213 = v[:,1,0] * v[:,0,1] * v[:,2,2]
v123 = v[:,0,0] * v[:,1,1] * v[:,2,2]
signedVolume = -v321 + v231 + v312 - v132 - v213 + v123
signedVolume /= 6.0
vol = np.sum(signedVolume)
return math.fabs(vol)
def findVertIndex(mesh, vert):
"""
Find the index of specified vertex (as an [x, y, z] array) within mesh.
"""
matches = list(np.where(mesh.coord == vert)[0])
return [idx for idx in set(matches) if matches.count(idx) > 2]
|
StarcoderdataPython
|
4819503
|
<reponame>bitfort/mlmetrics
"""Definition of metric structure and seiralization/deserialization.
Read and write a metrics structure to/from files and strings.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
# A structure for use in python which is nicer to use than
# a raw dictionary.
Metric = collections.namedtuple('Metric', [
'timestamp', 'key', 'value', 'metadata'
])
def parse_metrics_from_string(metrics_string):
''' Parses a string of json serialized metrics. '''
return [dict_to_metric(j) for j in json.loads(metrics_string)]
def encode_metrics_as_string(metrics):
''' Encodes a list of Metric structs as a json string. '''
return json.dumps([metric_to_dict(m) for m in metrics])
def dict_to_metric(j):
''' Converts a json dict to a Metric struct. '''
return Metric(j['ts'], j['key'], j['value'], j['metadata'])
def metric_to_dict(m):
''' Converts a metrics struct to a json dict. '''
return {
'ts': m.timestamp,
'key': m.key,
'value': m.value,
'metadata': m.metadata,
}
def read_metrics_from_file(file_name):
''' Returns a file as a list of Metric structs. '''
with open(file_name) as f:
return parse_metrics_from_string(f.read())
def write_metrics_to_file(metrics, file_name):
''' Writes a list of Metric structs to a file. '''
with open(file_name, 'w') as f:
f.write(encode_metrics_as_string(metrics))
|
StarcoderdataPython
|
1753640
|
<filename>third_party/houdini/scripts/python/husdshadertranslators/arnold.py
import hou
import husdshadertranslators.utils as utils
from husdshadertranslators.default import DefaultShaderTranslatorHelper, renderContextName, RampParmTranslator
from pxr import Usd, UsdShade, Sdf, Vt
from itertools import izip
# TODO(pal):
# - Support putting fetch nodes in the middle of the shader graph.
# - Investigate animated parameters, especially the ramp.
# - Filter the extra parameters created on the ramp nodes.
# Arnold shaders have the render mask of VMantra. This would be great to change
# as mantra and other shader types might share this mask.
ARNOLD_RENDER_MASK = 'VMantra'
ARNOLD_TERMINALS = [hou.shaderType.Surface, hou.shaderType.Displacement, 'volume']
ARNOLD_NODE_PREFIX = 'arnold::'
ARNOLD_USD_PREFIX = 'arnold:'
ARNOLD_FETCH_NAME = 'arnold::fetch'
ARNOLD_RENDER_CONTEXT_NAME = 'arnold'
ARNOLD_RAMP_TYPES = ['arnold::ramp_rgb', 'arnold::ramp_float']
ARNOLD_RAMP_INTERP_REMAP = {
hou.rampBasis.Constant: 0,
hou.rampBasis.Linear: 1,
hou.rampBasis.CatmullRom: 2,
hou.rampBasis.BSpline: 2,
hou.rampBasis.MonotoneCubic: 3,
}
def resolve_fetch_vop(node):
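# Follow a chain of arnold::fetch nodes to the VOP they ultimately reference;
# returns None if any fetch node in the chain is bypassed or has no target.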
while node.type().name() == ARNOLD_FETCH_NAME:
if node.isBypassed():
return None
node = node.parm('target').evalAsNode()
if not node:
return None
return node
class ArnoldRampParmTranslator(RampParmTranslator):
"""
Translator for Arnold ramp shader params.
"""
def createAndSetAttrib(self, usd_shader, time_code):
""" Creates an attribute on the usd shader primitive and sets its value
according to the member node parameter, at the specified time code.
"""
ramp = self.valueFromParm(self.parmTuple(), time_code)[0]
output_time_code = self.adjustTimeCode(time_code)
position = usd_shader.CreateInput('position', Sdf.ValueTypeNames.FloatArray)
position.Set(Vt.FloatArray(ramp.keys()), output_time_code)
interpolation = usd_shader.CreateInput('interpolation', Sdf.ValueTypeNames.IntArray)
interpolation.Set([ARNOLD_RAMP_INTERP_REMAP.get(v, 3) for v in ramp.basis()], output_time_code)
if ramp.isColor():
value_input = usd_shader.CreateInput('color', Sdf.ValueTypeNames.Color3fArray)
value = ramp.values()
else:
value_input = usd_shader.CreateInput('value', Sdf.ValueTypeNames.FloatArray)
value = Vt.FloatArray(ramp.values())
value_input.Set(value, output_time_code)
class ArnoldShaderHelper(DefaultShaderTranslatorHelper):
# Just initializing the default shader translator helper.
def __init__(self, translator_id, usd_stage, usd_material_path, usd_time_code):
DefaultShaderTranslatorHelper.__init__(self, translator_id, usd_stage, usd_material_path, usd_time_code)
def createShaderPrimID(self, shader_prim, shader_node):
"""
Creates and sets the id parameter on the shader. We are querying the shader_node's type name and removing the
arnold:: prefix, and setting the id to arnold:<shader_type>
"""
type_name = shader_node.type().name()
if type_name.startswith(ARNOLD_NODE_PREFIX):
shader_name = type_name[len(ARNOLD_NODE_PREFIX):]
shader_prim.SetShaderId(ARNOLD_USD_PREFIX + shader_name)
# Falling back to the built-in function.
else:
DefaultShaderTranslatorHelper.createShaderPrimID(self, shader_prim, shader_node)
def createShaderPrimAttributes(self, shader_prim, shader_node):
""" Creates and sets the shader parameters on the usd shader
based on the given shader node.
"""
for parm_tuple in shader_node.parmTuples():
parm_template = parm_tuple.parmTemplate()
if isinstance(parm_template, hou.FolderParmTemplate) or isinstance(parm_template, hou.FolderSetParmTemplate):
continue
parm_translator = self.getParmTranslator(parm_tuple)
if parm_translator is None:
continue
# Create an attribute on the usd prim and set its value.
parm_translator.createAndSetAttrib(shader_prim, self.timeCode())
def getRampParmTranslator(self, parm_tuple):
""" Returns a translator for ramp parameters.
"""
if parm_tuple.node().type().name() not in ARNOLD_RAMP_TYPES:
return None
return ArnoldRampParmTranslator(parm_tuple)
def getRenderContextName(self, shader_node, shader_node_output_name):
""" Returns the name of the render context to be used in material
output name.
"""
# We are only calling this helper on arnold shader, so we can just
# hardcode the value.
return ARNOLD_RENDER_CONTEXT_NAME
class ArnoldShaderTranslator(object):
def __init__(self):
self.my_id = -1
def setTranslatorID(self, translator_id):
self.my_id = translator_id
def translatorID(self):
return self.my_id
def matchesRenderMask(self, render_mask):
return render_mask == ARNOLD_RENDER_MASK
def createMaterialShader(self, usd_stage, usd_material_path, usd_time_code, shader_node, shader_type, output_name):
""" Creates a USD shader primitive that is part of the USD material
Ie, the translator will connect the shader to the material output.
usd_stage - UsdStage object on which to create the shader.
usd_material_path - Path to the material primitive
in which to create the shader.
usd_time_code - time code (frame) at which to evaluate shader
parameters and at which to set the primitive attributes.
shader_node - Houdini node representing a shader.
shader_type - Requested shader type to use, in case
the shader node implements several shaders
(eg, is a material builder).
output_name - Particular output of the Houdini node that was
used to arrive at the shader node and which represents the
shader to translate (in case the node has several shaders).
The output name can be an empty string, if the node has no outputs.
"""
shader_node = resolve_fetch_vop(shader_node)
if shader_node is None:
return
type_name = shader_node.type().name()
# If type name is 'arnold_material' then we are working with an output node, and we need to check the different
# terminals. Otherwise we are just dealing with the first node.
shaders_to_translate = []
if type_name == 'arnold_material':
shaders_to_translate = [(input, terminal) for input, terminal in izip(shader_node.inputs(), ARNOLD_TERMINALS)
if input is not None]
elif type_name.startswith(ARNOLD_NODE_PREFIX):
shaders_to_translate = [(shader_node, ARNOLD_TERMINALS[0])]
else:
# If we are dealing with non-arnold materials, we are running the default shader translator.
helper = DefaultShaderTranslatorHelper(self.translatorID(), usd_stage, usd_material_path, usd_time_code)
helper.createMaterialShader(shader_node, shader_type, output_name)
return
for shader, shader_type in shaders_to_translate:
shader = resolve_fetch_vop(shader)
if shader and not shader.isBypassed():
helper = ArnoldShaderHelper(self.translatorID(), usd_stage, usd_material_path, usd_time_code)
helper.createMaterialShader(shader, shader_type, output_name)
arnold_translator = ArnoldShaderTranslator()
def usdShaderTranslator():
return arnold_translator
|
StarcoderdataPython
|
3276518
|
<filename>ChessGui/main.py
import pygame as pg
pg.init()
pg.display.init()
|
StarcoderdataPython
|
1700233
|
<filename>sources/mss/wrwe/xwr14xx_capturedemo.py
#
# Copyright (c) 2019, <NAME>
# This file is licensed under the terms of the MIT license.
#
#
# TI IWR1443 ES2.0 EVM @ capture demo of SDK 1.1.0.2
#
import os, time, sys, threading, serial
from lib.probe import *
from lib.shell import *
from lib.helper import *
from lib.utility import *
# ------------------------------------------------
_meta_ = {
'dev': 'xWR14xx',
'mss': 'Capture Demo',
'ver': ('01.01.00.02', ),
'cli': 'CaptureDemo:/>',
'ccs': os.environ.get('CCS_PATH'), # full path to CCS for tiflash.memory_read()
'dbg': 'XDS110',
'mem': 0x40000,
'typ': 'IWR1443',
'add': 0x51020000,
# define apps to run
'app': [
'dft_if_signal'
]
}
# ------------------------------------------------
try:
import tiflash
except ImportError as e:
print_log(e, sys._getframe())
# ------------------------------------------------
apps = {}
verbose = False
# ------------------------------------------------
def _read_(dat, target=sys.stdout): # called by read_usr and others
target.write(dat)
target.flush()
for ver in _meta_['ver']:
if all((tag in dat for tag in (_meta_['dev'], _meta_['mss'], ver))): return None # reset detected
if _meta_['cli'] in dat: return True # cli ready
return False # unknown
def _init_(prt, dev, cfg, dat=None):
if dev is not None:
try:
l3_size = _meta_['mem']
ccs = _meta_['ccs']
con = tiflash.get_connections(ccs)
con = [c for c in con if _meta_['dbg'] in c]
if len(con) > 0:
con = con[0]
frame_values = cfg['profileCfg']['adcSamples'] * num_rx_antenna(cfg) * chirps_per_frame(cfg)
value_size = 2 + 2
count = cfg['frameCfg']['frames']
frame_size = frame_values * value_size
if count == 0:
count = max(1, l3_size // frame_size)
if frame_size * count > l3_size:
raise Exception('frame size ({}) exceeds buffer size ({})'.format(frame_size, l3_size))
tmem = threading.Thread(
target=_data_,
args=(con, dev._details_['serial'], value_size, frame_values, count, prt, cfg['frameCfg']['frames'] == 0))
tmem.start()
except Exception as e:
print_log(e, sys._getframe())
def _conf_(cfg):
global verbose
key = _meta_['dev']
c = dict(cfg)
p = {'rangebias': float('nan')}
if '_comment_' in c:
c.pop('_comment_', None) # remove entry
if '_settings_' in c:
rx_ant = int(c['_settings_']['rxAntennas'])
tx_ant = int(c['_settings_']['txAntennas'])
# common
if c['channelCfg']['rxMask'] is None:
c['channelCfg']['rxMask'] = 2**rx_ant - 1
if c['channelCfg']['txMask'] is None:
n = tx_ant
if n == 1: n = 0
else: n = 2 * n
c['channelCfg']['txMask'] = 1 + n
# cli output
if 'verbose' in c['_settings_'] and c['_settings_']['verbose'] is not None:
verbose = c['_settings_']['verbose']
p['range_bias'] = c['_settings_']['rangeBias']
c.pop('_settings_', None) # remove entry
return c, p
def _proc_(cfg, par, err={1: 'miss', 2: 'exec', 3: 'plot'}):
global apps
for _, app in apps.items(): app.kill()
apps.clear()
for app in _meta_['app']:
if type(app) not in (list, tuple): app = (app,)
for item in app:
if item not in apps:
apps[item], values = exec_app(item, (cfg, par, ))
if values is None: values = []
code = apps[item].poll()
if code is None:
print_log(item, values)
else:
print_log(item, values, RuntimeError(err[code]))
def _pipe_(dat):
for tag in apps:
if apps[tag] is None: continue
try:
apps[tag].stdin.write(str.encode(dat + '\n'))
apps[tag].stdin.flush()
except Exception as e:
print_log(e, sys._getframe(), tag)
apps[tag].kill()
apps[tag] = None
def _grab_(tag):
pass
# ------------------------------------------------
def _data_(con, sn, sval, fval, cnt, prt, infinite=True, width=16):
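# Repeatedly read the captured frames from the device's L3 memory over the debug
# connection, format them as hex text (one sample per group, `width` samples per line),
# pipe the chunk to the registered apps, and re-arm the sensor when capturing indefinitely.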
time.sleep(2)
active = True
while active:
try:
print_log('read memory: address={}, bytes={}, frames={}'.format(hex(_meta_['add']), sval * fval * cnt, cnt), sys._getframe())
buf = tiflash.memory_read(
address=_meta_['add'],
num_bytes=sval * fval * cnt,
ccs=_meta_['ccs'],
serno=sn,
connection=con,
fresh=True,
devicetype=_meta_['typ'])
except Exception as e:
print_log(e, sys._getframe())
break
buffer = []
tmp = dec2hex(buf)
frames = split(tmp, sval * fval * 2) # two chars per byte
for frame in frames:
buffer.append('')
tmp = split(frame, width * sval)
for line in tmp:
buffer.append(' '.join(split(line, sval)))
chunk = '\n'.join(buffer)
_pipe_(chunk)
if verbose:
print(chunk, file=sys.stdout, flush=True)
if infinite:
send_config(prt, None, None)
time.sleep(0.5)
active = infinite
|
StarcoderdataPython
|
97409
|
#!/usr/bin/env python
"""
check out chapter 2 of cam
"""
|
StarcoderdataPython
|
3329787
|
from twittertennis.handler import *
from twittertennis.tennis_utils import *
__version__ = '0.1.2'
__all__ = [
'__version__',
]
|
StarcoderdataPython
|
3322162
|
class AnaplanVersion:
_api_major_version: int = 2
_api_minor_version: int = 0
@staticmethod
def major():
return AnaplanVersion._api_major_version
@staticmethod
def minor():
return AnaplanVersion._api_minor_version
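# Illustrative usage sketch (not part of the original class): the two accessors can be
# combined into a version string, e.g. f"{AnaplanVersion.major()}.{AnaplanVersion.minor()}" -> "2.0"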
|
StarcoderdataPython
|
1769674
|
<filename>all_functions/new ddos similar/citeulike-parser-master/python/ieee.py
#!/usr/bin/env python2.6
# Copyright (c) 2010 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is derived from software contributed to CiteULike.org
# by
# <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by
# CiteULike <http://www.citeulike.org> and its
# contributors.
# 4. Neither the name of CiteULike nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CITEULIKE.ORG AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import re, sys, cookielib, urllib2
from cultools import urlparams, bail
from urllib import urlencode, unquote
from urllib2 import urlopen
import socket
from html5lib import treebuilders
import html5lib
import warnings
import codecs
import metaheaders
#from subprocess import Popen, PIPE
from lxml import etree
socket.setdefaulttimeout(15)
warnings.simplefilter("ignore",DeprecationWarning)
# Read URL from stdin
url = sys.stdin.readline().strip()
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
if url.startswith("http://ieeexplore.ieee.org/Xplore/login.jsp?url="):
url = unquote(urlparams(url)["url"])
# Some IEEE urls are malformed and have ? characters instead of & to separate
# key-value pairs in the url.
s = url.split("?")
url_head = s[0]
url_tail = "&".join(s[1:])
# Some IEEE URLs look like ./a/b?&param=value - we need to sort this out
if url_tail[0] == '&':
url_tail = url_tail[1:]
url = url_head + "?" + url_tail
try:
ar_number = int(urlparams(url)["arnumber"])
except KeyError:
bail("Couldn't find an 'arNumber' field in the URL")
metaheaders = metaheaders.MetaHeaders("http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=%d" % ar_number)
root = metaheaders.root
abstract = ''
abstractDiv = root.xpath("//a[@name='Abstract']/../*/text()")
if abstractDiv:
abstract = abstractDiv[0]
abstract = re.sub("^Abstract\s*", "", abstract).strip()
#print etree.tostring(root, pretty_print=True)
doi = metaheaders.get_item("citation_doi")
if not doi:
aLinks = root.cssselect("a")
for a in aLinks:
if not a.attrib.has_key("href"):
continue
href = a.attrib["href"]
if href.startswith("http://dx.doi.org/"):
match = re.search(r'(10\..*)', href)
if match:
doi = match.group(1)
break
print "begin_tsv"
print "type\tJOUR"
if True and metaheaders.get_item("citation_title"):
metaheaders.print_item("title","citation_title")
metaheaders.print_item("publisher","citation_publisher")
authors = metaheaders.get_multi_item("citation_author")
if authors:
for a in authors:
print "author\t%s" % a
else:
metaheaders.print_item("author","citation_authors")
metaheaders.print_item("volume","citation_volume")
metaheaders.print_item("issue","citation_issue")
metaheaders.print_item("start_page","citation_firstpage")
metaheaders.print_item("end_page","citation_lastpage")
# "serial" or "issn". Do both, to be safe
metaheaders.print_item("serial","citation_issn")
metaheaders.print_item("issn","citation_issn")
metaheaders.print_item("isbn","citation_isbn")
metaheaders.print_item("title_secondary","citation_conference")
metaheaders.print_date("citation_date")
metaheaders.print_item("journal","citation_journal_title")
metaheaders.print_item("publisher","citation_publisher")
# date is sometimes (always?) like "Oct. 2004"
date = metaheaders.get_item("citation_date")
else:
if not doi:
bail("Couldn't find a DOI")
print "use_crossref\t1"
if doi:
print "linkout\tDOI\t\t%s\t\t" % (doi)
if abstract != "":
print "abstract\t%s" % abstract
print "linkout\tIEEE\t%d\t\t\t" % (ar_number)
print "end_tsv"
print "status\tok"
|
StarcoderdataPython
|
93304
|
<filename>test.py
import unittest
from giig import *
# define some languages to test for:
EXAMPLE_LIST = ["linux", "python", "c", "java", "intellij", "eclipse"]
class TestStringMethods(unittest.TestCase):
def test_get_list(self):
result = giig._get_list()
for item in EXAMPLE_LIST:
self.assertIn(item, result)
def test_search(self):
result = giig._search("java")
self.assertEqual(3, len(result))
result = giig._search("python")
self.assertEqual(1, len(result))
def test_gitignore(self):
result = giig._get_gitignore(["linux", "python"])
self.assertIn("### Linux ###", result)
self.assertIn("### Python ###", result)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1775376
|
#
# # from copy import deepcopy
# # class A:
# # def __init__(self):
# # self.a = 5
# #
# # class B:
# # def __init__(self, z):
# # self.z = z
# # def foo(self):
# # self.z.a += 1
# #
# # AA = A()
# # BB = B(AA)
# # CC = deepcopy(BB)
# # CC.z.a += 1
# # print(BB.z.a, CC.z.a)
# # BB.foo()
# # print(AA.a, BB.z.a)
#
# # import numpy as np
# # def foo(b):
# # b+=1
# # print(b)
# # return b
# # a = np.asarray([1,2,3,4,5])
# # foo(a)
# # print(a)
#
# # import pandas as pd
# # a = {("1", 0.2): 1, ("1", 0.5): 2, ("1", 1): 3,
# # ("2", 0.2): 1, ("2", 0.5): 2, ("2", 1): 3}
# # b = pd.DataFrame.from_dict(a)
#
# # import time
# # loctime = time.localtime(time.time())
#
#
# import numpy as np
# import tensorflow as tf
# import NNbuilds
#
# # for _ in range(2):
# # np.random.seed(0)
# # print(np.random.rand(3))
# for _ in range(2):
# tf.set_random_seed(0)
# a = tf.Variable(tf.random_normal([1, 3]), name="hjk")
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# print(a.eval(sess))
#
# r = []
# for _ in range(2):
# g = tf.Graph()
# with g.as_default():
# tf.set_random_seed(0)
# with tf.variable_scope("D{}".format(_)) as scope:
# a = tf.get_variable(name="a", shape=[1,3], initializer=tf.random_normal_initializer())
# sess = tf.Session(graph=g)
# sess.run(tf.global_variables_initializer())
# print(a.eval(sess))
# # tf.reset_default_graph()
# r.append((g, sess))
# # for _ in range(2):
# # g = tf.Graph()
# # with g.as_default():
# # tf.set_random_seed(0)
# # a = tf.Variable(tf.random_normal([1, 3]), name="hjk")
# # sess = tf.Session() # sess = tf.Session(graph=g)
# # sess.run(tf.global_variables_initializer())
# # print(a.eval(sess))
# print("END")
#
#
# # sess.run(a)
# # print(a.eval(sess))
#
#
import Plots
import pickle
results = pickle.load(open(r"C:\Workspaces\PyCharm workspace\project-thesis\log_dir\results\GAN_2017-05-26_17-11-44_738309-603538.pkl", "rb"))
Plots.aplot_auc_anomaly_fit_heatmap(results, fit_test="KL", save_path=r"C:\Workspaces\PyCharm workspace\project-thesis\log_dir\GAN_2017-05-26_17-11-44_738309-603538")
|
StarcoderdataPython
|
3328169
|
# Generated by Django 3.1.12 on 2021-06-28 08:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0018_cachesettings'),
]
operations = [
migrations.AddField(
model_name='section',
name='show_progress_bar',
field=models.BooleanField(default=False),
),
]
|
StarcoderdataPython
|
85452
|
<reponame>utcsilab/eq-net
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from keras.callbacks import TerminateOnNaN, EarlyStopping, ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
from keras.optimizers import Adam
from keras import backend as K
from sklearn.model_selection import train_test_split
import tensorflow as tf
from aux_networks import QuantizationAutoencoder, EstimationEncoder
from aux_networks import sample_balanced_wmse
from aux_networks import sample_wmse, sample_wmse_numpy
from aux_matlab import decode_matlab_file
from pymatbridge import Matlab
import numpy as np
import hdf5storage
import os
import joblib
from sklearn.cluster import MiniBatchKMeans
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "0";
# Set a global seed
global_seed = 1000
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
K.tensorflow_backend.set_session(tf.Session(config=config))
### Parameters and initializations
# Scenario parameters
num_tx, num_rx = 2, 2
mod_size = 6 * num_tx # K = log2(M) in the paper, bits per QAM symbol
# Autoencoder parameters
ae_cfg = { # Architecture
'latent_dim': 3 * num_tx,
'num_layers': 6,
'hidden_dim': 4 * mod_size * np.ones((12,), dtype=np.int32),
'common_layer': 'relu',
'latent_layer': 'tanh',
'weight_reg': 0.,
'noise_sigma': 1e-3,
'global_eps': 1e-6,
# Training
'batch_size': 8192 * 4,
'num_rounds': 12,
'num_epochs_1': 100,
'num_epochs_2': 800,
'freeze_enc': False,
'learning_rate': 1e-3}
# Encoder parameters
enc_cfg = { # Architecture
'latent_dim': ae_cfg['latent_dim'],
'num_layers': 7,
'num_blocks': 1, # '1' = L architecture, '3' = P architecture
'hidden_dim': 16 * mod_size * np.ones((7,), dtype=np.int32),
'common_layer': 'relu',
'latent_layer': 'linear',
'conj_inputs': False, # Never used in the paper
'weight_reg': 0.,
'global_eps': ae_cfg['global_eps'],
# Training
'batch_size': 8192 * 4,
'num_epochs': 2000,
'learning_rate': 1e-3,
'latent_loss': 'mean_absolute_error'}
# Quantizer parameters
bits_per_dim = [5, 6]
# Are we training the quantizer?
train_quantizer = True
# Are we training the encoder?
train_encoder = True
# Inference parameters
inf_batch_size = 65536
# Define all used seeds
train_seed = 1234
test_seed = 4321
### Training
# Which algorithm and which channel model
training_channel = 'rayleigh'
training_alg = 'ml'
# Target file for training/validation data
train_file = 'matlab/data/extended_mimo%dby%d_mod%d_seed%d.mat' % (
num_tx, num_rx, mod_size//num_tx, train_seed)
# train_file = 'matlab/data/extended_%s_%s_mimo%dby%d_mod%d_seed%d.mat' % (
# training_channel, training_alg,
# num_tx, num_rx, mod_size//num_tx, train_seed)
# Load and remove high SNR points
contents = hdf5storage.loadmat(train_file)
train_snr = np.arange(len(np.squeeze(contents['snr_range'])))
ref_llr = np.asarray(contents['ref_llr'])[train_snr]
ref_y = np.asarray(contents['ref_y'])[train_snr]
ref_h = np.asarray(contents['ref_h'])[train_snr]
ref_n = np.asarray(contents['ref_n'])[train_snr]
# Reshape, convert to soft bits and seeded shuffle
# Downselect low-mid SNR values and permute
llr_train = np.moveaxis(ref_llr, -1, -3)
y_train = np.moveaxis(ref_y, -1, -2)
h_train = np.moveaxis(ref_h, -1, -3)
n_train = np.repeat(ref_n[..., None], llr_train.shape[2], axis=1)
# Apply conjugate operator to features
if enc_cfg['conj_inputs']:
y_train = np.matmul(np.conj(np.swapaxes(h_train, -2, -1)), y_train[..., None])[..., 0]
h_train = np.matmul(np.conj(np.swapaxes(h_train, -2, -1)), h_train)
# Reshape
llr_train = np.reshape(llr_train, llr_train.shape[:-2] + (-1,))
llr_train = np.reshape(llr_train, (-1, mod_size))
llr_train = np.tanh(llr_train / 2)
y_train = np.reshape(y_train, (-1, num_rx))
h_train = np.reshape(h_train, h_train.shape[:-2] + (-1,))
h_train = np.reshape(h_train, (-1, num_rx*num_tx))
n_train = np.reshape(n_train, (-1, 1))
# Convert complex to reals
y_train = y_train.view(np.float64)
h_train = h_train.view(np.float64)
# Shuffle
np.random.seed(global_seed)
shuffled_idx = np.random.permutation(len(llr_train))
llr_train = llr_train[shuffled_idx]
y_train = y_train[shuffled_idx]
h_train = h_train[shuffled_idx]
n_train = n_train[shuffled_idx]
# Split into training/validation
llr_train, llr_val, y_train, y_val, h_train, h_val, n_train, n_val = \
train_test_split(llr_train, y_train, h_train, n_train, test_size=0.2)
# Test data
# Which algorithm and which channel model
testing_channel = 'rayleigh'
testing_alg = 'ml'
test_file = 'matlab/data/extended_mimo%dby%d_mod%d_seed%d.mat' % (
num_tx, num_rx, mod_size//num_tx, test_seed)
contents = hdf5storage.loadmat(test_file)
ref_llr = np.asarray(contents['ref_llr'])
ref_y = np.asarray(contents['ref_y'])
ref_h = np.asarray(contents['ref_h'])
ref_n = np.asarray(contents['ref_n'])
ref_bits = contents['ref_bits']
snr_range = contents['snr_range'][0]
num_snr, num_codewords, _, _, _ = ref_llr.shape
# Reshape, convert to soft bits and seeded shuffle
llr_test = np.moveaxis(ref_llr, -1, -3)
llr_test = np.reshape(llr_test, llr_test.shape[:-2] + (-1,))
llr_test = np.reshape(llr_test, (-1, mod_size))
llr_test = np.tanh(llr_test / 2)
y_test = np.moveaxis(ref_y, -1, -2)
h_test = np.moveaxis(ref_h, -1, -3)
n_test = np.repeat(ref_n[..., None], ref_llr.shape[-1], axis=1)
# Apply conjugate operator to features
if enc_cfg['conj_inputs']:
y_test = np.matmul(np.conj(np.swapaxes(h_test, -2, -1)), y_test[..., None])
h_test = np.matmul(np.conj(np.swapaxes(h_test, -2, -1)), h_test)
y_test = np.reshape(y_test, (-1, num_rx))
h_test = np.reshape(h_test, h_test.shape[:-2] + (-1,))
h_test = np.reshape(h_test, (-1, num_rx*num_tx))
n_test = np.reshape(n_test, (-1, 1))
# Convert complex to reals
y_test = y_test.view(np.float64)
h_test = h_test.view(np.float64)
# Start Matlab engine
eng = Matlab()
eng.start()
# Move to right path
eng.run_code('cd /path/to/Matlab/')
# How many runs
num_runs = 1
# Global metrics
local_seed_collect = np.zeros((num_runs,))
bler_ae, ber_ae = np.zeros((num_runs, num_snr)), np.zeros((num_runs, num_snr))
bler_aeq, ber_aeq = np.zeros((num_runs, len(bits_per_dim), num_snr)), np.zeros((num_runs, len(bits_per_dim), num_snr))
bler_enc, ber_enc = np.zeros((num_runs, num_snr)), np.zeros((num_runs, num_snr))
bler_encq, ber_encq = np.zeros((num_runs, len(bits_per_dim), num_snr)), np.zeros((num_runs, len(bits_per_dim), num_snr))
# Create a global result directory
global_dir = 'models_eqnet/freeze%d_cyclic%d/mimo%dby%d_\
conj%d_mod%d_aelayers%d_latent%d_blocks%d' % (
ae_cfg['freeze_enc'], ae_cfg['cyclic_lr'],
num_tx, num_rx, enc_cfg['conj_inputs'],
mod_size//num_tx,
ae_cfg['num_layers'], ae_cfg['latent_dim'],
enc_cfg['num_blocks'])
if not os.path.exists(global_dir):
os.makedirs(global_dir)
# Optionally, the target result directory if we want to use a pretrained representation
target_dir = 'models_eqnet/freeze%d_cyclic%d/mimo%dby%d_mod%d_aelayers%d_latent%d_blocks%d' % (
ae_cfg['freeze_enc'], ae_cfg['cyclic_lr'],
num_tx, num_rx, mod_size//num_tx,
ae_cfg['num_layers'], ae_cfg['latent_dim'],
3)
target_seed = 1763986680
# For each run
for run_idx in range(num_runs):
# Initial weight seeding - this allows for completely reproducible results
local_seed = np.random.randint(low=0, high=2**31-1)
np.random.seed(local_seed)
# Store seeds
local_seed_collect[run_idx] = local_seed
# Create a local folder
local_dir = global_dir + '/seed%d' % local_seed
if not os.path.exists(local_dir):
os.makedirs(local_dir)
# If desired, skip first stage
if not train_quantizer:
# Instantiate blank autoencoder
ae, ae_list, enc, dec, dec_list = QuantizationAutoencoder(mod_size, ae_cfg['latent_dim'],
ae_cfg['num_layers'], ae_cfg['hidden_dim'],
ae_cfg['common_layer'],
ae_cfg['latent_layer'],
ae_cfg['weight_reg'], local_seed,
False, ae_cfg['noise_sigma'])
# Load weights
ae.load_weights(target_dir + '/seed%d/finetune_best.h5' % target_seed)
else:
### Stage 1 - Periodically update the weights wk ###
# For each round
for round_idx in range(ae_cfg['num_rounds']):
# Clear session
K.clear_session()
# Initial weights
if round_idx == 0:
# Initial weight tensor
loss_np, weight_values = np.ones((mod_size,)), np.ones((mod_size,))
# Normalize and update weights
loss_weights = K.expand_dims(K.variable(loss_np / np.sum(loss_np)))
# Instantiate blank autoencoder
ae, ae_list, enc, dec, dec_list = QuantizationAutoencoder(mod_size, ae_cfg['latent_dim'],
ae_cfg['num_layers'], ae_cfg['hidden_dim'],
ae_cfg['common_layer'],
ae_cfg['latent_layer'],
ae_cfg['weight_reg'], local_seed,
False, ae_cfg['noise_sigma'])
# Local optimizer
optimizer = Adam(lr=ae_cfg['learning_rate'], amsgrad=True)
# Compile with custom weighted loss function
ae.compile(optimizer=optimizer, loss=sample_balanced_wmse(eps=ae_cfg['global_eps'],
weights=loss_weights))
# Load last round weights and optimizer state
if round_idx > 0:
ae._make_train_function()
ae.optimizer.set_weights(weight_values)
ae.load_weights(global_dir + '/tmp_weights_seed%d.h5' % local_seed)
# Create list of callbacks
callbacks = [TerminateOnNaN()]
# Train
history = ae.fit(x=llr_train, y=llr_train, batch_size=ae_cfg['batch_size'],
epochs=ae_cfg['num_epochs_1'],
validation_data=(llr_val, llr_val), verbose=2,
callbacks=callbacks)
# Write incrementally
hdf5storage.savemat(local_dir + '/results.mat',
{'val_loss': history.history['val_loss']},
truncate_existing=True)
# Evaluate on validation data
rec_val = ae.predict(llr_val, batch_size=inf_batch_size)
loss_np = sample_wmse_numpy(llr_val, rec_val, eps=ae_cfg['global_eps']) # This is sufficient
# Print errors
print('Per-output error is:' + str(loss_np))
# Save weights and optimizer state
symbolic_weights = getattr(ae.optimizer, 'weights')
weight_values = K.batch_get_value(symbolic_weights)
ae.save_weights(global_dir + '/tmp_weights_seed%d.h5' % local_seed)
# Freeze encoder and finetune decoders
if ae_cfg['freeze_enc']:
enc.trainable = False
# Recompile with slower learning rate and WMSE
optimizer = Adam(lr=ae_cfg['learning_rate']/2, amsgrad=True)
ae.compile(optimizer=optimizer, loss=sample_wmse(ae_cfg['global_eps']))
# Early stop
earlyStop = EarlyStopping(monitor='val_loss', patience=100, min_delta=1e-5,
restore_best_weights=True)
# Save best weights
bestModel = ModelCheckpoint(local_dir + '/finetune_best.h5',
verbose=0, save_best_only=True,
save_weights_only=True, period=1)
# Train (fully parallel)
history = ae.fit(x=llr_train, y=llr_train, batch_size=ae_cfg['batch_size'],
epochs=ae_cfg['num_epochs_2'],
validation_data=(llr_val, llr_val), verbose=2,
callbacks=[earlyStop, bestModel, TerminateOnNaN()])
# Save last weights
ae.save_weights(local_dir + '/finetune_last.h5')
# Load best weights
ae.load_weights(local_dir + '/finetune_best.h5')
# Test performance
rec_test = ae.predict(llr_test, batch_size=inf_batch_size)
bler_ae[run_idx], ber_ae[run_idx], _ = decode_matlab_file(eng, 'ldpc',
rec_test, ref_bits, num_snr, num_codewords)
# Get all latent validation data and test data
latent_val = enc.predict(llr_val, batch_size=inf_batch_size)
latent_test = enc.predict(llr_test, batch_size=inf_batch_size)
### Train quantizers
for idx, num_bits in enumerate(bits_per_dim):
# Quantized representation
latent_q = np.zeros(latent_test.shape)
# One quantizer per dimension
for dim_idx in range(ae_cfg['latent_dim']):
# Fit
kmeans = MiniBatchKMeans(n_clusters=2**num_bits, verbose=2,
batch_size=8192, n_init=1000, max_no_improvement=1000)
kmeans.fit(np.reshape(latent_val[:, dim_idx], (-1, 1)))
# Save trained model to file
joblib.dump(kmeans, local_dir + '/kmeans_dimension%d_bits%d.sav' % (
dim_idx, num_bits))
# Extract codebook
codebook = kmeans.cluster_centers_
# Predict codebook index
codebook_idx = kmeans.predict(np.reshape(latent_test[:,dim_idx], (-1, 1)))
# Assign values from codebook
latent_q[:, dim_idx] = np.squeeze(codebook[codebook_idx])
# Test performance
rec_test = dec.predict(latent_q, batch_size=inf_batch_size)
bler_aeq[run_idx, idx], ber_aeq[run_idx, idx], _ = decode_matlab_file(eng, 'ldpc',
rec_test, ref_bits, num_snr, num_codewords)
# Get all latent validation data and test data
latent_val = enc.predict(llr_val, batch_size=inf_batch_size)
latent_test = enc.predict(llr_test, batch_size=inf_batch_size)
### Train sample encoder
if not train_encoder:
# Instantiate blank encoder
sample_encoder = EstimationEncoder(mod_size, num_rx, num_tx,
enc_cfg['num_blocks'],
enc_cfg['latent_dim'],
enc_cfg['num_layers'], enc_cfg['hidden_dim'],
enc_cfg['common_layer'], enc_cfg['latent_layer'],
enc_cfg['weight_reg'],
local_seed, verbose=False)
# Load weights
sample_encoder.load_weights(target_dir + '/seed%d/estimator_best.h5' % target_seed)
else:
# Get latent representation of training data
latent_train = enc.predict(llr_train, batch_size=inf_batch_size)
# Instantiate blank encoder
sample_encoder = EstimationEncoder(mod_size, num_rx, num_tx,
enc_cfg['num_blocks'],
enc_cfg['latent_dim'],
enc_cfg['num_layers'], enc_cfg['hidden_dim'],
enc_cfg['common_layer'], enc_cfg['latent_layer'],
enc_cfg['weight_reg'],
local_seed, verbose=False)
# Reduce LR
slowdown = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100,
verbose=1)
# Save best weights
bestModel = ModelCheckpoint(local_dir + '/estimator_best.h5',
verbose=0, save_best_only=True, save_weights_only=True, period=1)
# Local optimizer
optimizer = Adam(lr=enc_cfg['learning_rate'], amsgrad=True)
# Compile with custom weighted loss function
sample_encoder.compile(optimizer=optimizer, loss=enc_cfg['latent_loss'])
# Create list of callbacks
callbacks = [slowdown, bestModel, TerminateOnNaN()]
# Train
history = sample_encoder.fit(x=[y_train, h_train, n_train], y=latent_train,
batch_size=enc_cfg['batch_size'],
epochs=enc_cfg['num_epochs'],
validation_data=([y_val, h_val, n_val], latent_val), verbose=2,
callbacks=callbacks)
# Save encoder loss
hdf5storage.savemat(local_dir + '/estimator_history.mat',
{'history': history.history})
# Load best weights
sample_encoder.load_weights(local_dir + '/estimator_best.h5')
### Test sample encoder
# Without quantization
latent_test = sample_encoder.predict([y_test, h_test, n_test], batch_size=inf_batch_size)
# Decode
rec_test = dec.predict(latent_test, batch_size=inf_batch_size)
bler_enc[run_idx], ber_enc[run_idx], _ = decode_matlab_file(eng, 'ldpc',
rec_test, ref_bits, num_snr, num_codewords)
# Test under quantization
for idx, num_bits in enumerate(bits_per_dim):
# Quantized representation
latent_q = np.zeros(latent_test.shape)
# One quantizer per dimension
for dim_idx in range(ae_cfg['latent_dim']):
if not train_quantizer:
# Load pretrained model
kmeans = joblib.load(target_dir + '/seed%d/kmeans_dimension%d_bits%d.sav' % (
target_seed, dim_idx, num_bits))
else:
kmeans = joblib.load(local_dir + '/kmeans_dimension%d_bits%d.sav' % (
dim_idx, num_bits))
# Extract codebook
codebook = kmeans.cluster_centers_
# Predict codebook index
codebook_idx = kmeans.predict(np.reshape(latent_test[:, dim_idx], (-1, 1)))
# Assign values from codebook
latent_q[:, dim_idx] = np.squeeze(codebook[codebook_idx])
# Test performance
rec_test = dec.predict(latent_q, batch_size=inf_batch_size)
bler_encq[run_idx, idx], ber_encq[run_idx, idx], _ = decode_matlab_file(eng, 'ldpc',
rec_test, ref_bits, num_snr, num_codewords)
# Store local results
hdf5storage.savemat(local_dir + '/results.mat', {'bler_ae': bler_ae[run_idx],
'ber_ae': ber_ae[run_idx],
'bler_aeq': bler_aeq[run_idx],
'ber_aeq': ber_aeq[run_idx],
'bler_enc': bler_enc[run_idx],
'ber_enc': ber_enc[run_idx],
'bler_encq': bler_encq[run_idx],
'ber_encq': ber_encq[run_idx],
'val_loss': history.history['val_loss']
}, truncate_existing=True)
# Store global results incrementally
hdf5storage.savemat(global_dir + '/results_global%d.mat' % global_seed, {'bler_ae': bler_ae,
'ber_ae': ber_ae,
'bler_aeq': bler_aeq,
'ber_aeq': ber_aeq,
'bler_enc': bler_enc,
'ber_enc': ber_enc,
'bler_encq': bler_encq,
'ber_encq': ber_encq,
'local_seed_collect': local_seed_collect
}, truncate_existing=True)
# Close MATLAB engine
eng.stop()
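# --- Hedged usage sketch (appended for illustration, never called by the pipeline above) ---
# The helper below mirrors the per-dimension scalar quantization performed in the
# training loop, applied to an arbitrary latent array. The array shape and the
# default bit width are illustrative assumptions.
def _quantization_round_trip_sketch(latent, num_bits=3):
    """Quantize each latent dimension with its own k-means codebook (illustration only)."""
    quantized = np.zeros_like(latent)
    for dim_idx in range(latent.shape[1]):
        # One scalar codebook per latent dimension, as in the training loop above
        kmeans = MiniBatchKMeans(n_clusters=2 ** num_bits, n_init=10)
        column = latent[:, dim_idx].reshape(-1, 1)
        kmeans.fit(column)
        codebook_idx = kmeans.predict(column)
        quantized[:, dim_idx] = np.squeeze(kmeans.cluster_centers_[codebook_idx])
    return quantized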
|
StarcoderdataPython
|
163932
|
<gh_stars>1-10
import os
import cv2
import numpy as np
import types
import json
import codecs
import threading
import pprint as pp
class Chunk():
def __init__(self, video_root, video_filename, value_loader_config, repeats):
self.file = os.path.join(video_root, video_filename)
self.repeats = repeats
self.video_filename = video_filename
self.lock = threading.Lock()
# Add the value loader
self.value_loader = None
if value_loader_config is not None:
if "delimited_filename" in value_loader_config:
self.value_loader = ValueLoader_DelimitedFilename(video_filename, value_loader_config["delimited_filename"])
elif "json" in value_loader_config:
self.value_loader = ValueLoader_Json(video_root, video_filename, value_loader_config["json"])
# Calculate the chunk length
self.reader = cv2.VideoCapture(self.file)
self.num_frames = int(self.reader.get(cv2.CAP_PROP_FRAME_COUNT))
print("'%s': %i frames found." % (self.file,self.num_frames))
# Calculate if this chunk should be repeated at all
repeat_times = 1
if self.repeats is not None:
for repeat in self.repeats:
if repeat["match_type"] == "partial_match_filename":
if repeat["value_match"] in video_filename:
repeat_times = repeat_times * repeat["repeat_times"]
if repeat_times != 1:
print("'%s': Repeating %f times." % (self.file,repeat_times))
self.length = int(self.num_frames * repeat_times)
self.width = None
self.height = None
if self.reader.isOpened() and self.num_frames > 0:
            self.reader.set(cv2.CAP_PROP_POS_FRAMES, 0)  # seek to the first frame
ret, first_frame = self.reader.read()
if ret == True:
self.width = np.shape(first_frame)[1]
self.height = np.shape(first_frame)[0]
def get_frame_value(self, index):
# Load the value for the frame
if self.value_loader is None:
return None
return self.value_loader.get_values(index)
def get_frame(self, index):
frame = None
values = {}
# Load the camera frame
with self.lock:
if self.reader.isOpened():
                self.reader.set(cv2.CAP_PROP_POS_FRAMES, (index % self.num_frames))  # seek to the requested frame
ret, new_frame = self.reader.read()
if ret == True and new_frame is not None:
frame = new_frame
# Load and merge the associated values
if self.value_loader is None:
values = None
else:
values = self.value_loader.get_values(index)
return frame, values
class ValueLoader_DelimitedFilename():
def __init__(self, filename, value_loader_config):
# Parse the filename according to the config as the value
tokens = filename.split(".")[0].split( value_loader_config["delimiter"] )
self.values = {}
if "filename" in value_loader_config:
self.values[value_loader_config["filename"]] = filename
for name, indices in value_loader_config["token_load"].items():
if type(indices) is not list:
indices = [indices]
items = []
for index in indices:
items.append( tokens[index] )
# Add this named value
self.values[name] = items
self.filename_noext = filename.split(".")[0]
self.values["filename_noext"] = self.filename_noext
def get_values(self, index):
# Constant set of values depending on filename
return self.values
class ValueLoader_Json():
def __init__(self, video_root, filename, value_loader_config):
# Parse the filename according to the config as the value
self.filename_noext = filename.split(".")[0]
self.constant = False
if "constant" in value_loader_config:
self.constant = value_loader_config["constant"]
json_file = os.path.join(video_root, self.filename_noext + ".json")
if os.path.isfile(json_file):
data = codecs.open(json_file, 'r', encoding='utf-8').read()
self.data = json.loads(data)
#pp.pprint(len(self.data["ball"]))
else:
self.data = {}
def get_values(self, index):
if self.constant:
# Constant set of values for this loader
values = {}
for name, array in self.data.items():
if len(array) > 0:
values[name] = array[0]
else:
values[name] = []
else:
# Decode per the value for this index
values = {}
for name, array in self.data.items():
values[name] = array[index]
values["filename_noext"] = self.filename_noext
return values
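# --- Hedged usage sketch ---
# The block below shows how a Chunk might be constructed with a delimited-filename
# value loader. The video path, delimiter and token indices are illustrative
# assumptions rather than values taken from the original project.
if __name__ == "__main__":
    example_config = {
        "delimited_filename": {
            "delimiter": "_",
            "filename": "filename",
            # Map named values to token positions in e.g. "match_001_cam2.mp4"
            "token_load": {"match": 0, "take": 1, "camera": 2},
        }
    }
    chunk = Chunk(video_root="videos", video_filename="match_001_cam2.mp4",
                  value_loader_config=example_config, repeats=None)
    frame, values = chunk.get_frame(0)
    print("frame loaded:", frame is not None, "values:", values)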
|
StarcoderdataPython
|
1626330
|
<filename>awq.py
#!/usr/bin/env python3
import requests
import json
import os
from common import *
username = "tritlo"
fxmlUrl = "https://flightxml.flightaware.com/json/FlightXML3/"
with open('aircraft-2.0.json','r') as f:
aircraftInfo = json.loads(f.read())
# Fix inconsistencies in naming from FlightAware
def transformer(st):
crj = 'Canadair Regional Jet'
if st.startswith(crj):
return 'Bombardier' + st[len(crj):]
return st
def findSimilar(friendlyType:str):
friendlyType = transformer(friendlyType)
spl = friendlyType.split(' ')[:2]
if len(spl) == 2\
and spl[0] in aircraftInfo\
and spl[1] in aircraftInfo[spl[0]]:
return (True, aircraftInfo[spl[0]][spl[1]])
if len(spl) == 2 and spl[0] in aircraftInfo:
(m,t) = spl
if t in aircraftInfo[m]:
minDistStr = t
else:
tys = list(aircraftInfo[m].keys())
minDistStr = findMinDistStr(t,tys)
return (True, aircraftInfo[m][minDistStr])
return (False, None)
def queryFlight(ident):
apiKey = os.environ.get('FAApiKey')
payload = {'ident':ident}
response = requests.get(fxmlUrl + "FlightInfoStatus",
params=payload,
auth=(username, apiKey))
res = response.json()
flights = res["FlightInfoStatusResult"]["flights"]
# We only check the first one, to avoid too many
# API requests for aircraft....
origin = flights[0]["origin"]["airport_name"]
destination = flights[0]["destination"]["airport_name"]
aircraft = flights[0]["aircrafttype"]
distance = int(flights[0]["distance_filed"])
duration = int(flights[0]["filed_ete"])
payload = {'type': aircraft}
r2 = requests.get(fxmlUrl + "AircraftType",
params=payload,
auth=(username, apiKey))
res = r2.json()["AircraftTypeResult"]
manufacturer = res["manufacturer"]
aircraftType = res["type"]
ft = ' '.join([manufacturer,aircraftType])
(found, acinfo) = findSimilar(ft)
# We couldn't find that type of airplane
if not found:
raise NoSuchFlightError(ident)
return {'type': aircraft,
'friendlyType': ft,
'flightDuration': duration,
'origin': origin,
'destination': destination,
'distance': distance,
'gallons': round((distance * acinfo['gpm'])),
'gps': round((distance * acinfo['gpmps'])),
'seats': acinfo['seats']
}
def findCO2Kgs(flights):
results = {}
for f in flights:
data = queryFlight(f)
results[f] = []
seats = data['seats']
gps = data['gps']
co2 = round(poundsToKg(gallonsToCO2Pounds(gps)))
results[f].append({'data': data, 'seats': seats, 'co2': co2, 'gps':gps})
return results
if __name__ == '__main__':
print(findCO2Kgs(['UA9916','FI216']))
|
StarcoderdataPython
|
3284533
|
<reponame>Aerex/GamestonkTerminal
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
import numpy as np
import pandas as pd
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.fundamental_analysis.financial_modeling_prep import (
fmp_model,
)
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("apikey", "MOCK_API_KEY"),
],
}
@pytest.mark.vcr
def test_get_score():
result = fmp_model.get_score(ticker="PM")
assert isinstance(result, np.number)
@pytest.mark.vcr
@pytest.mark.parametrize(
"func, kwargs_dict",
[
(
"get_profile",
{"ticker": "PM"},
),
(
"get_quote",
{"ticker": "PM"},
),
(
"get_enterprise",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_dcf",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_income",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_balance",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_cash",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_key_metrics",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_key_ratios",
{"ticker": "PM", "number": 5, "quarterly": False},
),
(
"get_financial_growth",
{"ticker": "PM", "number": 5, "quarterly": False},
),
],
)
def test_valid_df(func, kwargs_dict):
result_df = getattr(fmp_model, func)(**kwargs_dict)
assert isinstance(result_df, pd.DataFrame)
assert not result_df.empty
|
StarcoderdataPython
|
3294360
|
"""Pytest plugin entry point. Used for any fixtures needed."""
import pytest
from .pytest_selenium_enhancer import add_custom_commands
@pytest.fixture(scope='session')
def selenium_patcher():
"""Add custom ."""
add_custom_commands()
|
StarcoderdataPython
|
1673292
|
<filename>game/finding highest bidder.py
import os
clear = lambda: os.system('clear')
logo = '''
___________
\ /
)_______(
|"""""""|_.-._,.---------.,_.-._
| | | | | | ''-.
| |_| |_ _| |_..-'
|_______| '-' `'---------'` '-'
)"""""""(
/_________\\
.-------------.
/_______________\\
'''
print(logo)
bids = {}
bidding_finished = False
# Function for finding the highest bidder
def find_highest_bidder(bidding_record):
highest_bid = 0
for bidder in bidding_record:
bid_amount = bidding_record[bidder]
if bid_amount > highest_bid:
highest_bid = bid_amount
winner =bidder
print(f"The winner is {winner} with bid amount ${highest_bid}")
while not bidding_finished:
name = input('Enter the name of bidder : ')
price =int(input("What is your bid? $"))
#adding both key and value to dic
bids[name]=price
    should_continue = input('Are there any other bidders? Type yes or no. ').lower()
if should_continue == 'no':
bidding_finished = True
find_highest_bidder(bids)
elif should_continue == 'yes':
clear()
|
StarcoderdataPython
|
3327605
|
<gh_stars>1-10
# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - <NAME>, <<EMAIL>>, 2018
#
# Add index to quarantined replicas
#
# Revision ID: b818052fa670
# Revises: 2962ece31cf4
# Create Date: 2018-03-07 14:45:46.484383
from alembic.op import (create_index, drop_index)
# revision identifiers, used by Alembic.
revision = 'b818052fa670' # pylint: disable=invalid-name
down_revision = '2962ece31cf4' # pylint: disable=invalid-name
def upgrade():
'''
upgrade method
'''
create_index('QUARANTINED_REPLICAS_PATH_IDX', 'quarantined_replicas', ['path', 'rse_id'], unique=True)
def downgrade():
'''
downgrade method
'''
drop_index('QUARANTINED_REPLICAS_PATH_IDX', 'quarantined_replicas')
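# Hedged usage note: with the deployment's alembic.ini in place, this revision can be
# applied or rolled back from the command line, e.g.
#   alembic upgrade b818052fa670
#   alembic downgrade 2962ece31cf4
# (configuration paths are assumptions about the surrounding deployment).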
|
StarcoderdataPython
|
173758
|
import pkga.pkgb.modc as c_me
print c_me.stuff
print c_me.things
|
StarcoderdataPython
|
3360343
|
<filename>src/events/api/views.py
import collections
from typing import List, Union
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
from django.db.models import Count
from django.http import Http404
from core.authentication import TokenAuthentication
from events.models import (
CustomEvent, Location, ProposedTalkEvent,
ProposedTutorialEvent, SponsoredEvent, Time, KeynoteEvent
)
from . import serializers
class TalkListAPIView(ListAPIView):
queryset = ProposedTalkEvent.objects.all()
serializer_class = serializers.TalkListSerializer
class SponsoredEventListAPIView(ListAPIView):
queryset = SponsoredEvent.objects.all()
serializer_class = serializers.SponsoredEventListSerializer
class TutorialListAPIView(ListAPIView):
queryset = ProposedTutorialEvent.objects.all()
serializer_class = serializers.TutorialListSerializer
class SpeechListAPIView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
event_type_string = request.GET.get("event_types")
event_types = event_type_string.split(',') if event_type_string else []
api_view_dict = {
'talk': TalkListAPIView,
'sponsored': SponsoredEventListAPIView,
'tutorial': TutorialListAPIView,
}
data = []
for event_type, api_view in api_view_dict.items():
if event_types and event_type not in event_types:
continue
view = api_view.as_view()
event_data = view(request._request, *args, **kwargs).data
data.extend(event_data)
if data:
return Response(data)
else:
raise Http404
class TalkDetailAPIView(RetrieveAPIView):
queryset = ProposedTalkEvent.objects.all()
serializer_class = serializers.TalkDetailSerializer
class SponsoredEventDetailAPIView(RetrieveAPIView):
queryset = SponsoredEvent.objects.all()
serializer_class = serializers.SponsoredEventDetailSerializer
class TutorialDetailAPIView(RetrieveAPIView):
queryset = ProposedTutorialEvent.objects.all()
serializer_class = serializers.TutorialDetailSerializer
class SpeechDetailAPIView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
event_id = self.kwargs.get('pk')
event_type = self.kwargs.get('event_type')
api_view_dict = {
'talk': TalkDetailAPIView,
'sponsored': SponsoredEventDetailAPIView,
'tutorial': TutorialDetailAPIView,
}
if not event_id or not event_type or \
event_type not in api_view_dict:
raise Http404
api_view = api_view_dict[event_type]
view = api_view.as_view()
return view(request._request, *args, **kwargs)
def _room_sort_key(room):
return room.split('-', 1)[0]
class EventWrapper:
def __init__(self, obj):
self.obj = obj
@property
def event_id(self) -> int:
return self.obj.id
@property
def event_type(self) -> str:
TYPE_MAP = {
CustomEvent: 'custom',
KeynoteEvent: 'keynote',
ProposedTalkEvent: 'talk',
SponsoredEvent: 'sponsored',
ProposedTutorialEvent: 'tutorial',
}
return TYPE_MAP[type(self.obj)]
@property
def title(self) -> Union[str, dict]:
if isinstance(self.obj, KeynoteEvent):
return {
'zh_hant': self.obj.session_title_zh_hant,
'en_us': self.obj.session_title_en_us,
}
elif isinstance(self.obj, (ProposedTalkEvent, ProposedTutorialEvent)):
return self.obj.proposal.title
else:
return self.obj.title
@property
def speakers(self) -> Union[List[str], List[dict]]:
if isinstance(self.obj, KeynoteEvent):
return [
{
'zh_hant': self.obj.speaker_name_zh_hant,
'en_us': self.obj.speaker_name_en_us,
}
]
if isinstance(self.obj, SponsoredEvent):
return [self.obj.host.speaker_name]
elif isinstance(self.obj, (ProposedTalkEvent, ProposedTutorialEvent)):
speaker_names = [self.obj.proposal.submitter.speaker_name]
if getattr(self.obj, '_additional_speaker_count', 1):
speaker_names.extend(
self.obj.proposal.additionalspeaker_set
.values_list('user__speaker_name', flat=True),
)
return speaker_names
else:
return []
@property
def begin_time(self) -> str:
return self.obj.begin_time.value.strftime('%Y-%m-%d %H:%M:%S')
@property
def end_time(self) -> str:
return self.obj.end_time.value.strftime('%Y-%m-%d %H:%M:%S')
@property
def is_remote(self) -> bool:
if isinstance(self.obj, (KeynoteEvent, ProposedTalkEvent, ProposedTutorialEvent)):
return self.obj.is_remote
else:
return False
@property
def recording_policy(self) -> bool:
if isinstance(self.obj, (KeynoteEvent, CustomEvent)):
return True
elif isinstance(self.obj, SponsoredEvent):
return self.obj.recording_policy
else:
return self.obj.proposal.recording_policy
@property
def break_event(self) -> bool:
if isinstance(self.obj, CustomEvent):
return self.obj.break_event
else:
return False
@property
def language(self) -> str:
if isinstance(self.obj, (ProposedTalkEvent, ProposedTutorialEvent)):
return self.obj.proposal.language
elif isinstance(self.obj, SponsoredEvent):
return self.obj.language
else:
return ''
@property
def python_level(self) -> str:
if isinstance(self.obj, (ProposedTalkEvent, ProposedTutorialEvent)):
return self.obj.proposal.python_level
elif isinstance(self.obj, SponsoredEvent):
return self.obj.python_level
else:
return ''
def display(self):
return {
'event_id': self.event_id,
'event_type': self.event_type,
'title': self.title,
'speakers': self.speakers,
'begin_time': self.begin_time,
'end_time': self.end_time,
'is_remote': self.is_remote,
'recording_policy': self.recording_policy,
'language': self.language,
'python_level': self.python_level,
'break_event': self.break_event,
}
class ScheduleAPIView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
event_querysets = [
CustomEvent.objects.all().exclude(location=Location.OTHER),
KeynoteEvent.objects.all().exclude(location=Location.OTHER),
(
ProposedTalkEvent.objects
.select_related('proposal__submitter')
.annotate(_additional_speaker_count=Count(
'proposal__additionalspeaker_set',
)).exclude(location=Location.OTHER)
),
SponsoredEvent.objects.select_related('host').exclude(location=Location.OTHER),
(
ProposedTutorialEvent.objects
.select_related('proposal__submitter')
.annotate(_additional_speaker_count=Count(
'proposal__additionalspeaker_set',
)).exclude(location=Location.OTHER)
),
]
def get(self, request):
begin_time_event_dict = collections.defaultdict(set)
for qs in self.event_querysets:
for event in qs.select_related('begin_time', 'end_time'):
begin_time_event_dict[event.begin_time].add(event)
day_info_dict = collections.OrderedDict(
(str(date), {
'date': date,
'name': name,
'rooms': set(),
'slots': {},
'timeline': {},
}) for date, name in settings.EVENTS_DAY_NAMES.items()
)
times = list(Time.objects.order_by('value'))
for begin in times:
try:
day_info = day_info_dict[str(begin.value.date())]
except KeyError:
continue
for event in begin_time_event_dict[begin]:
location = event.location
day_info['slots'].setdefault(location, [])
day_info['timeline'].setdefault('begin', event.begin_time)
day_info['timeline'].setdefault('end', event.end_time)
event_obj = EventWrapper(event)
day_info['slots'][location].append(event_obj.display())
day_info['timeline']['begin'] = min(
day_info['timeline']['begin'],
event.begin_time
)
day_info['timeline']['end'] = max(
day_info['timeline']['end'],
event.end_time
)
day_info['rooms'].add(location)
for info in day_info_dict.values():
# Sort rooms.
info['rooms'] = sorted(info['rooms'], key=_room_sort_key)
result = []
for day_info in day_info_dict.values():
day_info['timeline']['begin'] = day_info['timeline']['begin'].value.strftime('%Y-%m-%d %H:%M:%S')
day_info['timeline']['end'] = day_info['timeline']['end'].value.strftime('%Y-%m-%d %H:%M:%S')
result.append(day_info)
return Response({'data': result})
class KeynoteEventListAPIView(ListAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = KeynoteEvent.objects.all()
serializer_class = serializers.KeynoteEventSerializer
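# --- Hedged wiring sketch (illustrative only) ---
# These views are typically exposed through the app's urls.py; the route patterns
# below are assumptions and are kept commented out to avoid import-time side effects.
#
# from django.urls import path
# from events.api import views
#
# urlpatterns = [
#     path('events/speeches/', views.SpeechListAPIView.as_view()),
#     path('events/speeches/<str:event_type>/<int:pk>/', views.SpeechDetailAPIView.as_view()),
#     path('events/schedule/', views.ScheduleAPIView.as_view()),
#     path('events/keynotes/', views.KeynoteEventListAPIView.as_view()),
# ]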
|
StarcoderdataPython
|
19716
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from django.conf.urls import url
from . import views
urlpatterns = [
    # 1. Order settlement -- orders/settlement/
url(r'^orders/settlement/$', views.OrdersSettlementView.as_view(), name='settlement'),
    # 2. Submit order -- orders/commit/
url(r'^orders/commit/$', views.OrdersCommitView.as_view(), name='commit'),
    # 3. Order success -- orders/success/
url(r'^orders/success/$', views.OrdersSuccessView.as_view(), name='sucess'),
]
|
StarcoderdataPython
|
1654389
|
<reponame>KodingKurriculum/level-0-python-coding-interview
from plumbum import cli
class MergeSort(cli.Application):
_list = [8, 100, 99, 5, 15, 99, 85, 15, 25, 5, 99, 97, 10, 35, 36]
def main(self):
        if self._list is None or len(self._list) == 0:
print("List should have at least one element")
return 1
else:
sorted_list = self.sort(self._list)
print(sorted_list)
def sort(self, mylist):
length = len(mylist)
# Base case
        if length == 1:
return mylist
# Divide step
        if length == 2:
first_half = [mylist[0]]
second_half = [mylist[1]]
else:
half = int(length/2)
first_half = self.sort(mylist[0:half])
second_half = self.sort(mylist[half:])
# Conquer!
f=0 # counter for first array
s=0 # counter for second array
sorted_list = []
        while len(first_half) != 0 \
                and len(second_half) != 0:
if first_half[0] <= second_half[0]:
sorted_list.append(first_half[0])
first_half.remove(first_half[0])
else:
sorted_list.append(second_half[0])
second_half.remove(second_half[0])
# Need this last part, at some point one half will be exhausted. Since we're sorted, simply add the rest.
        if len(first_half) == 0:
sorted_list.extend(second_half)
else:
sorted_list.extend(first_half)
return sorted_list
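# --- Hedged usage sketch ---
# plumbum cli applications are normally launched through their run() classmethod,
# which parses argv and invokes main(); sort() can also be exercised directly on an
# instance, e.g. MergeSort("merge-sort").sort([3, 1, 2]) returns [1, 2, 3].
if __name__ == "__main__":
    MergeSort.run()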
|
StarcoderdataPython
|
3389010
|
<gh_stars>10-100
# Adapted from pytorch examples
from __future__ import print_function
from torch import nn, optim
from railrl.core import logger
import numpy as np
from railrl.pythonplusplus import identity
from railrl.torch.core import PyTorchModule
from railrl.torch.networks import Mlp
import railrl.torch.pytorch_util as ptu
class ReprojectionNetworkTrainer():
def __init__(
self,
train_dataset,
test_dataset,
model,
batch_size=128,
log_interval=0,
lr=1e-3,
**kwargs
):
self.log_interval = log_interval
self.batch_size = batch_size
if ptu.gpu_enabled():
model.cuda()
self.model = model
self.representation_size = model.representation_size
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
self.train_dataset, self.test_dataset = train_dataset, test_dataset
assert self.train_dataset['z'].dtype == np.float32
assert self.test_dataset['z'].dtype ==np.float32
assert self.train_dataset['z_proj'].dtype == np.float32
assert self.test_dataset['z_proj'].dtype == np.float32
self.mse = nn.MSELoss()
def get_batch(self, train=True):
dataset = self.train_dataset if train else self.test_dataset
ind = np.random.randint(0, len(dataset['z']), self.batch_size)
return {
'z': ptu.np_to_var(dataset['z'][ind, :]),
'z_proj': ptu.np_to_var(dataset['z_proj'][ind, :]),
}
def mse_loss(self, z_proj_hat, z_proj):
return self.mse(z_proj_hat, z_proj)
def train_epoch(self, epoch, batches=100):
self.model.train()
mses = []
losses = []
for batch_idx in range(batches):
data = self.get_batch()
z = data["z"]
z_proj = data['z_proj']
self.optimizer.zero_grad()
z_proj_hat = self.model(z)
mse = self.mse_loss(z_proj_hat, z_proj)
loss = mse
loss.backward()
mses.append(mse.data[0])
losses.append(loss.data[0])
self.optimizer.step()
logger.record_tabular("train/epoch", epoch)
logger.record_tabular("train/MSE", np.mean(mses))
logger.record_tabular("train/loss", np.mean(losses))
def test_epoch(self, epoch, save_network=True, batches=100):
self.model.eval()
mses = []
losses = []
for batch_idx in range(batches):
data = self.get_batch(train=False)
z = data["z"]
z_proj = data['z_proj']
z_proj_hat = self.model(z)
mse = self.mse_loss(z_proj_hat, z_proj)
loss = mse
mses.append(mse.data[0])
losses.append(loss.data[0])
logger.record_tabular("test/epoch", epoch)
logger.record_tabular("test/MSE", np.mean(mses))
logger.record_tabular("test/loss", np.mean(losses))
logger.dump_tabular()
if save_network:
logger.save_itr_params(epoch, self.model, prefix='reproj', save_anyway=True)
class ReprojectionNetwork(PyTorchModule):
def __init__(
self,
vae,
hidden_sizes=list([64, 128, 64]),
init_w=1e-3,
hidden_init=ptu.fanin_init,
output_activation=identity,
layer_norm=False,
**kwargs
):
self.save_init_params(locals())
super().__init__()
self.vae = vae
self.representation_size = self.vae.representation_size
self.hidden_init = hidden_init
self.output_activation = output_activation
# self.dist_mu = np.zeros(self.representation_size)
# self.dist_std = np.ones(self.representation_size)
self.dist_mu = self.vae.dist_mu
self.dist_std = self.vae.dist_std
self.relu = nn.ReLU()
self.init_w = init_w
hidden_sizes = list(hidden_sizes)
self.network=Mlp(hidden_sizes,
self.representation_size,
self.representation_size,
layer_norm=layer_norm,
hidden_init=hidden_init,
output_activation=output_activation,
init_w=init_w)
def forward(self, z):
z = z.view(-1, self.representation_size)
return self.network(z)
def __getstate__(self):
d = super().__getstate__()
# Add these explicitly in case they were modified
d["_dist_mu"] = self.dist_mu
d["_dist_std"] = self.dist_std
return d
def __setstate__(self, d):
super().__setstate__(d)
self.dist_mu = d["_dist_mu"]
self.dist_std = d["_dist_std"]
|
StarcoderdataPython
|
1706006
|
import pytest
from _voronoi import recompute_segment_segment_segment_circle_event as bound
from hypothesis import given
from tests.integration_tests.hints import (BoundPortedCircleEventsPair,
BoundPortedSiteEventsPair)
from tests.integration_tests.utils import are_bound_ported_circle_events_equal
from voronoi.events.computers import (
recompute_segment_segment_segment_circle_event as ported)
from . import strategies
@given(strategies.circle_events_pairs, strategies.site_events_pairs,
strategies.site_events_pairs, strategies.site_events_pairs,
strategies.booleans, strategies.booleans, strategies.booleans)
def test_basic(circle_events_pair: BoundPortedCircleEventsPair,
first_sites_pair: BoundPortedSiteEventsPair,
second_sites_pair: BoundPortedSiteEventsPair,
third_sites_pair: BoundPortedSiteEventsPair,
recompute_center_x: bool,
recompute_center_y: bool,
recompute_lower_x: bool) -> None:
bound_circle_event, ported_circle_event = circle_events_pair
first_site_bound, first_site_ported = first_sites_pair
second_site_bound, second_site_ported = second_sites_pair
third_site_bound, third_site_ported = third_sites_pair
try:
bound(bound_circle_event, first_site_bound, second_site_bound,
third_site_bound, recompute_center_x, recompute_center_y,
recompute_lower_x)
except ValueError:
with pytest.raises(ValueError):
ported(ported_circle_event, first_site_ported, second_site_ported,
third_site_ported, recompute_center_x, recompute_center_y,
recompute_lower_x)
else:
ported(ported_circle_event, first_site_ported, second_site_ported,
third_site_ported, recompute_center_x, recompute_center_y,
recompute_lower_x)
assert are_bound_ported_circle_events_equal(bound_circle_event,
ported_circle_event)
|
StarcoderdataPython
|
168498
|
from collections import OrderedDict
import cPickle
import os
def prototype_state():
state = {}
# ----- CONSTANTS -----
# Random seed
state['seed'] = 1234
# Logging level
state['level'] = 'DEBUG'
# Out-of-vocabulary token string
state['oov'] = '<unk>'
# These are end-of-sequence marks
state['end_sym_utterance'] = '</s>'
# Special tokens need to be defined here, because model architecture may adapt depending on these
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = 2 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = 3 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = 4 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = 5 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = 6 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = 7 # voice over symbol <voice_over>
state['off_screen_sym'] = 8 # off screen symbol <off_screen>
state['pause_sym'] = 9 # pause symbol <pause>
# ----- MODEL ARCHITECTURE -----
# If this flag is on, the hidden state between RNNs in subsequences is always initialized to zero.
# Set this to reset all RNN hidden states between 'max_grad_steps' time steps
state['reset_hidden_states_between_subsequences'] = False
# If this flag is on, the maxout activation function will be applied to the utterance decoders output unit.
# This requires qdim_decoder = 2x rankdim
state['maxout_out'] = False
    # If this flag is on, a one-layer MLP with linear activation function will be applied
# on the utterance decoder hidden state before outputting the distribution over words.
state['deep_utterance_decoder_out'] = True
# If this flag is on, there will be an extra MLP between utterance and dialogue encoder
state['deep_dialogue_encoder_input'] = False
# Default and recommended setting is: tanh.
# The utterance encoder and utterance decoder activation function
state['sent_rec_activation'] = 'lambda x: T.tanh(x)'
# The dialogue encoder activation function
state['dialogue_rec_activation'] = 'lambda x: T.tanh(x)'
# Determines how to input the utterance encoder and dialogue encoder into the utterance decoder RNN hidden state:
# - 'first': initializes first hidden state of decoder using encoders
# - 'all': initializes first hidden state of decoder using encoders,
# and inputs all hidden states of decoder using encoders
# - 'selective': initializes first hidden state of decoder using encoders,
# and inputs all hidden states of decoder using encoders.
# Furthermore, a gating function is applied to the encoder input
# to turn off certain dimensions if necessary.
#
# Experiments show that 'all' is most effective.
state['decoder_bias_type'] = 'all'
# Define the gating function for the three RNNs.
state['utterance_encoder_gating'] = 'GRU' # Supports 'None' and 'GRU'
state['dialogue_encoder_gating'] = 'GRU' # Supports 'None' and 'GRU'
state['utterance_decoder_gating'] = 'GRU' # Supports 'None', 'BOW' (Bag of Words), 'GRU' and 'LSTM'
# If this flag is on, two utterances encoders (one forward and one backward) will be used,
# otherwise only a forward utterance encoder is used.
state['bidirectional_utterance_encoder'] = False
# If this flag is on, there will be a direct connection between utterance encoder and utterance decoder RNNs.
state['direct_connection_between_encoders_and_decoder'] = False
# If this flag is on, there will be an extra MLP between utterance encoder and utterance decoder.
state['deep_direct_connection'] = False
# If the 'direct_connection_between_encoders_and_decoder' is on, then enabling this flag will
# change the model so that it does not use the dialogue encoder (context encoder)
state['disable_dialogue_encoder'] = False
    # If this flag is on, the model will collapse to a standard RNN:
# 1) The utterance+dialogue encoder input to the utterance decoder will be zero
# 2) The utterance decoder will never be reset
# Note this model will always be initialized with a hidden state equal to zero.
state['collaps_to_standard_rnn'] = False
# If this flag is on, the utterance decoder will be reset after each end-of-utterance token.
state['reset_utterance_decoder_at_end_of_utterance'] = True
# If this flag is on, the utterance encoder will be reset after each end-of-utterance token.
state['reset_utterance_encoder_at_end_of_utterance'] = False
# ----- HIDDEN LAYER DIMENSIONS -----
# Dimensionality of (word-level) utterance encoder hidden state
state['qdim_encoder'] = 512
# Dimensionality of (word-level) utterance decoder (RNN which generates output) hidden state
state['qdim_decoder'] = 512
# Dimensionality of (utterance-level) context encoder hidden layer
state['sdim'] = 1000
# Dimensionality of low-rank word embedding approximation
state['rankdim'] = 256
# ----- LATENT VARIABLES WITH VARIATIONAL LEARNING -----
# If this flag is on, a Gaussian latent variable is added at the beginning of each utterance.
# The utterance decoder will be conditioned on this latent variable,
# and the model will be trained using the variational lower bound.
# See, for example, the variational auto-encoder by Kingma et al. (2013).
state['add_latent_gaussian_per_utterance'] = False
# This flag will condition the latent variables on the dialogue encoder
state['condition_latent_variable_on_dialogue_encoder'] = False
# This flag will condition the latent variable on the DCGM (mean pooling over words) encoder.
# This will replace the conditioning on the utterance encoder.
# If the flag is false, the latent variable will be conditioned on the utterance encoder RNN.
state['condition_posterior_latent_variable_on_dcgm_encoder'] = False
# Dimensionality of Gaussian latent variable, which has diagonal covariance matrix.
state['latent_gaussian_per_utterance_dim'] = 10
# This is a constant by which the diagonal covariance matrix is scaled.
# By setting it to a high number (e.g. 1 or 10),
# the KL divergence will be relatively low at the beginning of training.
state['scale_latent_gaussian_variable_variances'] = 10
state['min_latent_gaussian_variable_variances'] = 0.01
state['max_latent_gaussian_variable_variances'] = 10.0
# If on, will make apply a one-layer MLP to transform the input before computing the prior
# and posterior of the Gaussian latent variable.
state['deep_latent_gaussian_variable_conditioning'] = True
# If this flag is on, the utterance decoder will ONLY be conditioned on the Gaussian latent variable.
state['condition_decoder_only_on_latent_variable'] = False
# If this flag is on, a piecewise latent variable is added at the beginning of each utterance.
# The utterance decoder will be conditioned on this latent variable,
# and the model will be trained using the variational lower bound.
# See, for example, the variational auto-encoder by Kingma et al. (2013).
state['add_latent_piecewise_per_utterance'] = False
# If this flag is on, the posterior piecewise distribution will be interpolated
# with the prior distribution using a linear gating mechanism.
state['gate_latent_piecewise_per_utterance'] = True
state['latent_piecewise_alpha_variables'] = 5
# This is a constant by which the prior piecewise alpha parameters are scaled.
# By setting it to a number in the range (2.0, 10) the piecewise posterior distributions will
    # be free to change appropriately to accommodate the real posterior,
# while still leaving some probability mass around 0.5 for the variable to change.
# With scale_latent_piecewise_variable_alpha=10, KL divergence cost is about 10% of overall cost initially.
# With scale_latent_piecewise_variable_alpha=1, KL divergence cost is about 1% of overall cost initially.
state['scale_latent_piecewise_variable_alpha_use_softplus'] = True
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['latent_piecewise_per_utterance_dim'] = 10
# If parameter tying is enabled, a Gaussian convolution is applied to all the the alpha values.
# This makes the alpha values dependent upon each other, and guarantees that a single sample
# will update the weight of all the alpha values with higher gradients to nearby values.
    # Julian: This only helped slightly in my initial experiments.
state['latent_piecewise_variable_alpha_parameter_tying'] = False
state['latent_piecewise_variable_alpha_parameter_tying_beta'] = 1.0
# If on, will make apply a one-layer MLP to transform the input before computing the prior
# and posterior of the piecewise latent variable.
state['deep_latent_piecewise_variable_conditioning'] = True
# If this flag is on, the input to the utterance decoder will be passed through
# a one-layer MLP with rectified linear units.
# If batch normalization or layer normalization is on,
# this will also ensure that the inputs to the decoder RNN are normalized.
state['deep_utterance_decoder_input'] = True
# If this flag is on, the KL-divergence term weight for the latent variables
# will be slowly increased from zero to one.
state['train_latent_variables_with_kl_divergence_annealing'] = False
# The KL-divergence term weight is increased by this parameter for every training batch.
# It is truncated to one. For example, 1.0/60000.0 means that at iteration 60000 the model
# will assign weight one to the KL-divergence term (assuming kl_divergence_max_weight=1.0)
# and thus only be maximizing the true variational bound from iteration 60000 and onward.
state['kl_divergence_annealing_rate'] = 1.0/60000.0
# The maximum KL-divergence term weight allowed. Only applies to models with annealed KL-divergence.
state['kl_divergence_max_weight'] = 1.0
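    # Worked example (illustrative): with kl_divergence_annealing_rate = 1.0/60000.0 and
    # kl_divergence_max_weight = 1.0, the KL term weight after n training batches is
    # min(n / 60000.0, 1.0), i.e. 0.5 after 30000 batches and 1.0 from batch 60000 onward.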
# If this flag is enabled, previous token input to the decoder RNN is replaced with 'unk' tokens at random.
state['decoder_drop_previous_input_tokens'] = False
    # The rate at which the previous tokens input to the decoder are kept (not set to 'unk').
# Setting this to zero effectively disables teacher-forcing in the model.
state['decoder_drop_previous_input_tokens_rate'] = 0.75
# If this flag is enabled, mean field inference with stochastic gradient descent is applied during test time.
# Julian: This didn't really make a big difference...
state['apply_meanfield_inference'] = False
# Word embedding initialization
state['initialize_from_pretrained_word_embeddings'] = False
state['pretrained_word_embeddings_file'] = ''
state['fix_pretrained_word_embeddings'] = False
# If this flag is on, the model will fix the parameters of the utterance encoder and dialogue encoder RNNs,
# as well as the word embeddings. NOTE: NOT APPLICABLE when the flag 'collaps_to_standard_rnn' is on.
state['fix_encoder_parameters'] = False
# If this flag is disabled, the model will not generate the first utterance in a dialogue.
# This is used for the debate dataset as well as the skip_utterance configuration.
state['do_generate_first_utterance'] = True
# If this flag is enabled, the data iterator is changed so that the model is conditioned
# on exactly one utterance and predicts only one utterance; the utterance to predict is
# either the next utterance or the previous utterance in the dialogue.
# When this flag is on, it forces the 'do_generate_first_utterance' to be off.
state['skip_utterance'] = False
# If 'skip_utterance' flag is enabled together with this flag, the data iterator is changed so
# that the model always predicts both the previous and next utterances.
# Note, this will double the batch size!
state['skip_utterance_predict_both'] = False
# ----- TRAINING PROCEDURE -----
# Choose optimization algorithm (adam works well most of the time)
state['updater'] = 'adam'
# If this flag is on, NCE (Noise-Contrastive Estimation) will be used to train model.
# This is significantly faster for large vocabularies (e.g. more than 20K words),
# but experiments show that this degrades performance.
state['use_nce'] = False
# Threshold to clip the gradient
state['cutoff'] = 0.01
# Learning rate. The rate 0.0002 seems to work well across many tasks with adam.
# Alternatively, the learning rate can be adjusted down (e.g. 0.00004)
    # at the end of training to help the model converge well.
state['lr'] = 0.0002
# Early stopping configuration
state['patience'] = 20
state['cost_threshold'] = 1.003
# Batch size. If out of memory, modify this!
state['bs'] = 80
    # Sort examples by length within groups of this many batches
state['sort_k_batches'] = 20
# Training examples will be split into subsequences.
# This parameter controls the maximum size of each subsequence.
# Gradients will be computed on the subsequence, and the last hidden state of all RNNs will
# be used to initialize the hidden state of the RNNs in the next subsequence.
state['max_grad_steps'] = 80
# Modify this in the prototype
state['save_dir'] = './'
# Frequency of training error reports (in number of batches)
state['train_freq'] = 10
# Validation frequency
state['valid_freq'] = 5000
# Number of batches to process
state['loop_iters'] = 3000000
# Maximum number of minutes to run
state['time_stop'] = 24*60*31
# Error level to stop at
state['minerr'] = -1
# Maximum dialogue length
state['max_len'] = -1
# The model can apply several normalization operators to the encoder hidden states:
# 'NONE': No normalization is applied.
# 'BN': Batch normalization is applied.
# 'LN': Layer normalization is applied.
#
# Note the normalization operators can only be applied to GRU encoders and feed-forward neural networks.
state['normop_type'] = 'LN'
if state['normop_type'] == 'BN':
state['normop_gamma_init'] = 0.1
state['normop_gamma_min'] = 0.05
state['normop_gamma_max'] = 10.0
state['normop_moving_average_const'] = 0.99
state['normop_max_enc_seq'] = 50
else:
state['normop_gamma_init'] = 1.0
state['normop_gamma_min'] = 0.05
state['normop_gamma_max'] = 10.0
state['normop_moving_average_const'] = 0.99
state['normop_max_enc_seq'] = 1
# Parameters for initializing the training data iterator.
# The first is the first offset position in the list examples.
# The second is the number of reshuffles to perform at the beginning.
state['train_iterator_offset'] = 0
state['train_iterator_reshuffle_count'] = 1
return state
def prototype_test():
state = prototype_state()
# Fill paths here!
state['train_dialogues'] = "./tests/data/ttrain.dialogues.pkl"
state['test_dialogues'] = "./tests/data/ttest.dialogues.pkl"
state['valid_dialogues'] = "./tests/data/tvalid.dialogues.pkl"
state['dictionary'] = "./tests/data/ttrain.dict.pkl"
state['save_dir'] = "./tests/models/"
state['max_grad_steps'] = 20
# Handle pretrained word embeddings. Using this requires rankdim=10
state['initialize_from_pretrained_word_embeddings'] = False
state['pretrained_word_embeddings_file'] = './tests/data/MT_WordEmb.pkl'
state['fix_pretrained_word_embeddings'] = False
state['valid_freq'] = 50
state['prefix'] = "testmodel_"
state['updater'] = 'adam'
state['maxout_out'] = False
state['deep_utterance_decoder_out'] = True
state['deep_dialogue_encoder_input'] = True
state['utterance_encoder_gating'] = 'GRU'
state['dialogue_encoder_gating'] = 'GRU'
state['utterance_decoder_gating'] = 'GRU'
state['bidirectional_utterance_encoder'] = True
state['direct_connection_between_encoders_and_decoder'] = True
state['bs'] = 5
state['sort_k_batches'] = 1
state['use_nce'] = False
state['decoder_bias_type'] = 'all'
state['qdim_encoder'] = 15
state['qdim_decoder'] = 5
state['sdim'] = 10
state['rankdim'] = 10
return state
def prototype_test_variational():
state = prototype_state()
# Fill paths here!
state['train_dialogues'] = "./tests/data/ttrain.dialogues.pkl"
state['test_dialogues'] = "./tests/data/ttest.dialogues.pkl"
state['valid_dialogues'] = "./tests/data/tvalid.dialogues.pkl"
state['dictionary'] = "./tests/data/ttrain.dict.pkl"
state['save_dir'] = "./tests/models/"
state['max_grad_steps'] = 20
# Handle pretrained word embeddings. Using this requires rankdim=10
state['initialize_from_pretrained_word_embeddings'] = True
state['pretrained_word_embeddings_file'] = './tests/data/MT_WordEmb.pkl'
state['valid_freq'] = 5
state['prefix'] = "testmodel_"
state['updater'] = 'adam'
state['maxout_out'] = False
state['deep_utterance_decoder_out'] = True
state['deep_dialogue_encoder_input'] = True
state['direct_connection_between_encoders_and_decoder'] = False
state['deep_direct_connection'] = False
state['utterance_encoder_gating'] = 'GRU'
state['dialogue_encoder_gating'] = 'GRU'
state['utterance_decoder_gating'] = 'LSTM'
state['bidirectional_utterance_encoder'] = False
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 5
state['condition_latent_variable_on_dialogue_encoder'] = True
state['condition_posterior_latent_variable_on_dcgm_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 10
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['bs'] = 5
state['sort_k_batches'] = 1
state['use_nce'] = False
state['decoder_bias_type'] = 'all'
state['qdim_encoder'] = 15
state['qdim_decoder'] = 5
state['sdim'] = 10
state['rankdim'] = 10
state['gate_latent_piecewise_per_utterance'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_max_weight'] = 0.5
# KL max-trick
#state['train_latent_variables_with_kl_divergence_annealing'] = False
#state['max_kl_percentage'] = 0.01
return state
###
### Twitter - Hyperparameter search for HRED:
###
# sdim = {500, 1000}
# qdim_encoder = {1000}
# qdim_decoder = {1000, 2000, 4000}
# rankdim = 400
# bidirectional_utterance_encoder = True
# reset_utterance_encoder_at_end_of_utterance = False
# reset_utterance_decoder_at_end_of_utterance = True
# lr = 0.0002
# bs = 80
# normop_type = 'LN'
def prototype_twitter_HRED_NormOp_ClusterExp1():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 500
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = False
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = False
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_HRED_NormOp_ClusterExp2():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = False
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = False
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_HRED_NormOp_ClusterExp3():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = False
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = False
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_HRED_NormOp_ClusterExp4():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = False
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = False
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_HRED_NormOp_ClusterExp5():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 2000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = False
state['train_latent_variables_with_kl_divergence_annealing'] = False
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = False
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
###
### Twitter - Hyperparameter search for Gaussian VHRED:
###
# sdim = {500, 1000}
# qdim_encoder = {1000}
# qdim_decoder = {1000, 2000, 4000}
# rankdim = 400
# latent_gaussian_per_utterance_dim = {100, 300}
# bidirectional_utterance_encoder = True
# reset_utterance_encoder_at_end_of_utterance = False
# reset_utterance_decoder_at_end_of_utterance = True
# lr = 0.0002
# bs = 80
# normop_type = 'LN'
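
# Illustrative sketch (not an original prototype): relative to the HRED ClusterExp
# prototypes above, the GaussOnly prototypes below flip four flags: the Gaussian
# latent variable is enabled, conditioned on the dialogue encoder, trained with KL
# divergence annealing, and the decoder drops previous input tokens. This helper
# makes that delta explicit.
def _with_gaussian_latent(state, latent_dim=100):
    state['add_latent_gaussian_per_utterance'] = True
    state['latent_gaussian_per_utterance_dim'] = latent_dim
    state['condition_latent_variable_on_dialogue_encoder'] = True
    state['train_latent_variables_with_kl_divergence_annealing'] = True
    state['decoder_drop_previous_input_tokens'] = True
    return state
# Hypothetical usage: _with_gaussian_latent(prototype_twitter_HRED_NormOp_ClusterExp2())
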
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp1():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 500
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp2():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp3():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp4():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp5():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 300
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
###
### Twitter - Hyperparameter search for Piecewise-Gaussian VHRED:
###
# sdim = {500, 1000}
# qdim_encoder = {1000}
# qdim_decoder = {1000, 2000, 4000}
# rankdim = 400
# latent_gaussian_per_utterance_dim = {100, 300}
# latent_piecewise_per_utterance_dim = {100, 300}
# gate_latent_piecewise_per_utterance = {False, True}
# bidirectional_utterance_encoder = True
# reset_utterance_encoder_at_end_of_utterance = False
# reset_utterance_decoder_at_end_of_utterance = True
# lr = 0.0002
# bs = 80
# normop_type = 'LN'
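
# Illustrative enumeration of the piecewise settings varied below (not an original
# prototype). The GaussPiecewise prototypes add a piecewise latent variable on top
# of the Gaussian one, vary the latent dimensionalities, and in ClusterExp6-8 turn
# off the gating of the piecewise variable.
def _piecewise_latent_variants():
    variants = []
    for latent_dim in [100, 300]:
        for gate in [True, False]:
            variants.append({
                'add_latent_piecewise_per_utterance': True,
                'latent_gaussian_per_utterance_dim': latent_dim,
                'latent_piecewise_per_utterance_dim': latent_dim,
                'gate_latent_piecewise_per_utterance': gate,
            })
    return variants
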
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp1():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 500
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp2():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 1000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp3():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp4():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp5():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 300
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 300
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp6():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
state['gate_latent_piecewise_per_utterance'] = False
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp7():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
state['gate_latent_piecewise_per_utterance'] = False
return state
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp8():
state = prototype_state()
# Fill your paths here!
state['train_dialogues'] = "../TwitterDataBPE/Train.dialogues.pkl"
state['test_dialogues'] = "../TwitterDataBPE/Test.dialogues.pkl"
state['valid_dialogues'] = "../TwitterDataBPE/Valid.dialogues.pkl"
state['dictionary'] = "../TwitterDataBPE/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 2500
state['prefix'] = "TwitterModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['decoder_bias_type'] = 'all' # Choose between 'first', 'all' and 'selective'
state['direct_connection_between_encoders_and_decoder'] = True
state['deep_direct_connection'] = False
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 4000
state['sdim'] = 1000
state['rankdim'] = 400
state['utterance_decoder_gating'] = 'LSTM'
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 300
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 300
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/60000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['patience'] = 20
state['gate_latent_piecewise_per_utterance'] = False
return state
###
### Ubuntu - Hyperparameter search for (Gaussian/Piecewise) VHRED on Ubuntu:
###
### sdim = 1000
### qdim_encoder = 1000
### qdim_decoder = 2000
### rankdim = 400
### deep_utterance_decoder_input={False,True}
###
###
### bidirectional_utterance_encoder = True
### reset_utterance_encoder_at_end_of_utterance = False
### reset_utterance_decoder_at_end_of_utterance = True
### lr = 0.0002
### bs = 80
### normop_type = 'LN'
###
### For latent models, we also experiment with kl_divergence_max_weight={0.25, 0.50, 0.75}
### NOTE: In this case, we early stop according to the reweighted lower bound!
###
###
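
# Illustrative sketch of the kl_divergence_max_weight sweep mentioned above (the
# caller and base prototype are assumptions, not configurations trained as-is).
# Each variant caps the KL term weight; per the note above, such models are
# early-stopped on the reweighted lower bound, which is not handled here.
def _ubuntu_kl_capped_variants(base_prototype_fn):
    variants = []
    for max_weight in [0.25, 0.50, 0.75]:
        state = base_prototype_fn()
        state['kl_divergence_max_weight'] = max_weight
        variants.append(state)
    return variants
# Hypothetical usage: _ubuntu_kl_capped_variants(prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp3)
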
# This is the Ubuntu HRED baseline used in "Piecewise Latent Variables for Neural Variational Text Processing" by Serban et al.
# It achieved the best F1 activity score on the validation set among all HRED baseline models
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Baseline_Exp1():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = False
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Baseline_Exp2():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp1():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = False
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp2():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = False
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp3():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = False
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp4():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
return state
# This is the Ubuntu P-VHRED model used in "Piecewise Latent Variables for Neural Variational Text Processing" by Serban et al.
# It achieved the best F1 activity score on the validation set among all P-VHRED models
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp5():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp6():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
return state
# This is the Ubuntu G-VHRED model used in "Piecewise Latent Variables for Neural Variational Text Processing" by Serban et al.
# It achieved the best F1 activity score on the validation set among all G-VHRED models
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp7():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.25
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp8():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.25
return state
# This is the Ubuntu H-VHRED model used in "Piecewise Latent Variables for Neural Variational Text Processing" by Serban et al.
# It achieved the best activity F1 score on the validation set among all H-VHRED models.
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp9():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.25
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp10():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.5
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp11():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.5
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp12():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.5
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp13():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = False
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.75
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp14():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = False
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.75
return state
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp15():
state = prototype_state()
state['end_sym_utterance'] = '__eot__'
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = -1 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = -1 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = -1 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = -1 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = -1 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = -1 # voice over symbol <voice_over>
state['off_screen_sym'] = -1 # off screen symbol <off_screen>
state['pause_sym'] = -1 # pause symbol <pause>
state['train_dialogues'] = "../UbuntuData/Training.dialogues.pkl"
state['test_dialogues'] = "../UbuntuData/Test.dialogues.pkl"
state['valid_dialogues'] = "../UbuntuData/Validation.dialogues.pkl"
state['dictionary'] = "../UbuntuData/Dataset.dict.pkl"
state['save_dir'] = "Output"
state['max_grad_steps'] = 80
state['valid_freq'] = 5000
state['prefix'] = "UbuntuModel_"
state['updater'] = 'adam'
state['bidirectional_utterance_encoder'] = True
state['deep_dialogue_encoder_input'] = False
state['deep_utterance_decoder_out'] = True
state['bs'] = 80
state['utterance_decoder_gating'] = 'LSTM'
state['direct_connection_between_encoders_and_decoder'] = True
state['qdim_encoder'] = 1000
state['qdim_decoder'] = 2000
state['sdim'] = 1000
state['rankdim'] = 400
# Latent variable configuration
state['add_latent_gaussian_per_utterance'] = True
state['latent_gaussian_per_utterance_dim'] = 100
state['scale_latent_gaussian_variable_variances'] = 0.1
state['add_latent_piecewise_per_utterance'] = True
state['latent_piecewise_per_utterance_dim'] = 100
state['latent_piecewise_alpha_variables'] = 3
state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['condition_latent_variable_on_dialogue_encoder'] = True
state['train_latent_variables_with_kl_divergence_annealing'] = True
state['kl_divergence_annealing_rate'] = 1.0/75000.0
state['decoder_drop_previous_input_tokens'] = True
state['decoder_drop_previous_input_tokens_rate'] = 0.75
state['deep_utterance_decoder_input'] = True
state['patience'] = 20
state['kl_divergence_max_weight'] = 0.75
return state
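# The Exp7-Exp15 prototypes above differ only in which latent variables are enabled
# (Gaussian, piecewise, or both) and in 'kl_divergence_max_weight'. The helper below is
# an illustrative sketch, not part of the original configuration file: it looks up one of
# the prototype_* functions defined in this module by name. It assumes prototype_state()
# is defined earlier in the module, as the prototype functions above already require.
def get_prototype_state_by_name(prototype_name):
    """Return the state dict built by the prototype_* function with the given name."""
    prototype_fn = globals().get(prototype_name)
    if not callable(prototype_fn):
        raise ValueError('Unknown prototype: {}'.format(prototype_name))
    return prototype_fn()
# Hypothetical usage:
#   state = get_prototype_state_by_name('prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp9')
#   print(state['add_latent_gaussian_per_utterance'], state['kl_divergence_max_weight'])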
# --- preprocessing_template.py (repo: manuelmusngi/machine_learning_algorithms_for_development) ---
# Preprocessing Template
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import dataset
dataset = pd.read_csv('')  # reader assumed (e.g. read_csv); adjust for the actual file format
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# Split dataset into Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
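# Feature scaling is the usual next step after the split. This is a minimal sketch, not
# part of the original template, and it assumes every column of X is numeric.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)  # fit the scaler on the training set only
X_test = sc.transform(X_test)        # reuse the training-set mean and variance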
# ---
def parse_currency_to_eur(quantity, currency):
"""
    Translate a quantity of a given currency to its equivalent in euros.
    :param quantity: quantity of the currency
    :param currency: the identifier of the currency
    :return: the quantity in euros, rounded to 2 decimal places (-1 if the currency is not supported)
"""
euros = -1
if currency == 'zł':
euros = quantity * 0.23
return round(euros, 2)
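# Minimal usage sketch (not part of the original module): 100 Polish zloty at the
# hard-coded 0.23 rate above converts to 23.0 euros; an unsupported currency
# identifier falls through to the -1 sentinel.
if __name__ == '__main__':
    print(parse_currency_to_eur(100, 'zł'))   # 23.0
    print(parse_currency_to_eur(100, 'usd'))  # -1 (currency not supported)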
# --- repo: Naman-Bhalla/os-file-system-python ---
from file_system import FileSystem
from user import User
class OS:
def __init__(self):
self.open_files_table = {}
self.process_files_table = {}
self.users = set()
self.system_user = User()
self.users.add(self.system_user)
self.root_file_system = FileSystem()
def new_user(self):
user = User()
self.users.add(user)
return user
def open(self, file_name, mode):
pass
def close(self, file_name):
pass
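# Illustrative usage sketch (an assumption, not part of the original file): an OS
# instance starts with a single system user and an empty root file system; open()
# and close() are intentionally left as stubs above.
if __name__ == '__main__':
    os_instance = OS()
    new_user = os_instance.new_user()
    print(len(os_instance.users))  # 2: the system user plus the newly created one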
# ---
import asyncio
import functools
import logging
import multiprocessing
import os
import textwrap
import time
from queue import Empty as QueueEmpty
import tweepy
import discord
import discord.ext.commands as commands
import discord.utils as dutils
from discord.ext.commands.formatter import Paginator
import paths
from .util import config, oembed
log = logging.getLogger(__name__)
def setup(bot):
log.info('Loading extension.')
# Delete irrelevant sub-process logs
for entry in os.scandir(paths.LOGS):
if entry.is_file() and 'twitter' in entry.name:
os.remove(entry.path)
cog = Twitter(bot)
bot.add_cog(cog)
asyncio.ensure_future(cog.stream.start(), loop=bot.loop)
class TwitterError(commands.CommandError):
pass
class TwitterConfig(config.ConfigElement):
def __init__(self, credentials, **kwargs):
self.credentials = credentials
self.follows = kwargs.pop('follows', [])
def remove_channels(self, *channels):
"""Unregister the given channels from every FollowConfig, and
removes any FollowConfig that end up without any channel.
"""
channels = set(c.id for c in channels)
conf_to_remove = set()
# Check every FollowConfig
for chan_conf in self.follows:
if set(c.id for c in chan_conf.discord_channels) & channels:
# Remove the given channels from this FollowConfig
dchans_to_remove = set(c for c in chan_conf.discord_channels if c.id in channels)
chan_conf.discord_channels = [c for c in chan_conf.discord_channels if c not in dchans_to_remove]
# If this FollowConfig ended up with 0 channel, save it to remove it later
if not chan_conf.discord_channels:
conf_to_remove.add(chan_conf)
if conf_to_remove:
self.follows = [c for c in self.follows if c not in conf_to_remove]
class TwitterCredentials(config.ConfigElement):
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
class FollowConfig(config.ConfigElement):
def __init__(self, id, screen_name, **kwargs):
self.id = id
self.screen_name = screen_name
self.discord_channels = kwargs.pop('discord_channels', [])
self.latest_received = kwargs.pop('latest_received', 0)
class ChannelConfig(config.ConfigElement):
def __init__(self, id, **kwargs):
self.id = id
self.received_count = kwargs.pop('received_count', 0)
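# Illustrative sketch (not part of the original cog) of how the config elements above
# nest together: a FollowConfig represents one followed Twitter account and holds a
# ChannelConfig for every Discord channel its tweets are mirrored to. All literal
# values below are placeholders, and config.ConfigElement is assumed to require
# nothing beyond these constructors.
def _example_twitter_config():
    credentials = TwitterCredentials('consumer-key', 'consumer-secret',
                                     'access-token', 'access-token-secret')
    follow = FollowConfig('783214', 'twitter')               # Twitter user id and screen name
    follow.discord_channels.append(ChannelConfig('123456'))  # Discord channel id
    return TwitterConfig(credentials, follows=[follow])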
class Twitter:
"""Twitter related commands.
Powered by tweepy (https://github.com/tweepy/tweepy)
"""
def __init__(self, bot):
self.bot = bot
self.conf = config.Config(paths.TWITTER_CONFIG, encoding='utf-8')
self.api = TweepyAPI(self.conf.credentials)
self.stream = TweepyStream(self, self.conf, self.api)
self.processed_tweets = 0
def __unload(self):
log.info('Unloading cog.')
self.stream.quit()
async def on_command_error(self, error, ctx):
if isinstance(error, TwitterError):
try:
await ctx.bot.send_message(ctx.message.channel, error)
except discord.Forbidden:
warning = 'Missing the `Send Messages` permission to send the following error to {}: {}'.format(ctx.message.channel.mention, error)
log.warning(warning)
await ctx.bot.send_message(ctx.message.author, warning)
async def on_ready(self):
# Check if we've missed any tweet
await self.fetch_missed_tweets()
async def on_channel_delete(self, channel):
if channel.server is not None:
self.conf.remove_channels(channel)
self.conf.save()
await self.stream.start()
async def on_server_remove(self, server):
self.conf.remove_channels(*server.channels)
self.conf.save()
await self.stream.start()
@commands.group(name='twitter')
async def twitter_group(self):
pass
@twitter_group.command(name='fetch', pass_context=True, no_pm=True)
async def twitter_fetch(self, ctx, handle, limit: int=1):
"""Retrieves the latest tweets from a channel and displays them.
You do not need to include the '@' before the Twitter channel's
        handle; leaving it out avoids unwanted mentions in Discord.
If a limit is given, at most that number of tweets will be displayed. Defaults to 1.
"""
sane_handle = handle.lower().lstrip('@')
# Get the latest tweets from the user
try:
to_display = await self.get_latest_valid(screen_name=sane_handle, limit=limit)
except tweepy.TweepError as e:
# The channel is probably protected
if e.reason == 'Not authorized.':
raise TwitterError('This channel is protected, its tweets cannot be fetched.') from e
if e.api_code == 34:
raise TwitterError('User "{}" not found.'.format(handle)) from e
else:
log.error(str(e))
raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e
# Display the kept tweets
for tweet in to_display:
embed = await self.prepare_embed(tweet)
await self.bot.say(embed=embed)
@twitter_group.command(name='follow', pass_context=True, no_pm=True)
@commands.has_permissions(manage_server=True)
async def twitter_follow(self, ctx, handle):
"""Follows a Twitter channel.
The tweets from the given Twitter channel will be
sent to the channel this command was used in.
You do not need to include the '@' before the Twitter channel's
        handle; leaving it out avoids unwanted mentions in Discord.
Following protected users is not supported by the Twitter API.
See https://dev.twitter.com/streaming/overview/request-parameters#follow
"""
discord_channel = ctx.message.channel
# Check for required permissions
if not discord_channel.permissions_for(discord_channel.server.me).embed_links:
raise TwitterError('\N{WARNING SIGN} The `Embed Links` permission in this channel is required to display tweets properly. \N{WARNING SIGN}')
sane_handle = handle.lower().lstrip('@')
conf = dutils.get(self.conf.follows, screen_name=sane_handle)
if conf is None:
# New Twitter channel, retrieve the user info
partial = functools.partial(self.api.get_user, screen_name=sane_handle)
try:
user = await self.bot.loop.run_in_executor(None, partial)
except tweepy.TweepError as e:
if e.api_code == 50:
raise TwitterError('User "{}" not found.'.format(handle)) from e
else:
log.error(str(e))
raise TwitterError('Unknown error, this has been logged.') from e
# The Twitter API does not support following protected users
# https://dev.twitter.com/streaming/overview/request-parameters#follow
if user.protected:
raise TwitterError('This channel is protected and cannot be followed.')
# Register the new channel
conf = FollowConfig(user.id_str, user.screen_name)
self.conf.follows.append(conf)
try:
# Restart the stream
await self.stream.start()
except tweepy.TweepError as e:
self.conf.follows.remove(conf)
log.error(str(e))
raise TwitterError('Unknown error, this has been logged.') from e
elif dutils.get(conf.discord_channels, id=discord_channel.id):
raise TwitterError('Already following {} on this channel.'.format(handle))
# Add new Discord channel
conf.discord_channels.append(ChannelConfig(discord_channel.id))
self.conf.save()
await self.bot.say('\N{OK HAND SIGN}')
@twitter_group.command(name='search')
async def twitter_search(self, query, limit=5):
"""Searches for a Twitter user.
To use a multi-word query, enclose it in quotes.
"""
try:
results = await self.bot.loop.run_in_executor(None, self.api.search_users, query, limit)
except tweepy.TweepError as e:
log.error(str(e))
raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e
if not results:
raise TwitterError('No result.')
embed = discord.Embed(colour=0x738bd7)
for user in results:
name = '{} - @{}'.format(user.name, user.screen_name)
description = textwrap.shorten(user.description, 1024) if user.description else 'No description.'
embed.add_field(name=name, value=description, inline=False)
await self.bot.say(embed=embed)
@twitter_group.command(name='status', pass_context=True, no_pm=True)
async def twitter_status(self, ctx):
"""Displays the status of the Twitter stream."""
server_channels = set(c.id for c in ctx.message.server.channels)
followed_count = 0
displayed_count = 0
for chan_conf in self.conf.follows:
# Check if this channel is displayed in the server
if set(c.id for c in chan_conf.discord_channels) & server_channels:
followed_count += 1
displayed_count += sum(c.received_count for c in chan_conf.discord_channels if c.id in server_channels)
# Calculate the average tweets processed per minute
minutes = (time.time() - self.bot.start_time) / 60
processed_average = self.processed_tweets / minutes
processed_average = '< 1' if processed_average < 1 else round(processed_average)
tweets_processed = '{} (avg {} / min)'.format(self.processed_tweets, processed_average)
# Display the info
if self.stream.running:
embed = discord.Embed(title='Stream status', description='Online', colour=0x00ff00)
else:
embed = discord.Embed(title='Stream status', description='Offline', colour=0xff0000)
embed.add_field(name='Tweets processed since startup', value=tweets_processed, inline=False)
embed.add_field(name='Channels followed', value=followed_count)
embed.add_field(name='Tweets displayed', value=displayed_count)
await self.bot.say(embed=embed)
@twitter_group.command(name='unfollow', pass_context=True, no_pm=True)
@commands.has_permissions(manage_server=True)
async def twitter_unfollow(self, ctx, handle):
"""Unfollows a Twitter channel.
The tweets from the given Twitter channel will not be
sent to the channel this command was used in anymore.
You do not need to include the '@' before the Twitter channel's
        handle; leaving it out avoids unwanted mentions in Discord.
"""
sane_handle = handle.lower().lstrip('@')
conf = dutils.get(self.conf.follows, screen_name=sane_handle)
chan_conf = dutils.get(conf.discord_channels, id=ctx.message.channel.id) if conf is not None else None
if chan_conf is None:
raise TwitterError('Not following {} on this channel.'.format(handle))
# Remove the Discord channel from the Twitter channel conf
conf.discord_channels.remove(chan_conf)
if not conf.discord_channels:
# If there are no more Discord channel to feed, unfollow the Twitter channel
self.conf.follows.remove(conf)
del conf
# Update the tweepy stream
if len(self.conf.follows) > 0:
await self.stream.start()
else:
self.stream.stop()
self.conf.save()
await self.bot.say('\N{OK HAND SIGN}')
async def get_latest_valid(self, channel_id=None, screen_name=None, limit=0, since_id=0):
if since_id == 0:
# Because we could potentially end up fetching thousands of tweets here, let's force a limit
limit = limit or 3
partial = functools.partial(self.api.user_timeline, user_id=channel_id, screen_name=screen_name, exclude_replies=True, include_rts=True)
else:
partial = functools.partial(self.api.user_timeline, user_id=channel_id, screen_name=screen_name, exclude_replies=True, include_rts=True, since_id=since_id)
latest = await self.bot.loop.run_in_executor(None, partial)
valid = [t for t in latest if not self.skip_tweet(t, from_stream=False)]
valid.sort(key=lambda t: t.id)
return valid[-limit:]
async def fetch_missed_tweets(self):
missed = []
# Gather the missed tweets
for chan_conf in self.conf.follows:
latest = await self.get_latest_valid(chan_conf.id, since_id=chan_conf.latest_received)
if latest:
log.info('Found {} tweets to display for @{}'.format(len(latest), chan_conf.screen_name))
missed.extend(latest)
missed.sort(key=lambda t: t.id)
for tweet in missed:
await self.tweepy_on_status(tweet)
def prepare_tweet(self, tweet, nested=False):
if isinstance(tweet, dict):
tweet = tweepy.Status.parse(self.api, tweet)
tweet.tweet_web_url = 'https://twitter.com/i/web/status/{}'.format(tweet.id)
tweet.tweet_url = 'https://twitter.com/{}/status/{}'.format(tweet.author.screen_name, tweet.id)
urls = tweet.entities.get('urls', [])
media = tweet.entities.get('media', [])
if not nested and tweet.is_quote_status:
tweet.quoted_status = self.prepare_tweet(tweet.quoted_status, nested=True)
sub_tweet = tweet.quoted_status
elif not nested and hasattr(tweet, 'retweeted_status'):
tweet.retweeted_status = self.prepare_tweet(tweet.retweeted_status, nested=True)
sub_tweet = tweet.retweeted_status
else:
sub_tweet = None
# Remove the links to the attached media
for medium in media:
tweet.text = tweet.text.replace(medium['url'], '')
        # Replace links in the tweet with the expanded url for readability
for url in urls.copy():
if url['url'] is None or url['url'] == '' \
or url['expanded_url'] is None or url['expanded_url'] == '':
# Because receiving something like this is possible:
# "urls": [ {
# "indices": [ 141, 141 ],
# "url": "",
# "expanded_url": null
# } ],
urls.remove(url)
elif url['expanded_url'] == tweet.tweet_url \
or url['expanded_url'] == tweet.tweet_web_url \
or (sub_tweet is not None and (url['expanded_url'] == sub_tweet.tweet_url
or url['expanded_url'] == sub_tweet.tweet_web_url)):
tweet.text = tweet.text.replace(url['url'], '').strip()
urls.remove(url)
else:
tweet.text = tweet.text.replace(url['url'], url['expanded_url']).strip()
        # Avoid an empty tweet body, which would make the embed invalid
if not tweet.text:
tweet.text = '\N{ZERO WIDTH SPACE}'
return tweet
async def prepare_embed(self, tweet):
tweet = self.prepare_tweet(tweet)
author = tweet.author
author_url = 'https://twitter.com/{}'.format(author.screen_name)
# Build the embed
embed = discord.Embed(colour=discord.Colour(int(author.profile_link_color, 16)),
title=author.name,
url=tweet.tweet_url,
timestamp=tweet.created_at)
embed.set_author(name='@{}'.format(author.screen_name), icon_url=author.profile_image_url, url=author_url)
# Check for retweets and quotes to format the tweet
if tweet.is_quote_status:
sub_tweet = tweet.quoted_status
embed.description = tweet.text
embed.add_field(name='Retweet from @{} :'.format(sub_tweet.author.screen_name), value=sub_tweet.text)
elif hasattr(tweet, 'retweeted_status'):
sub_tweet = tweet.retweeted_status
embed.add_field(name='Retweet from @{} :'.format(sub_tweet.author.screen_name), value=sub_tweet.text)
else:
sub_tweet = tweet
embed.description = tweet.text
# Parse the tweet's entities to extract media and include them as the embed's image
urls = sub_tweet.entities.get('urls', [])
media = sub_tweet.entities.get('media', [])
if media:
embed.set_image(url=media[0]['media_url_https'])
elif urls:
# Fetch oembed data from the url and use it as the embed's image
url = urls[0]['expanded_url']
try:
data = await oembed.fetch_oembed_data(url)
except Exception as e:
log.warning(str(e))
else:
if data['type'] == 'photo':
embed.set_image(url=data['url'])
else:
embed.set_image(url=data.get('thumbnail_url', None) or data.get('url', None))
return embed
def skip_tweet(self, status, from_stream=True):
"""Returns True if the given Twitter status is to be skipped."""
log_status = 'author: {}, reply_to_status: {}, reply_to_user: {}, quoting: {}, retweet: {}, text: {}'
log_status = log_status.format(status.author.screen_name,
status.in_reply_to_status_id,
status.in_reply_to_user_id,
status.is_quote_status,
hasattr(status, 'retweeted_status'),
status.text)
# Ignore replies
if status.in_reply_to_status_id or status.in_reply_to_user_id:
log.debug('Ignoring tweet (reply): ' + log_status)
return True
elif from_stream and status.author.id_str not in self.stream.get_follows():
log.debug('Ignoring tweet (bad author): ' + log_status)
return True
else:
log.debug('Dispatching tweet to handler: ' + log_status)
return False
async def tweepy_on_status(self, tweet):
"""Called by the stream when a tweet is received."""
self.processed_tweets += 1
if self.skip_tweet(tweet):
return
chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)
try:
embed = await self.prepare_embed(tweet)
content = None
        except Exception:
            embed = None
            # This will itself fail if preparation died before tweet.tweet_web_url was set
            content = 'Failed to prepare embed for ' + tweet.tweet_web_url
            log.error('Failed to prepare embed for ' + str(tweet._json))
# Make sure we're ready to send messages
await self.bot.wait_until_ready()
for channel in chan_conf.discord_channels:
discord_channel = self.bot.get_channel(channel.id)
# Check if the channel still exists
if discord_channel is None:
                log.error('Channel {} unavailable to display tweet {}.'.format(channel.id, tweet.id_str))
continue
# Check for required permissions
perms = discord_channel.permissions_for(discord_channel.server.me)
if not perms.embed_links:
log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))
try:
warning = '\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. \N{WARNING SIGN}'.format(tweet.author.screen_name)
await self.bot.send_message(discord_channel, warning)
except discord.DiscordException as e:
log.error('Could not send warning to channel {}.\n{}'.format(discord_channel.id, e))
continue
# Send the embed to the appropriate channel
log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))
await self.bot.send_message(discord_channel, content=content, embed=embed)
# Update stats and latest id when processing newer tweets
if tweet.id > chan_conf.latest_received:
channel.received_count += 1
chan_conf.latest_received = tweet.id
self.conf.save()
class TweepyAPI(tweepy.API):
"""Auto login tweepy api object."""
def __init__(self, conf):
tweepy.API.__init__(self, wait_on_rate_limit=True)
self.auth = tweepy.OAuthHandler(conf.consumer_key, conf.consumer_secret)
self.auth.set_access_token(conf.access_token, conf.access_token_secret)
log.info('Logged in Twitter as {username}'.format(username=self.verify_credentials().screen_name))
class SubProcessStream(multiprocessing.Process):
"""Aggregation of things to run a tweepy stream in a sub-process."""
def __init__(self, mp_queue, credentials, follows, *args, **kwargs):
self.mp_queue = mp_queue
self.credentials = credentials
self.follows = follows
super().__init__(*args, name='Tweepy Stream', target=self.run_tweepy, **kwargs)
def run_tweepy(self):
"""The entry point of the sub-process.
        The tweepy.API object isn't picklable so let's just re-create it in the sub-process.
The tweepy.StreamListener instance then has to be created here too as a separate object instead of
this class inheriting from it.
Finally tweepy.Stream has to be instantiated here too to register the listener.
This feels kinda ugly.
"""
# Setup the logging for the sub-process
rlog = logging.getLogger()
rlog.setLevel(logging.INFO)
handler = logging.FileHandler(paths.TWITTER_SUBPROCESS_LOG.format(pid=os.getpid()), encoding='utf-8')
handler.setFormatter(logging.Formatter('{asctime} {levelname} {name} {message}', style='{'))
rlog.addHandler(handler)
# Do not join the queue's bg thread on exit
self.mp_queue.cancel_join_thread()
# Create the tweepy stream
log.info('Creating and starting tweepy stream.')
api = TweepyAPI(self.credentials) # Re-creation, much efficient, wow
listener = SubProcessStream.TweepyListener(self.mp_queue, api)
stream = tweepy.Stream(api.auth, listener)
log.info('Tweepy stream ready.')
        # Keep the stream alive: restart it whenever it dies with an exception
while True:
try:
log.info('Starting Tweepy stream.')
stream.filter(follow=self.follows)
except Exception as e:
log.exception('Recovering from exception : {}'.format(e))
else:
log.info('Exiting sub-process.')
return
class TweepyListener(tweepy.StreamListener):
def __init__(self, mp_queue, api=None):
tweepy.StreamListener.__init__(self, api)
self.mp_queue = mp_queue
def on_data(self, data):
"""Called when raw data is received from connection."""
if data is not None:
# Send the data to the parent process
logging.debug('Received raw data : ' + str(data))
self.mp_queue.put(data)
class TweepyStream(tweepy.StreamListener):
"""Abstraction of the tweepy streaming api."""
def __init__(self, handler, conf, api=None):
if api is None:
api = TweepyAPI(conf)
super().__init__(api)
self.handler = handler
self.conf = conf
self.sub_process = None
self.mp_queue = None
self.daemon = None
@property
def running(self):
"""Returns whether or not a Twitter stream is running."""
return self.sub_process and self.sub_process.is_alive()
async def start(self):
"""Starts the tweepy Stream."""
# Avoid being rate limited by Twitter when restarting the stream with the same follow list.
        if self.sub_process and set(self.sub_process.follows) == set(self.get_follows()):
return
# Kill the current stream before starting a new one
self.stop()
# No need to start a stream if we're not following anyone
if not self.conf.follows:
return
# Create a new multi-processes queue, a new stream object and a new Process
log.info('Creating new sub-process.')
self.mp_queue = multiprocessing.Queue()
self.mp_queue.cancel_join_thread()
self.sub_process = SubProcessStream(self.mp_queue, self.conf.credentials, self.get_follows())
log.info('Created new sub-process.')
# Schedule the polling daemon (it will take care of starting the child process)
self.daemon = asyncio.ensure_future(self._run())
def stop(self):
"""Stops the tweepy Stream."""
if self.running:
log.info('Stopping sub process (pid {}).'.format(self.sub_process.pid))
self.sub_process.terminate()
self.sub_process.join()
log.info('Stopped sub process (pid {}).'.format(self.sub_process.pid))
self.daemon.cancel()
log.info('Cancelled polling daemon for sub process {}.'.format(self.sub_process.pid))
# Cleanup the stream
log.info('Cleaning sub-process (pid {}).'.format(self.sub_process.pid))
self.mp_queue.close()
self.mp_queue = None
self.sub_process = None
self.daemon = None
def quit(self):
"""Prepares for a safe unloading."""
self.stop()
self.handler = None
def get_follows(self):
"""Returns a list containing the Twitter ID of the channels we're following."""
return [c.id for c in self.conf.follows]
async def _run(self):
"""Polling daemon that checks the multi-processes queue for data and dispatches it to `on_data`."""
self.sub_process.start()
log.info('Started sub process (pid {}).'.format(self.sub_process.pid))
# Wait until the process is actually started to not consider it dead when it's not even born yet
while not self.sub_process.is_alive():
try:
                # Sleep in ~100 ms steps: wait_for() a 1 s sleep with a 0.1 s timeout
await asyncio.wait_for(asyncio.sleep(1), 0.1)
except asyncio.TimeoutError:
pass
        # Main polling loop: drain the sub-process queue and dispatch the data
while True:
try:
data = self.mp_queue.get(False) # Do not block
except QueueEmpty:
if not self.sub_process.is_alive():
log.warning('Sub process (pid {}) appears dead.'.format(self.sub_process.pid))
                    self.stop()  # stop() is synchronous (not a coroutine); exit the polling loop
                    return
# Arbitrary sleep time after an unsuccessful poll
await asyncio.sleep(4)
except Exception as e:
# Might be triggered when the sub_process is terminated while putting data in the queue
log.error('Queue polling error: ' + str(e))
break
else:
if data is not None:
# Process the data sent by the subprocess
self.on_data(data)
def on_status(self, status):
"""Called when a new status arrives."""
log.debug('Received status: ' + str(status._json))
# Feed the handler with the tweet
asyncio.ensure_future(self.handler.tweepy_on_status(status))
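# Loading sketch (an assumption, not part of the original file): with the discord.py
# 0.16-style API used here, this cog is loaded as an extension through the setup()
# entry point at the top of the file, normally from the bot's launcher script rather
# than from this module. The extension path 'cogs.twitter', the prefix and the token
# argument are placeholders.
def _example_load(token):
    bot = commands.Bot(command_prefix='!')
    bot.load_extension('cogs.twitter')
    bot.run(token)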
# ---
#!/usr/bin/env python3
# coding: utf-8
from rdbox.k8s_response_helper import K8sResponseHelper
from rdbox.rdbox_node_formatter import RdboxNodeFormatter
from logging import getLogger
r_logger = getLogger('rdbox_cli')
r_print = getLogger('rdbox_cli').getChild("stdout")
class AnsibleRdboxNodeFormatter(RdboxNodeFormatter):
def output_report(self, rdbox_node_list):
output_str = ""
grouping_dict = rdbox_node_list.group_by("location")
longest_number = rdbox_node_list.get_longest_number_of_char_for(
"hostname")
longest_number_ip = rdbox_node_list.get_longest_number_of_char_for(
"ip")
if K8sResponseHelper.LOCATION_NOT_DEFINE in grouping_dict:
for rdbox_node in grouping_dict.get(K8sResponseHelper.LOCATION_NOT_DEFINE):
space = self._get_prety_string(
rdbox_node.get_hostname(), longest_number)
space_ip = self._get_prety_string(
rdbox_node.get_ip(), longest_number_ip)
output_str += "%s%s ansible_host=%s%s ansible_python_interpreter=/usr/bin/python3\n" % (
rdbox_node.get_hostname(), space, rdbox_node.get_ip(), space_ip)
output_str += "\n"
for key, list_of_group in grouping_dict.items():
if key != K8sResponseHelper.LOCATION_NOT_DEFINE:
output_str += "[%s]\n" % (key)
else:
continue
for rdbox_node in list_of_group:
space = self._get_prety_string(
rdbox_node.get_hostname(), longest_number)
space_ip = self._get_prety_string(
rdbox_node.get_ip(), longest_number_ip)
output_str += "%s%s ansible_host=%s%s ansible_python_interpreter=/usr/bin/python3\n" % (
rdbox_node.get_hostname(), space, rdbox_node.get_ip(), space_ip)
output_str += "\n"
r_print.info(output_str)
return rdbox_node_list, output_str
@classmethod
    def _get_prety_string(cls, before_string, max_width, prety_str=" "):
return prety_str * (max_width - len(before_string))
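# Illustrative sketch (not part of the original module): the helper above right-pads a
# value to the width of the longest value in its column so that the generated Ansible
# inventory lines up. The hostname and column width below are made up.
if __name__ == '__main__':
    padding = AnsibleRdboxNodeFormatter._get_prety_string('rdbox-master00', 20)
    print('rdbox-master00' + padding + ' ansible_host=192.168.179.2 ansible_python_interpreter=/usr/bin/python3')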
# ---
from abc import ABC
import warnings
import contextlib
from genie.conf.base.attributes import UnsupportedAttributeWarning,\
AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig
from genie.libs.conf.interface import BviInterface
from genie.libs.conf.l2vpn.pseudowire import PseudowireNeighbor,\
PseudowireIPv4Neighbor, PseudowireEviNeighbor
class BridgeDomain(ABC):
class DeviceAttributes(ABC):
class InterfaceAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 (config-l2vpn-bg-bd-ac)
with configurations.submode_context(
attributes.format(
'routed interface {interface_name}' if isinstance(self.interface, BviInterface) else 'interface {interface_name}',
force=True),
exit_cmd='' if isinstance(self.interface, BviInterface) else 'exit', # routed interface may not be an actual submode
):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
if isinstance(self.interface, BviInterface):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1 / split-horizon group core
v = attributes.value('split_horizon_group_core')
if v is True:
configurations.append_line('split-horizon group core')
if configurations:
# There are configurations... It must be a submode; exit.
configurations.append_line('exit', raw=True)
else:
                            # There are no configurations... May not be a submode; don't exit.
pass
else:
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 snoop profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection (config-l2vpn-bg-bd-ac-dai)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation (config-l2vpn-bg-bd-ac-dai-av)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4 disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / igmp snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard (config-l2vpn-bg-bd-ac-ipsg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
sub, attributes2 = attributes.namespace('mac')
if sub is not None:
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mld snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / split-horizon group
v = attributes.value('split_horizon_group')
if v is True:
configurations.append_line('split-horizon group')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / static-mac-address aaaa.bbbb.cccc
configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast pps 1
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class MacAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
with configurations.submode_context('mac', cancel_empty=True):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging (config-l2vpn-bg-bd-ac-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / type inactivity
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / learning disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit (config-l2vpn-bg-bd-ac-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / port-down flush
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / port-down flush disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure (config-l2vpn-bg-bd-ac-mac-secure)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / logging disable
pass
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class NeighborAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
nbr_ctx = None
nbr_is_submode = True
if isinstance(self.neighbor, PseudowireIPv4Neighbor):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
assert self.ip is not None
assert self.pw_id is not None
nbr_ctx = attributes.format('neighbor {ip} pw-id {pw_id}', force=True)
elif isinstance(self.neighbor, PseudowireEviNeighbor):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn 1 target 1
assert self.evi is not None
assert self.ac_id is not None
nbr_ctx = attributes.format('neighbor evpn {evi.evi_id} target {ac_id}', force=True)
nbr_is_submode = False
else:
raise ValueError(self.neighbor)
if not nbr_is_submode:
configurations.append_line(nbr_ctx)
else:
with configurations.submode_context(nbr_ctx):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / backup neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw-backup)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / backup neighbor 1.2.3.4 pw-id 1 / pw-class someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 snoop profile someword3
v = attributes.value('dhcp_ipv4_snooping_profile')
if v is not None:
if v is False:
configurations.append_line('dhcp ipv4 none')
else:
configurations.append_line('dhcp ipv4 snoop profile {}'.format(v))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding unknown-unicast
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding unknown-unicast disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / igmp snooping profile someword3
v = attributes.value('igmp_snooping_profile')
if v is not None:
if v is False:
pass
else:
configurations.append_line('igmp snooping profile {}'.format(v))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac (config-l2vpn-bg-bd-pw-mac)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging (config-l2vpn-bg-bd-pw-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / type inactivity
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / learning disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit (config-l2vpn-bg-bd-pw-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / port-down flush
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / port-down flush disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure (config-l2vpn-bg-bd-pw-mac-secure)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mld snooping profile someword3
v = attributes.value('mld_snooping_profile')
if v is not None:
if v is False:
pass
else:
configurations.append_line('mld snooping profile {}'.format(v))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mpls static label local 16 remote 16
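                        # Note: the "local" label pushed here is taken from the remote
                        # neighbor's mpls_static_label attribute, so the two pseudowire
                        # endpoints end up with mirrored local/remote label pairs.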
remote_label = attributes.value('mpls_static_label')
if remote_label is not None:
local_label = self.parent.neighbor_attr[self.remote_neighbor].mpls_static_label
if local_label is None:
warnings.warn(
'remote neighbor {!r} mpls_static_label missing'.format(self.remote_neighbor),
UnsupportedAttributeWarning)
else:
configurations.append_line('mpls static label local {} remote {}'.\
format(local_label, remote_label))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / pw-class someword3
v = attributes.value('pw_class')
if v is not None:
configurations.append_line('pw-class {}'.\
format(v.device_attr[self.device].name))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / split-horizon group
if attributes.value('split_horizon'):
configurations.append_line('split-horizon group')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / static-mac-address aaaa.bbbb.cccc
configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control broadcast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control broadcast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control multicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control multicast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control unknown-unicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control unknown-unicast pps 1
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class EviAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / evi 1 (config-l2vpn-bg-bd-evi)
with configurations.submode_context(
attributes.format('evi {evi_id}', force=True),
exit_cmd=''): # evi is not a sub-mode in all releases.
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class VniAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / member vni 1 (config-l2vpn-bg-bd-vni)
with configurations.submode_context(attributes.format('member vni {vni_id}', force=True)):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class MacAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac (config-l2vpn-bg-bd-mac)
with configurations.submode_context('mac', cancel_empty=True):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging (config-l2vpn-bg-bd-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / type inactivity
with configurations.submode_context('aging',cancel_empty=True):
configurations.append_line(attributes.format('time {aging_time}'))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / learning disable
v = attributes.value('learning_disable')
if v is True:
configurations.append_line('learning disable')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit (config-l2vpn-bg-bd-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / port-down flush
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / port-down flush disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure (config-l2vpn-bg-bd-mac-secure)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / static-address aaaa.bbbb.cccc drop
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw access-pw disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw optimize
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw relay
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw state-down
pass
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
def build_config(self, apply=True, attributes=None, unconfig=False,
contained=False, **kwargs):
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn (config-l2vpn)
submode_stack = contextlib.ExitStack()
if not contained:
submode_stack.enter_context(
configurations.submode_context('l2vpn'))
# iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
with configurations.submode_context(attributes.format('bridge group {group_name}', force=True, cancel_empty=True)):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
with configurations.submode_context(attributes.format('bridge-domain {name}', force=True)):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / coupled-mode
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dhcp ipv4 snoop profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection (config-l2vpn-bg-bd-dai)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation (config-l2vpn-bg-bd-dai-av)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / dst-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / ipv4
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / src-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / evi 1 (config-l2vpn-bg-bd-evi)
for sub, attributes2 in attributes.mapping_values('evi_attr', keys=self.evis, sort=True):
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / flooding disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / flooding unknown-unicast disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / igmp snooping disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / igmp snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 (config-l2vpn-bg-bd-ac)
for sub, attributes2 in attributes.mapping_values('interface_attr', keys=self.interfaces, sort=True):
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / ip-source-guard (config-l2vpn-bg-bd-ipsg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / ip-source-guard / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac (config-l2vpn-bg-bd-mac)
ns, attributes2 = attributes.namespace('mac')
if ns is not None:
configurations.append_block(
ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / member vni 1 (config-l2vpn-bg-bd-vni)
for sub, attributes2 in attributes.mapping_values('vni_attr', keys=self.vnis, sort=True):
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mld snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mtu 100
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
for sub, attributes2 in attributes.mapping_values('neighbor_attr', keys=self.pseudowire_neighbors, sort=True):
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn evi 1 target 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn evi 1 target 1 source 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / nv satellite (config-l2vpn-bg-bd-nv)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / nv satellite / offload ipv4 multicast enable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core (config-l2vpn-bg-bd-pbb-core)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / evi 1 (config-l2vpn-bg-bd-pbb-core-evi)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac (config-l2vpn-bg-bd-pbb-core-mac)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging (config-l2vpn-bg-bd-pbb-core-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / type inactivity
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / learning disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mmrp-flood-optimization
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / rewrite ingress tag push dot1ad 1 symmetric
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 (config-l2vpn-bg-bd-pbb-edge)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / dhcp ipv4 none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / dhcp ipv4 snoop profile someword4
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / igmp snooping profile someword4
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac (config-l2vpn-bg-bd-pbb-edge-mac)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging (config-l2vpn-bg-bd-pbb-edge-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / type inactivity
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / learning disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit (config-l2vpn-bg-bd-pbb-edge-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure (config-l2vpn-bg-bd-pbb-edge-mac-sec)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / accept-shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / split-horizon group vfi disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / static-mac-address aaaa.bbbb.cccc bmac aaaa.bbbb.cccc
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / unknown-unicast-bmac aaaa.bbbb.cccc
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / shutdown
if attributes.value('shutdown'):
configurations.append_line('shutdown')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control broadcast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control broadcast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control multicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control multicast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control unknown-unicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control unknown-unicast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / transport-mode vlan passthrough
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 (config-l2vpn-bg-bd-vfi)
for vfi, attributes2 in attributes.sequence_values('vfis'):
configurations.append_block(
str(vfi.build_config(apply=False, attributes=attributes2, unconfig=unconfig)))
submode_stack.close()
if apply:
if configurations:
self.device.configure(str(configurations), fail_invalid=True)
else:
return CliConfig(device=self.device, unconfig=unconfig,
cli_config=configurations, fail_invalid=True)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
|
StarcoderdataPython
|
85709
|
<gh_stars>1-10
import time
from threading import Thread
import RPi
from RPi import GPIO
try:
    from collections.abc import Iterable  # Python 3.3+ (required on 3.10+)
except ImportError:  # fallback for older interpreters
    from collections import Iterable
import itertools
HIGH = GPIO.HIGH
LOW = GPIO.LOW
def set_pin_mode(mode):
"""
Set pin numbering mode for all pins.
Args:
mode (str): mode to set, must be 'BOARD' or 'BCM'
"""
GPIO.setmode(getattr(GPIO, mode))
def setup_input_pin(pin, resistor=None):
"""
Setup an input pin.
Setup pin for input and optionally configure a pull up or pull down
resistor. `set_pin_mode` must be called first.
Args:
pin (int): number of pin to setup for input
resistor (str): how to configure the internal resistor for the pin.
Allowable values:
- `pull_up`: enable pull up resistor
                - `pull_down`: enable pull down resistor
- `float`: do not enable resistor
- `None`: same as `float`
"""
if resistor == 'pull_up':
pull_up_down = GPIO.PUD_UP
elif resistor == 'pull_down':
pull_up_down = GPIO.PUD_DOWN
elif resistor == 'float' or resistor is None:
pull_up_down = GPIO.PUD_OFF
else:
raise ValueError('invalid resistor setting {}'.format(resistor))
GPIO.setup(pin, GPIO.IN, pull_up_down=pull_up_down)
def pin_cleanup():
"""Reset GPIO pin state for all pins"""
GPIO.cleanup()
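# Example usage of the helpers above (a minimal sketch; BCM numbering and
# pin 17 are illustrative assumptions, not part of this module):
#
#   set_pin_mode('BCM')
#   setup_input_pin(17, resistor='pull_up')   # e.g. a button wired to ground
#   print(GPIO.input(17))                     # HIGH until the button is pressed
#   pin_cleanup()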
class Watcher(Thread):
def __init__(self, observer, pin, sleep, debounce_delay=0, daemon=True):
"""
Create daemon thread that reports pin changes to an observer callback.
Args:
observer (object): object to receive notifications. When a pin
change is detected the `observer.update_pin(pin, reading)`
method is called.
pin (int): pin to watch
sleep (float): how long to sleep in seconds in each polling loop.
For testing this can also be an Iterable of floats in which
case the Thread exits when the Iterable is complete.
debounce_delay (float): how long a new pin reading has to hold
steady before it is accepted (and passed to `update_pin()`)
daemon (bool): whether to run as daemon. Mostly for testing.
"""
super(Watcher, self).__init__(name='PinWatcher-{}'.format(pin))
self.daemon = daemon
self.observer = observer
self.pin = pin
if isinstance(sleep, Iterable):
self.sleep_iter = sleep
else:
self.sleep_iter = itertools.repeat(sleep)
self.debounce_delay = debounce_delay
self.reading = None
self.last_reading = None
self.debounce_time = 0
def run(self):
for sleep in self.sleep_iter:
new_reading = GPIO.input(self.pin)
now = time.time()
dt = now - self.debounce_time
if new_reading != self.last_reading:
self.debounce_time = now
if dt >= self.debounce_delay and new_reading != self.reading:
self.observer.update_pin(self.pin, new_reading)
self.reading = new_reading
self.last_reading = new_reading
time.sleep(sleep)
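# Example wiring of the Watcher above (a minimal sketch; the observer class
# and pin number are illustrative assumptions):
#
#   class PrintObserver(object):
#       def update_pin(self, pin, reading):
#           print('pin {} changed to {}'.format(pin, reading))
#
#   set_pin_mode('BCM')
#   setup_input_pin(17, resistor='pull_up')
#   watcher = Watcher(PrintObserver(), pin=17, sleep=0.01, debounce_delay=0.05)
#   watcher.start()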
|
StarcoderdataPython
|
4804293
|
import time
from pathlib import Path
import cv2
from ir_tracker.utils import calibration_manager, debug_server, picam_wrapper
def draw_info(image, text):
cv2.putText(image, text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
(255, 255, 0), 2, cv2.LINE_AA)
CHESSBOARD_HEIGHT = 8
CHESSBOARD_WIDTH = 5
PICTURE_TIME = 3
NUMBER_OF_IMAGES = 10
def main():
debug_image_container = debug_server.create_image_server()
with picam_wrapper.picamera_opencv_video(resolution=(640, 480),
framerate=30) as video_stream:
        calibration_images = []
        last_capture_time = time.time()
        for frame in video_stream:
            if len(calibration_images) >= NUMBER_OF_IMAGES:
                break
            time_delta = time.time() - last_capture_time
            # keep a clean copy so the overlay text and drawn corners do not
            # end up in the saved calibration images
            clean_frame = frame.copy()
            draw_info(
                frame,
                f"{PICTURE_TIME - time_delta:.1f}s left, {len(calibration_images)}/{NUMBER_OF_IMAGES}"
            )
            # detect chessboard
            gray = cv2.cvtColor(clean_frame, cv2.COLOR_BGR2GRAY)
            found_chessboard, corners = cv2.findChessboardCorners(
                gray, (CHESSBOARD_HEIGHT, CHESSBOARD_WIDTH), None)
            cv2.drawChessboardCorners(frame,
                                      (CHESSBOARD_HEIGHT, CHESSBOARD_WIDTH),
                                      corners, found_chessboard)
            # capture one frame per PICTURE_TIME seconds while a chessboard is
            # visible, until NUMBER_OF_IMAGES images have been collected
            if found_chessboard and time_delta >= PICTURE_TIME:
                calibration_images.append(clean_frame)
                last_capture_time = time.time()
            debug_image_container["calib"] = frame
image_directory = Path.home().joinpath("calibration_images")
image_directory.mkdir(parents=True, exist_ok=True)
print(f"Saving images to {image_directory}")
for i, image in enumerate(calibration_images):
cv2.imwrite(f"{str(image_directory)}/image_{i}.png", image)
print("images saved")
print("Calibrating")
calibartion = calibration_manager.calibarate_from_images(
calibration_images, CHESSBOARD_HEIGHT, CHESSBOARD_WIDTH, 500)
calibration_dir = Path.home().joinpath("calibration")
calibration_dir.mkdir(parents=True, exist_ok=True)
calibration_path = calibration_dir.joinpath("picamera_calibration.yml")
print(f"Saving calibration to {calibration_path}")
calibartion.save_yaml(str(calibration_path))
calibartion_read = calibration_manager.ImageCalibration.load_yaml(
calibration_path)
for image in calibration_images:
undisorted = calibartion_read.undistort_image(image, False)
combined = cv2.vconcat((image, undisorted))
debug_image_container["calib"] = combined
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3290754
|
<filename>bin/ivar_variants_to_vcf.py
#!/usr/bin/env python
import os
import sys
import re
import errno
import argparse
import numpy as np
from scipy.stats import fisher_exact
def parse_args(args=None):
Description = "Convert iVar variants TSV file to VCF format."
Epilog = """Example usage: python ivar_variants_to_vcf.py <file_in> <file_out>"""
parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
parser.add_argument("file_in", help="Input iVar TSV file.")
parser.add_argument("file_out", help="Full path to output VCF file.")
parser.add_argument(
"-po",
"--pass_only",
help="Only output variants that PASS filters.",
action="store_true",
)
parser.add_argument(
"-af",
"--allele_freq_threshold",
type=float,
default=0,
help="Only output variants where allele frequency is greater than this number (default: 0).",
)
parser.add_argument(
"-is",
"--ignore_strand_bias",
default=False,
help="Does not take strand bias into account, use this option when not using amplicon sequencing.",
action="store_true"
)
parser.add_argument(
"-ic",
"--ignore_merge_codons",
help="Output variants without taking into account if consecutive positions belong to the same codon.",
action="store_true"
)
return parser.parse_args(args)
def check_consecutive(mylist):
'''
Description:
        This function checks whether a list of two or three numbers is consecutive and returns how many leading items are consecutive.
input:
my_list - A list of integers
return:
Number of items consecutive in the list - [False, 1, 2]
'''
my_list = list(map(int, mylist))
## Check if the list contains consecutive numbers
if sorted(my_list) == list(range(min(my_list), max(my_list)+1)):
return len(my_list)
else:
## If not, and the list is > 1, remove the last item and reevaluate.
if len(my_list) > 1:
my_list.pop()
if sorted(my_list) == list(range(min(my_list), max(my_list)+1)):
return len(my_list)
else:
return False
return False
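# Illustrative examples of the return values (positions are assumptions):
#   check_consecutive(["100", "101", "102"]) -> 3
#   check_consecutive(["100", "101", "150"]) -> 2     (last item dropped, rest re-checked)
#   check_consecutive(["100", "150", "200"]) -> False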
def codon_position(seq1,seq2):
'''
Description:
        Compare two codon nucleotide sequences (size 3) and return the position where they differ.
Input:
seq1 - list size 3 [A,T,C,G]
seq2 - list size 3 [A,T,C,G]
Returns:
Returns position where seq1 != seq2
'''
if seq1 == "NA":
return False
ind_diff = [i for i in range(len(seq1)) if seq1[i] != seq2[i]]
if len(ind_diff) > 1:
print("There has been an issue, more than one difference between the seqs.")
return False
else:
return ind_diff[0]
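# Illustrative examples (codon values are assumptions):
#   codon_position("ACT", "AGT") -> 1      (sequences differ at index 1)
#   codon_position("NA", "AGT")  -> False  (no reference codon available)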
def rename_vars(dict_lines,num_collapse):
'''
Description:
        The function sets the variant fields according to the number of lines to collapse due to consecutive variants.
    Input:
        dict_lines - Dict with var lines.
        num_collapse - number of lines to collapse [2,3]
    Returns:
        The collapsed variant fields.
'''
CHROM = dict_lines["CHROM"][0]
POS = dict_lines["POS"][0]
ID = dict_lines["ID"][0]
# If two consecutive collapse 2 lines into one.
if int(num_collapse) == 2:
REF = str(dict_lines["REF"][0]) + str(dict_lines["REF"][1])
ALT = str(dict_lines["ALT"][0]) + str(dict_lines["ALT"][1])
# If three consecutive collapse 3 lines into one.
elif int(num_collapse) == 3:
REF = str(dict_lines["REF"][0]) + str(dict_lines["REF"][1]) + str(dict_lines["REF"][2])
ALT = str(dict_lines["ALT"][0]) + str(dict_lines["ALT"][1]) + str(dict_lines["ALT"][2])
    ## TODO Check how many differences we find among the DPs in the three positions of a codon.
REF_DP = dict_lines["REF_DP"][0]
REF_RV = dict_lines["REF_RV"][0]
ALT_DP = dict_lines["ALT_DP"][0]
ALT_RV = dict_lines["ALT_RV"][0]
QUAL = dict_lines["QUAL"][0]
REF_CODON = REF
ALT_CODON = ALT
FILTER =dict_lines["FILTER"][0]
# INFO DP depends on the decision in the todo above. SB is left with the first one.
INFO = dict_lines["INFO"][0]
FORMAT = dict_lines["FORMAT"][0]
# sample depends on the decision in the todo above.
SAMPLE = dict_lines["SAMPLE"][0]
return CHROM,POS,ID,REF,ALT,QUAL,FILTER,INFO,FORMAT,SAMPLE
def make_dir(path):
'''
Description:
Create directory if it doesn't exist.
Input:
path - path where the directory will be created.
Returns:
None
'''
if not len(path) == 0:
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def ivar_variants_to_vcf(file_in, file_out, pass_only=False, min_allele_frequency=0, ignore_strand_bias=False, ignore_merge_codons=False):
'''
Description:
Main function to convert iVar variants TSV to VCF.
Input:
file_in : iVar variants TSV file
file_out : VCF output file
pass_only : Only keep variants that PASS filter [True, False]
min_allele_freq : Minimum allele frequency to keep a variant [0]
ignore_strand_bias : Do not apply strand-bias filter [True, False]
ignore_merge_codons : Do not take into account consecutive positions belong to the same codon.
Returns:
None
'''
## Create output directory
filename = os.path.splitext(file_in)[0]
out_dir = os.path.dirname(file_out)
make_dir(out_dir)
## Define VCF header
header_source = [
"##fileformat=VCFv4.2",
"##source=iVar"
]
header_info = [
'##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">'
]
header_filter = [
'##FILTER=<ID=PASS,Description="All filters passed">',
'##FILTER=<ID=ft,Description="Fisher\'s exact test of variant frequency compared to mean error rate, p-value > 0.05">'
]
header_format = [
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
'##FORMAT=<ID=REF_DP,Number=1,Type=Integer,Description="Depth of reference base">',
'##FORMAT=<ID=REF_RV,Number=1,Type=Integer,Description="Depth of reference base on reverse reads">',
'##FORMAT=<ID=REF_QUAL,Number=1,Type=Integer,Description="Mean quality of reference base">',
'##FORMAT=<ID=ALT_DP,Number=1,Type=Integer,Description="Depth of alternate base">',
'##FORMAT=<ID=ALT_RV,Number=1,Type=Integer,Description="Depth of alternate base on reverse reads">',
'##FORMAT=<ID=ALT_QUAL,Number=1,Type=Integer,Description="Mean quality of alternate base">',
'##FORMAT=<ID=ALT_FREQ,Number=1,Type=Float,Description="Frequency of alternate base">',
]
header_cols = [
f"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{filename}"
]
if not ignore_strand_bias:
header_info += [
'##INFO=<ID=SB_PV,Number=1,Type=Float,Description="Strand-bias fisher-test p-value">'
]
header_filter += [
'##FILTER=<ID=sb,Description="Strand-bias fisher-test p-value < 0.05">'
]
header = header_source + header_info + header_filter + header_format + header_cols
## Initialise variables
var_list = []
var_count_dict = {"SNP": 0, "INS": 0, "DEL": 0}
dict_lines = {'CHROM':[], 'POS':[], 'ID':[], 'REF':[], 'ALT':[], 'REF_DP':[], 'REF_RV':[], 'ALT_DP':[], 'ALT_RV':[], 'QUAL':[], 'REF_CODON':[], 'ALT_CODON':[], 'FILTER': [], 'INFO':[], 'FORMAT':[], 'SAMPLE':[]}
write_line = False
fout = open(file_out, "w")
fout.write('\n'.join(header) + '\n')
with open(file_in, 'r') as fin:
for line in fin:
if not re.match("REGION", line):
line = re.split("\t", line)
## Assign intial fields to variables
CHROM = line[0]
POS = line[1]
ID = "."
REF = line[2]
ALT = line[3]
## REF/ALF depths
REF_DP = int(line[4])
REF_RV = int(line[5])
REF_FW = REF_DP - REF_RV
ALT_RV = int(line[8])
ALT_DP = int(line[7])
ALT_FW = ALT_DP - ALT_RV
## Perform a fisher_exact test for strand bias detection
table = np.array([[REF_FW, REF_RV], [ALT_FW, ALT_RV]])
oddsr, pvalue = fisher_exact(table, alternative='greater')
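                # a small one-sided p-value (< 0.05) is treated as evidence of
                # strand bias and adds the "sb" flag to the FILTER field below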
## Determine variant type
var_type = "SNP"
if ALT[0] == "+":
ALT = REF + ALT[1:]
var_type = "INS"
elif ALT[0] == "-":
REF += ALT[1:]
ALT = line[2]
var_type = "DEL"
QUAL = "."
## Determine FILTER field
INFO = f"DP={line[11]}"
pass_test = line[13]
if ignore_strand_bias:
if pass_test == "TRUE":
FILTER = "PASS"
else:
FILTER = "ft"
else:
## Add SB in the FILTER field if strand-bias p-value is significant
if pvalue < 0.05 and pass_test == "TRUE":
FILTER = "sb"
elif pvalue > 0.05 and pass_test == "TRUE":
FILTER = "PASS"
elif pvalue <= 0.05 and pass_test == "FALSE":
FILTER = "ft;sb"
else:
FILTER = "ft"
INFO += f":SB_PV={str(round(pvalue, 5))}"
FORMAT = "GT:REF_DP:REF_RV:REF_QUAL:ALT_DP:ALT_RV:ALT_QUAL:ALT_FREQ"
SAMPLE = f'1:{":".join(line[4:11])}'
REF_CODON = line[15]
ALT_CODON = line[17]
param_list = [CHROM, POS, ID, REF, ALT, REF_DP, REF_RV, ALT_DP, ALT_RV, QUAL, REF_CODON, ALT_CODON, FILTER, INFO, FORMAT, SAMPLE]
if ignore_merge_codons or var_type != "SNP":
write_line = True
oline = (CHROM + "\t" + POS + "\t" + ID + "\t" + REF + "\t" + ALT + "\t" + QUAL + "\t" + FILTER + "\t" + INFO + "\t" + FORMAT + "\t" + SAMPLE + "\n")
else:
## dict_lines contains all the informative fields for 3 positions in the vcf.
# dict_lines has a maximum size of three.
## Always fill dict_lines until size 2.
if len(dict_lines["POS"]) == 0 or len(dict_lines["POS"]) == 1:
for i,j in enumerate(dict_lines):
dict_lines.setdefault(j, []).append(param_list[i])
write_line=False
# If queue has size 2, we include the third line
elif len(dict_lines["POS"]) == 2:
for i,j in enumerate(dict_lines):
dict_lines.setdefault(j, []).append(param_list[i])
# Are two positions in the dict consecutive?
if check_consecutive(dict_lines["POS"]) == 2:
## If the first position is not on the third position of the codon they are in the same codon.
if codon_position(dict_lines["REF_CODON"][0],dict_lines["ALT_CODON"][0]) != 2:
write_line = True
num_collapse = "2"
CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE = rename_vars(dict_lines, num_collapse)
oline = (CHROM + "\t" + POS + "\t" + ID + "\t" + REF + "\t" + ALT + "\t" + QUAL + "\t" + FILTER + "\t" + INFO + "\t" + FORMAT + "\t" + SAMPLE + "\n")
## We removed the first two items in dict_lines with have been just processed.
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
dict_lines[list(dict_lines.keys())[i]].pop(0)
else:
write_line = True
oline =(dict_lines["CHROM"][0] + "\t" + dict_lines["POS"][0] + "\t" + dict_lines["ID"][0] + "\t" + dict_lines["REF"][0] + "\t" + dict_lines["ALT"][0] + "\t" + dict_lines["QUAL"][0] + "\t" + dict_lines["FILTER"][0] + "\t" + dict_lines["INFO"][0] + "\t" + dict_lines["FORMAT"][0] + "\t" + dict_lines["SAMPLE"][0] + "\n")
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
# Are the three positions in the dict consecutive?
elif check_consecutive(dict_lines["POS"]) == 3:
## we check the first position in which codon position is to process it acordingly.
# If first position is in the first codon position all three positions belong to the same codon.
if codon_position(dict_lines["REF_CODON"][0], dict_lines["ALT_CODON"][0]) == 0:
write_line = True
num_collapse = 3
CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE = rename_vars(dict_lines, num_collapse)
oline = (CHROM + "\t" + POS + "\t" + ID + "\t" + REF + "\t" + ALT + "\t" + QUAL + "\t" + FILTER + "\t" + INFO + "\t" + FORMAT + "\t" + SAMPLE + "\n")
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
dict_lines[list(dict_lines.keys())[i]].pop(0)
# we empty the dict_lines
dict_lines = {'CHROM':[], 'POS':[], 'ID':[], 'REF':[], 'ALT':[], 'REF_DP':[], 'REF_RV':[], 'ALT_DP':[], 'ALT_RV':[], 'QUAL':[], 'REF_CODON':[], 'ALT_CODON':[], 'FILTER':[], 'INFO':[], 'FORMAT':[], 'SAMPLE':[]}
# If first position is in the second codon position, we have the two first positions belonging to the same codon and the last one independent.
elif codon_position(dict_lines["REF_CODON"][0], dict_lines["ALT_CODON"][0]) == 1:
write_line = True
num_collapse = 2
CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE = rename_vars(dict_lines, num_collapse)
oline = (CHROM + "\t" + POS + "\t" + ID + "\t" + REF + "\t" + ALT + "\t" + QUAL + "\t" + FILTER + "\t" + INFO + "\t" + FORMAT + "\t" + SAMPLE + "\n")
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
dict_lines[list(dict_lines.keys())[i]].pop(0)
## Finally if we have the first position in the last codon position, we write first position and left the remaining two to be evaluated in the next iteration.
elif codon_position(dict_lines["REF_CODON"][0], dict_lines["ALT_CODON"][0]) == 2:
write_line = True
oline =(dict_lines["CHROM"][0] + "\t" + dict_lines["POS"][0] + "\t" + dict_lines["ID"][0] + "\t" + dict_lines["REF"][0] + "\t" + dict_lines["ALT"][0] + "\t" + dict_lines["QUAL"][0] + "\t" + dict_lines["FILTER"][0] + "\t" + dict_lines["INFO"][0] + "\t" + dict_lines["FORMAT"][0] + "\t" + dict_lines["SAMPLE"][0] + "\n")
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
elif check_consecutive(dict_lines["POS"]) == False:
write_line = True
oline =(dict_lines["CHROM"][0] + "\t" + dict_lines["POS"][0] + "\t" + dict_lines["ID"][0] + "\t" + dict_lines["REF"][0] + "\t" + dict_lines["ALT"][0] + "\t" + dict_lines["QUAL"][0] + "\t" + dict_lines["FILTER"][0] + "\t" + dict_lines["INFO"][0] + "\t" + dict_lines["FORMAT"][0] + "\t" + dict_lines["SAMPLE"][0] + "\n")
for i,j in enumerate(dict_lines):
dict_lines[list(dict_lines.keys())[i]].pop(0)
else:
print("Something went terribly wrong!!" + str(len(dict_lines["POS"])))
## Determine whether to output variant
if pass_only and FILTER != "PASS":
write_line = False
if float(line[10]) < min_allele_frequency:
write_line = False
if (CHROM, POS, REF, ALT) in var_list:
write_line = False
else:
var_list.append((CHROM, POS, REF, ALT))
## Write to file
if write_line:
var_count_dict[var_type] += 1
fout.write(oline)
## Print variant counts to pass to MultiQC
var_count_list = [(k, str(v)) for k, v in sorted(var_count_dict.items())]
print("\t".join(["sample"] + [x[0] for x in var_count_list]))
print("\t".join([filename] + [x[1] for x in var_count_list]))
## Handle last 3 lines.
if len(dict_lines["POS"]) == 2:
if check_consecutive(dict_lines["POS"]) == 2:
if codon_position(dict_lines["REF_CODON"][0],dict_lines["ALT_CODON"][0]) != 2:
write_line = True
num_collapse = 2
CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE = rename_vars(dict_lines, num_collapse)
oline = (CHROM + "\t" + POS + "\t" + ID + "\t" + REF + "\t" + ALT + "\t" + QUAL + "\t" + FILTER + "\t" + INFO + "\t" + FORMAT + "\t" + SAMPLE + "\n")
fout.write(oline)
else:
oline = (dict_lines["CHROM"][0] + "\t" + dict_lines["POS"][0] + "\t" + dict_lines["ID"][0] + "\t" + dict_lines["REF"][0] + "\t" + dict_lines["ALT"][0] + "\t" + dict_lines["QUAL"][0] + "\t" + dict_lines["FILTER"][0] + "\t" + dict_lines["INFO"][0] + "\t" + dict_lines["FORMAT"][0] + "\t" + dict_lines["SAMPLE"][0] + "\n")
oline1 = (dict_lines["CHROM"][1] + "\t" + dict_lines["POS"][1] + "\t" + dict_lines["ID"][1] + "\t" + dict_lines["REF"][1] + "\t" + dict_lines["ALT"][1] + "\t" + dict_lines["QUAL"][1] + "\t" + dict_lines["FILTER"][1] + "\t" + dict_lines["INFO"][1] + "\t" + dict_lines["FORMAT"][1] + "\t" + dict_lines["SAMPLE"][1] + "\n")
fout.write(oline)
fout.write(oline1)
elif len(dict_lines["POS"]) == 1:
oline =(dict_lines["CHROM"][0] + "\t" + dict_lines["POS"][0] + "\t" + dict_lines["ID"][0] + "\t" + dict_lines["REF"][0] + "\t" + dict_lines["ALT"][0] + "\t" + dict_lines["QUAL"][0] + "\t" + dict_lines["FILTER"][0] + "\t" + dict_lines["INFO"][0] + "\t" + dict_lines["FORMAT"][0] + "\t" + dict_lines["SAMPLE"][0] + "\n")
fout.write(oline)
fout.close()
def main(args=None):
args = parse_args(args)
ivar_variants_to_vcf(
args.file_in,
args.file_out,
args.pass_only,
args.allele_freq_threshold,
args.ignore_strand_bias,
args.ignore_merge_codons,
)
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
3281312
|
<reponame>hatchetjackk/artemis
import random
import re
import time
from collections import OrderedDict
from discord.ext import commands
import cogs.utilities as utilities
class Karma(commands.Cog):
def __init__(self, client):
self.client = client
self.karma_blacklist = ['Knights of Karma']
@commands.group()
async def karma(self, ctx):
if ctx.guild.name in self.karma_blacklist:
return
if ctx.invoked_subcommand is None:
await utilities.single_embed(
color=utilities.color_alert,
title='Try `karma help` for more options.',
channel=ctx
)
@karma.group()
async def help(self, ctx):
await utilities.single_embed(
color=utilities.color_help,
name='Karma Help',
value='`karma help` This menu!\n'
'`karma check` Check your own Karma\n'
'`karma check [user]` Check a member\'s Karma\n'
'`karma board` Check top 10 Karma leaders\n'
'`thanks [@user]` Give a member Karma\n',
channel=ctx
)
@karma.group()
async def check(self, ctx, *, member_check=None):
conn, c = await utilities.load_db()
if member_check is None:
c.execute("SELECT uid, karma FROM members WHERE uid = (:uid)", {'uid': ctx.author.id})
member_id, karma = c.fetchone()
name = ctx.author.nick
if name is None:
name = ctx.author.name
await utilities.single_embed(
title=f'You have {karma} karma, {name}!',
channel=ctx,
            )
            # member_check is None here, so return before the length check below
            return
if len(member_check) < 3:
await utilities.single_embed(
color=utilities.color_alert,
title=':heart: Karma Error',
name='An error occurred when checking karma!',
value='Please search using 3 or more characters.',
channel=ctx
)
return
target_member = ''
for member_object in ctx.guild.members:
member_name = member_object.name.lower()
if member_object.nick is not None:
member_name = member_object.nick.lower()
if member_object.mention in member_check:
target_member = member_object
else:
pattern = re.compile(r'' + re.escape(member_check))
matches = pattern.findall(member_name)
                for _ in matches:
                    target_member = member_object
        if not target_member:
            return
        c.execute("SELECT uid, karma FROM members WHERE uid = (:uid)", {'uid': target_member.id})
member_id, karma = c.fetchone()
name = target_member.name
if target_member.nick is not None:
name = target_member.nick
await utilities.single_embed(
title=f'{name} has {karma} karma!',
channel=ctx
)
@karma.group(aliases=['leaderboards', 'karmaboard', 'board'])
async def leaderboard(self, ctx):
conn, c = await utilities.load_db()
leaderboard = {}
c.execute("SELECT * FROM guild_members WHERE gid = (:gid)", {'gid': ctx.guild.id})
guild_members = c.fetchall()
for member in guild_members:
guild_id, guild, member_id, member_name, member_nick = member
c.execute("SELECT name, karma FROM members WHERE uid = (:uid)", {'uid': member_id})
member_name, karma = c.fetchone()
member_identity = member_nick
if member_identity is None:
member_identity = member_name
if member_identity != 'Artemis':
leaderboard[member_identity] = karma
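        # sort members by karma value, highest first, for the top-10 slice below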
sorted_karma = OrderedDict(reversed(sorted(leaderboard.items(), key=lambda x: x[1])))
counter = 1
karma_leaderboard = []
for key, value in sorted_karma.items():
karma_leaderboard.append(f'{counter}: {key} - **{value}** karma')
counter += 1
await utilities.single_embed(
title='Karma Leaderboard Top 10',
description='\n'.join(karma_leaderboard[:10]),
channel=ctx
)
@karma.group()
@commands.is_owner()
async def add(self, ctx, points: int, member_check):
conn, c = await utilities.load_db()
target_member = None
for guild_member in ctx.guild.members:
member_name = guild_member.name.lower()
if guild_member.nick is not None:
member_name = guild_member.nick.lower()
if guild_member.mention in member_check:
target_member = guild_member
else:
pattern = re.compile(r'' + re.escape(member_check))
matches = pattern.findall(member_name)
for _ in matches:
                    target_member = guild_member
        if target_member is None:
            return
        c.execute("SELECT uid, karma FROM members WHERE uid = (:uid)", {'uid': target_member.id})
member_id, karma = c.fetchone()
karma = karma + points
with conn:
c.execute("UPDATE members SET karma = (:karma) WHERE uid = (:uid)",
{'karma': karma, 'uid': target_member.id})
await utilities.single_embed(
title=f'{target_member.name} has gained {points} karma!',
channel=ctx
)
@karma.group(aliases=['sub'])
@commands.is_owner()
async def subtract(self, ctx, points: int, member_check):
conn, c = await utilities.load_db()
target_member = None
for guild_member in ctx.guild.members:
member_name = guild_member.name.lower()
if guild_member.nick is not None:
member_name = guild_member.nick.lower()
if guild_member.mention in member_check:
target_member = guild_member
else:
pattern = re.compile(r'' + re.escape(member_check))
matches = pattern.findall(member_name)
for _ in matches:
                    target_member = guild_member
        if target_member is None:
            return
        c.execute("SELECT uid, karma FROM members WHERE uid = (:uid)", {'uid': target_member.id})
member_id, karma = c.fetchone()
karma = karma - points
if karma < 0:
karma = 0
with conn:
c.execute("UPDATE members SET karma = (:karma) WHERE uid = (:uid)",
{'karma': karma, 'uid': target_member.id})
await utilities.single_embed(
title=f'{target_member.name} has lost {points} karma!',
channel=ctx
)
@commands.Cog.listener()
async def on_message(self, message):
try:
if (message.author.id == self.client.user.id
or message.author.name == 'Dyno'
or message.content.startswith('!')
or message.guild.name in self.karma_blacklist):
return
karma_keywords = ['thanks', 'thank', 'gracias', 'kudos', 'thx', 'appreciate it', 'cheers']
msg = [word.lower().replace('.', '') for word in message.content.split()]
karma_key = [word for word in karma_keywords if word in msg]
if len(karma_key) > 0:
thanked_members = [member for member in message.guild.members if member.mention in msg]
if len(thanked_members) > 0:
conn, c = await utilities.load_db()
c.execute("SELECT * FROM members WHERE uid = (:uid)", {'uid': message.author.id})
member_id, membername, points, last_karma_given = c.fetchone()
if last_karma_given is not None:
remaining_time = int(time.time() - last_karma_given)
time_limit = 60 * 3
if remaining_time < time_limit:
msg = f'You must wait {time_limit - remaining_time} seconds to give karma again.'
await message.channel.send(msg)
return
for member in thanked_members:
member_name = member.name
if member.nick is not None:
member_name = member.nick
# catch artemis karma
if member.id == self.client.user.id:
c.execute("SELECT response FROM bot_responses WHERE message_type = 'client_karma'")
client_karma = c.fetchall()
msg = random.choice([response[0] for response in client_karma])
await utilities.single_embed(title=msg, channel=message.channel)
# catch self karma
elif member.id == message.author.id:
c.execute("SELECT response FROM bot_responses WHERE message_type = 'bad_karma'")
bad_karma = c.fetchall()
msg = random.choice([response[0] for response in bad_karma]).format(message.author.id)
await utilities.single_embed(
title=msg,
color=utilities.color_alert,
channel=message.channel
)
else:
c.execute("SELECT * FROM members WHERE uid = (:uid)", {'uid': member.id})
member_id, membername, points, last_karma_given = c.fetchone()
last_karma = int(time.time())
points += 1
with conn:
c.execute("UPDATE members SET karma = (:karma) WHERE uid = (:uid)",
{'karma': points, 'uid': member.id})
c.execute("UPDATE members SET last_karma = (:last_karma) WHERE uid = (:uid)",
{'last_karma': last_karma, 'uid': message.author.id})
c.execute("SELECT response FROM bot_responses WHERE message_type = 'good_karma'")
good_responses = c.fetchall()
msg = random.choice([response[0] for response in good_responses]).format(member_name)
await utilities.single_embed(title=msg, channel=message.channel)
await self.client.process_commands(message)
except Exception as e:
print(f'An unexpected error occurred when giving karma: {e}')
raise
def setup(client):
client.add_cog(Karma(client))
|
StarcoderdataPython
|
1642543
|
# Generated by Django 2.0 on 2017-12-08 17:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feedback', '0002_auto_20160104_1521'),
]
operations = [
migrations.AlterModelOptions(
name='feedback',
options={'ordering': ('-created_at',), 'verbose_name': 'Отзыв', 'verbose_name_plural': 'Отзывы'},
),
migrations.AlterField(
model_name='feedback',
name='status',
field=models.CharField(choices=[('DF', 'Не требует решения'), ('PR', 'В процессе'), ('NW', 'Новое'), ('SL', 'Решено'), ('RJ', 'Не решено')], default='NW', max_length=2, verbose_name='Статус'),
),
]
|
StarcoderdataPython
|
1638657
|
<filename>devops/cluedo/create_object_relations.py
import csv
import os
import sys
import Queue
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from cswaExtras import *
# from loadCSpace import *
username = os.environ['LOGIN']
password = os.environ['PASSWORD']
server = os.environ['CSPACEURL']
realm = 'org.collectionspace.services'
# only use collectionobjects and storage locations, then link those using a group thing
# read in entities.csv
entities_file = "entities.csv"
locations_file = "locationauthorities.created.csv"
objects_file = "collectionobjects.created.csv"
request_csids = False
if len(sys.argv) > 1:
entities_file = sys.argv[1]
if len(sys.argv) == 2:
request_csids = True #this means only entities were provided, and fetching will be needed
else:
locations_file = sys.argv[2]
objects_file = sys.argv[3]
# Step 1: Read in the file that we will use to pair things!
type_to_name_map = {}
with open(entities_file, "rb") as csvfile:
reader = csv.reader(csvfile, delimiter="\t")
for row in reader:
record_type = row[0]
obj_type = row[1]
obj_name = row[2]
if record_type in type_to_name_map:
termslist = type_to_name_map[record_type]
termslist.append(obj_name)
type_to_name_map[record_type] = termslist
else:
type_to_name_map[record_type] = [obj_name]
def pair_ids_from_csv():
"""
Pairs Locations and Objects, getting their csids from a CSV file rather than through
HTTP requests.
"""
location_ids = {}
location_refnames = {}
with open(locations_file, "rb") as loccsv:
reader = csv.reader(loccsv, delimiter="\t")
for row in reader:
location_ids[row[2]] = row[3]
location_refnames[row[2]] = row[5]
object_ids = {}
object_refnames = {}
with open(objects_file, "rb") as objcsv:
reader = csv.reader(objcsv, delimiter="\t")
for row in reader:
object_ids[row[2]] = row[3]
object_refnames[row[2]] = row[5]
pairedCSV = csv.writer(open("paired_entities.csv", "wb"), delimiter="\t")
# Queue the rooms in order to put elements inside of them
locations_queue = Queue.Queue()
for location in type_to_name_map["storagelocation"]:
locations_queue.put(location)
for obj in type_to_name_map["collectionobject"]:
location = locations_queue.get()
object_id = object_ids[obj]
loc_id = location_ids[location]
pairedCSV.writerow([obj, object_id, location, loc_id])
# pairedCSV.writerow([location, loc_id, obj, object_id])
locations_queue.put(location)
return (location_refnames, object_refnames)
def pair_ids_from_request():
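    # Stub: fetching csids over HTTP is not implemented yet, so this returns
    # empty refname maps; only the CSV-based pair_ids_from_csv path works.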
return {}, {}
def substitute(mh,payload):
for m in mh.keys():
payload = payload.replace('{%s}' % m, escape(mh[m]))
# get rid of any unsubstituted items in the template
payload = re.sub(r'\{.*?\}', '', payload)
return payload
if request_csids:
location_refnames, object_refnames = pair_ids_from_request()
else:
location_refnames, object_refnames = pair_ids_from_csv()
xmlfile = 'xml/%s.xml' % "movements"
template = open(xmlfile).read()
username = os.environ['LOGIN']
password = os.environ['PASSWORD']
server = os.environ['CSPACEURL']
realm = 'org.collectionspace.services'
uri = 'movements'
relations_uri = 'relations'
movementscreated = csv.writer(open("movements.created.csv", "wb"), delimiter="\t")
# load file from paired_entities.csv [obj, obj_id, loc, loc_id]
sequence_number = 0
# with open(entities_file, "rb") as csvfile:
# reader = csv.reader(csvfile, delimiter="\t")
mov2obj_template = open("xml/mov2obj.xml").read()
obj2mov_template = open("xml/obj2mov.xml").read()
with open("paired_entities.csv", "rb") as entity_pairs:
reader = csv.reader(entity_pairs, delimiter="\t")
for row in reader:
sequence_number += 1
obj = row[0]
obj_id = row[1]
loc = row[2]
loc_id = row[3]
loc_refname = location_refnames[loc]
# 1. Create a new movement record, link the movement location
payload = substitute({"authority": "", "sequencenumber":"%03d" % sequence_number,"currentLocation":loc_refname}, template)
(url, data, movement_id) = make_request("POST", uri, realm, server, username, password, payload)
# 2. Link the movement record and the object record
payload = substitute({"objectCsid": obj_id, "movementCsid":movement_id}, mov2obj_template)
(url, data, relation_id1) = make_request("POST", relations_uri, realm, server, username, password, payload)
# 3. Link object and movement
payload = substitute({"objectCsid": obj_id, "movementCsid": movement_id}, obj2mov_template)
(url, data, relation_id2) = make_request("POST", relations_uri, realm, server, username, password, payload)
x = ("curl -S --stderr - -X DELETE https://nightly.collectionspace.org/cspace-services/movements/%s --basic -u \"<EMAIL>:Administrator\" -H \"Content-Type: application/xml\"" % movement_id)
y = ("curl -S --stderr - -X DELETE https://nightly.collectionspace.org/cspace-services/relations/%s --basic -u \"<EMAIL>:Administrator\" -H \"Content-Type: application/xml\"" % relation_id1)
z = ("curl -S --stderr - -X DELETE https://nightly.collectionspace.org/cspace-services/relations/%s --basic -u \"<EMAIL>:Administrator\" -H \"Content-Type: application/xml\"" % relation_id2)
movementscreated.writerow([x])
movementscreated.writerow([y])
movementscreated.writerow([z])
|
StarcoderdataPython
|
1716060
|
<gh_stars>1-10
from django.views.generic import View
from django.shortcuts import render, redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse
from django.contrib import messages
from items.models import ItemPost, BookList
class PostDelete(LoginRequiredMixin, View):
login_url = '/login/'
def get(self, request, pk, *args, **kwargs):
itempost = ItemPost.objects.get(pk=pk)
        # Delete only after confirming that the current user
        # is the same user who uploaded the post being deleted
if request.user.username == itempost.user.username:
itempost.is_deleted = True
itempost.save()
messages.add_message(
request,
messages.WARNING,
"정상적으로 삭제되었습니다.",
)
return redirect(reverse("postlist"))
        # If it is not the same user, redirect without deleting
else:
messages.add_message(
request,
messages.WARNING,
"이런 장난은 하지 마십시오",
)
return redirect(
reverse(
"postdetail",
kwargs={
'pk': itempost.id,
},
)
)
|
StarcoderdataPython
|
5062
|
<gh_stars>1-10
from django.db import models
class Idea(models.Model):
title = models.CharField(max_length=255, unique=True)
description = models.TextField()
author = models.OneToOneField('events.Registrant',
related_name='author_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
written_by = models.ForeignKey('users.User',
related_name='written_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
event = models.ForeignKey('events.Event',
related_name='event_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
is_valid = models.BooleanField(default=False)
max_number_of_participants = models.PositiveIntegerField(default=7)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Meta():
ordering = ['-created_at', '-id']
def __str__(self):
return self.title
class IdeaTeamMember(models.Model):
idea = models.ForeignKey(Idea, related_name='idea_team_member', on_delete=models.CASCADE)
member = models.OneToOneField('events.Registrant', related_name='member_idea', on_delete=models.CASCADE)
class Meta():
ordering = ['idea']
unique_together = ('idea', 'member')
verbose_name = 'Team Member'
verbose_name_plural = 'Groups'
|
StarcoderdataPython
|
3357245
|
from django.template.response import TemplateResponse
from oscar.apps.checkout.views import (PaymentMethodView as CorePaymentMethodView,
PaymentDetailsView as CorePaymentDetailsView,
OrderPreviewView as CoreOrderPreviewView)
from oscar.apps.payment.forms import BankcardForm, BillingAddressForm
from oscar.apps.shipping.methods import ShippingMethod
from oscar.core.loading import import_module
import_module('payment.models', ['Source', 'SourceType'], locals())
import_module('payment.exceptions', ['TransactionDeclinedException'], locals())
import_module('payment.utils', ['Bankcard'], locals())
import_module('payment.datacash.utils', ['Gateway', 'Facade'], locals())
import_module('order.models', ['PaymentEvent', 'PaymentEventType', 'PaymentEventQuantity'], locals())
class PaymentMethodView(CorePaymentMethodView):
template_file = 'checkout/payment_method.html'
def handle_GET(self):
return TemplateResponse(self.request, self.template_file, self.context)
def handle_POST(self):
method = self.request.POST['method_code']
self.co_data.pay_by(method)
return self.get_success_response()
class OrderPreviewView(CoreOrderPreviewView):
u"""View a preview of the order before submitting."""
def handle_GET(self):
# Forward straight onto the payment details - no need for preview
return self.get_success_response()
class PaymentDetailsView(CorePaymentDetailsView):
template_file = 'checkout/payment_details.html'
def handle_GET(self):
if self.is_cheque_payment():
self.template_file = 'checkout/payment_details_cheque.html'
else:
shipping_addr = self.get_shipping_address()
card_values = {'name': shipping_addr.name()}
self.context['bankcard_form'] = BankcardForm(initial=card_values)
addr_values = {'first_name': shipping_addr.first_name,
'last_name': shipping_addr.last_name,}
self.context['billing_address_form'] = BillingAddressForm(initial=addr_values)
return TemplateResponse(self.request, self.template_file, self.context)
def handle_POST(self):
if self.is_cheque_payment():
return self.submit()
try:
self.bankcard_form = BankcardForm(self.request.POST)
self.billing_addr_form = BillingAddressForm(self.request.POST)
if self.bankcard_form.is_valid() and self.billing_addr_form.is_valid():
return self.submit()
except TransactionDeclinedException, e:
self.context['payment_error'] = str(e)
self.context['bankcard_form'] = self.bankcard_form
self.context['billing_address_form'] = self.billing_addr_form
return TemplateResponse(self.request, self.template_file, self.context)
def handle_payment(self, order_number, total):
if self.is_cheque_payment():
self.handle_cheque_payment(total)
else:
self.handle_bankcard_payment(order_number, total)
def is_cheque_payment(self):
payment_method = self.co_data.payment_method()
return payment_method == 'cheque'
def handle_cheque_payment(self, total):
# Nothing to do except create a payment source
type,_ = SourceType.objects.get_or_create(name="Cheque")
source = Source(type=type, allocation=total)
self.payment_sources.append(source)
def handle_bankcard_payment(self, order_number, total):
# Handle payment problems with an exception
# Make payment submission - handle response from DC
# - could be an iframe open
# - could be failure
# - could be redirect
# Create bankcard object
bankcard = self.bankcard_form.get_bankcard_obj()
# Handle new card submission
dc_facade = Facade()
reference = dc_facade.debit(order_number, total, bankcard, self.basket)
# Create payment source (but don't save just yet)
type,_ = SourceType.objects.get_or_create(name='DataCash', code='datacash')
source = Source(type=type,
allocation=total,
amount_debited=total,
reference=reference)
self.payment_sources.append(source)
def place_order(self, basket, order_number, total_incl_tax, total_excl_tax):
order = super(PaymentDetailsView, self).place_order(basket, order_number, total_incl_tax, total_excl_tax)
if self.is_cheque_payment():
order.status = "Awaiting cheque"
order.save()
return order
def create_billing_address(self):
if not hasattr(self, 'billing_addr_form'):
return None
return self.billing_addr_form.save()
def save_payment_events(self, order):
event_type,_ = PaymentEventType.objects.get_or_create(code="paid-for")
event = PaymentEvent.objects.create(order=order, event_type=event_type)
for line in order.lines.all():
line_qty = PaymentEventQuantity.objects.create(event=event,
line=line,
quantity=line.quantity)
|
StarcoderdataPython
|
1789758
|
import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot != None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), '<NAME>')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), '<NAME>')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test old score keys will still appear in lifetime loserboard
r.set("user_score:{0}".format(14), 5)
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) == None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "<NAME> & <NAME>"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "<NAME>")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("<NAME>", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li><NAME>: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered <NAME>. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, <NAME>. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, <NAME>. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct <NAME>, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.assertEqual("$0", self.trebek_bot.get_user_score())
def test_lifetimescore_includes_multiple_months(self):
# Seed other user's data (to reproduce bug)
self.create_user_scores()
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.trebek_bot.update_score(200)
self.assertEqual("$400", self.trebek_bot.get_user_score(True))
def test_user_lifetime_loserboard_value_includes_multiple_months(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the lifetime loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Legacy Score: $5</li>"
expected += "<li>Allen: $40</li>"
expected += "<li>Mark: $60</li>"
expected += "<li>Melvin: $100</li>"
expected += "<li>Cordarrell: $140</li></ol>"
self.assertEqual(expected, response)
def test_user_lifetime_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek lifetime leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Arian: $10,860</li>"
expected += "<li><NAME>: $1,000</li>"
expected += "<li>Zach: $824</li>"
expected += "<li>Alex: $450</li>"
expected += "<li>Richard: $400</li></ol>"
self.assertEqual(expected, response)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1701374
|
<reponame>sinhmd/raster-vision
import unittest
import numpy as np
from rastervision2.core.data import SegmentationClassTransformer
from rastervision2.core.data.utils import color_to_triple
from rastervision2.core.data.class_config import ClassConfig
class TestSegmentationClassTransformer(unittest.TestCase):
def setUp(self):
self.class_config = ClassConfig(
names=['a', 'b', 'c'], colors=['red', 'green', 'blue'])
self.class_config.ensure_null_class()
self.transformer = SegmentationClassTransformer(self.class_config)
self.rgb_image = np.zeros((1, 3, 3))
self.rgb_image[0, 0, :] = color_to_triple('red')
self.rgb_image[0, 1, :] = color_to_triple('green')
self.rgb_image[0, 2, :] = color_to_triple('blue')
self.class_image = np.array([[0, 1, 2]])
def test_rgb_to_class(self):
class_image = self.transformer.rgb_to_class(self.rgb_image)
expected_class_image = self.class_image
np.testing.assert_array_equal(class_image, expected_class_image)
def test_class_to_rgb(self):
rgb_image = self.transformer.class_to_rgb(self.class_image)
expected_rgb_image = self.rgb_image
np.testing.assert_array_equal(rgb_image, expected_rgb_image)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3376692
|
<reponame>Vector35/traceapi
#!/usr/bin/env python
import sys
import json
import os
import tarfile
import base64
import operator
from Crypto.Hash import *
if len(sys.argv) < 3:
print "Expected challenge name and tar path"
sys.exit(1)
desired_cs = sys.argv[1]
files = sys.argv[2:]
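# Hedged usage illustration (comments only; the script and file names are
# illustrative): python find_live_round.py LUNGE_00001 round_data_0.tar ...
# prints "Live in round N" for the first round whose score data lists the challenge.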
for tar_path in files:
tar = tarfile.open(tar_path)
rounds = []
# Determine set of missing rounds that are in this tarball
for f in tar.getnames():
if f.endswith('score_data.json'):
round_num = int(f.split('/')[0])
rounds.append(round_num)
rounds.sort()
for round_num in rounds:
# Extract score data from round
scores = json.loads(tar.extractfile('%d/score_data.json' % (round_num)).read())
# The 'challenges' list contains challenges that are no longer live, so compute
# the set of active challenges based on scoring data instead
active_cs_list = []
for team in scores['teams']:
for cs in scores['teams'][team]['scores']:
if cs['csid'] not in active_cs_list:
active_cs_list.append(cs['csid'])
for cs in active_cs_list:
if cs == desired_cs:
print "Live in round %d" % round_num
sys.exit(0)
|
StarcoderdataPython
|
59501
|
from htm_rl.modules.htm.pattern_memory import PatternMemory
from htm.bindings.sdr import SDR
import numpy as np
from tqdm import tqdm
EPS = 1e-12
def get_labels(pm: PatternMemory, data, input_size):
labels = dict()
input_pattern = SDR(input_size)
for i, item in enumerate(data):
input_pattern.sparse = item
labels[i] = pm.compute(input_pattern, False)
return labels
def train(pm: PatternMemory, data, epochs, input_size, noise=0.0):
input_pattern = SDR(input_size)
indices = np.arange(len(data))
for epoch in tqdm(range(epochs)):
np.random.shuffle(indices)
for i in indices:
if noise > 0:
n_bits = int(noise * len(data[i]))
bits_to_remove = np.random.choice(data[i], n_bits, replace=False)
bits_to_add = np.random.choice(np.arange(input_size), n_bits, replace=False)
noisy_sample = np.setdiff1d(data[i], bits_to_remove)
noisy_sample = np.union1d(noisy_sample, bits_to_add)
else:
noisy_sample = data[i]
input_pattern.sparse = noisy_sample
pm.compute(input_pattern, True)
# print(f'epoch {epoch}: {get_labels(pm, data, input_size)}')
labels = get_labels(pm, data, input_size)
return labels
def test_retrieval(pm: PatternMemory, data, labels):
iou = list()
for i, item in enumerate(data):
if labels[i] is not None:
pattern = pm.get_pattern(labels[i])
iou.append(np.intersect1d(pattern, item).size/(np.union1d(pattern, item).size + EPS))
else:
iou.append(0)
return sum(iou)/len(iou)
def generate_data(input_size, n_patterns, sparsity):
data = [np.random.choice(np.arange(0, input_size), max(int(input_size * sparsity), 1), replace=False) for _ in range(n_patterns)]
return data
def main():
input_size = 1000
epochs = 20
seed = 5436
n_patterns = 1000
sparsity = 0.05
config = dict(
input_size=input_size,
max_segments=1000,
min_distance=0.1,
permanence_increment=0.1,
permanence_decrement=0.01,
segment_decrement=0.1,
permanence_connected_threshold=0.5,
seed=seed
)
data = generate_data(input_size, n_patterns, sparsity)
pm = PatternMemory(**config)
labels = train(pm, data, epochs, input_size, noise=0.09)
mean_iou = test_retrieval(pm, data, labels)
print(mean_iou)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1632600
|
import abc
import re
import urllib2
from cave import Cave
from lxml import html
class CaveService(object):
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'
_regex_number = re.compile("[0-9]+")
_regex_namespace = {'re': 'http://exslt.org/regular-expressions'}
def __init__(self, search_url='', search_description=''):
self.search_url = search_url
self.search_description = search_description
headers = {
'User-Agent': self._user_agent
}
self._opener = urllib2.build_opener()
self._opener.addheaders = headers.items()
@abc.abstractmethod
def search_caves(self, old_caves=[]):
return
class IdealistaService(CaveService):
def search_caves(self, old_caves=[]):
new_caves = []
data_html = self._opener.open(self.search_url).read()
dom = html.fromstring(data_html)
caves = dom.xpath('.//li[re:test(@id, "[0-9]+")]',
namespaces=self._regex_namespace)
caves = {cave.attrib['id']: cave for cave in caves}
# A solution with a comprehension will use another for ;)
for cave_id in caves:
if cave_id not in old_caves:
new_cave = caves[cave_id]
price = new_cave.xpath('.//li[@class="col-0"]')[0].text
price = self._regex_number.findall(price)[0]
meters = new_cave.xpath('.//li[@class="col-1"]')[0].text
meters = self._regex_number.findall(meters)[0]
description = new_cave.xpath(
'.//a[@href="/inmueble/{0}/"]'.format(
new_cave.attrib['id']))[1].text.strip()
url = 'http://idealista.com/inmueble/{0}/'.format(
new_cave.attrib['id'])
new_cave_obj = Cave(price, meters, description, url,
self.search_url)
new_caves.append(new_cave_obj)
return new_caves
class SegundaManoService(CaveService):
def search_caves(self, old_caves=[]):
new_caves = []
data_html = self._opener.open(self.search_url).read()
dom = html.fromstring(data_html)
caves = dom.xpath('.//ul[re:test(@id, "[0-9]+")]',
namespaces=self._regex_namespace)
caves = {cave.attrib['id']: cave for cave in caves}
# A solution with a comprehension will use another for ;)
for cave_id in caves:
if cave_id not in old_caves:
new_cave = caves[cave_id]
price = new_cave.xpath('.//a[@class="subjectPrice"]')[0].text
price = self._regex_number.findall(price)[0]
try:
meters = new_cave.xpath(
'.//div[@class="infoBottom"]/text()')[3]
meters = self._regex_number.findall(meters)[0]
except:
meters = 'not available'
description = new_cave.xpath(
'.//a[@class="subjectTitle"]')[0].text.strip()
url = new_cave.xpath(
'.//a[@class="subjectTitle"]')[0].attrib['href']
new_cave_obj = Cave(price, meters, description, url,
self.search_url)
new_caves.append(new_cave_obj)
return new_caves
|
StarcoderdataPython
|
184651
|
import numpy as np
class MiniBatch:
def __init__(self, X: np.array, y: np.array, n, batch_size=1, shuffle=True):
"""
        Creates an iterator over the given data
:param X: features array
:param y: marks array
:param n: number of elements
:param batch_size: mini-batch size
        :param shuffle: whether the data should be shuffled
"""
self.X = X
self.y = y
self.n = n
self.k = 0
self.batch_size = batch_size
self.shuffle = shuffle
if self.shuffle:
self.X, self.y = self.__shuffle__(X=self.X, y=self.y, n=self.n)
def __iter__(self):
return self
def __next__(self):
if self.n <= self.batch_size * self.k:
raise StopIteration
start = self.k * self.batch_size
end = start + self.batch_size
self.k += 1
return self.X[start:end], self.y[start:end]
@staticmethod
def __shuffle__(X, y, n):
indices = np.arange(n)
        np.random.shuffle(indices)  # randomize the sample order
X_, y_ = [], []
for i in indices:
X_.append(X[i])
y_.append(y[i])
return np.array(X_), np.array(y_)
def __reset_index__(self):
self.k = 0
if self.shuffle:
self.X, self.y = self.__shuffle__(self.X, self.y, self.n)
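

# Minimal usage sketch (not part of the original module); the toy arrays below
# are illustrative.
if __name__ == "__main__":
    X_demo = np.arange(20).reshape(10, 2)
    y_demo = np.arange(10)
    for xb, yb in MiniBatch(X_demo, y_demo, n=10, batch_size=4, shuffle=False):
        print(xb.shape, yb.shape)  # (4, 2) (4,) twice, then (2, 2) (2,)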
|
StarcoderdataPython
|
1651398
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
class Sessions(models.Model):
_name = "my_modulee.sessions"
_description = "my_modulee.sessions"
name = fields.Text(
string='Name'
)
start_date = fields.Date(
string='Start Date',
default=lambda self: fields.Date.context_today(self)
)
duration = fields.Float(
digits=(6,2),
string='Duration (days)'
)
seats = fields.Integer(
string='Number of Seats'
)
instructor_id = fields.Many2one(
'res.partner',
string='Instructor',
ondelete='set null',
domain=['|', ('instructor','=',True),'|',('teacher_level1','=',True),('teacher_level2','=',True)]
#domain=['|',('teacher_level1','=',True),('teacher_level2','=',True)]
)
courses_id = fields.Many2one(
'my_modulee.courses',
required=True,
string='Course Id',
)
#Many to many
atendees_ids = fields.Many2many(
'res.partner',
string='Attendees',
)
    # Percentage of seats taken relative to the total number of seats.
percentage_taken_seats = fields.Float(
string='Taken Seats Percentage', compute='_compute_taken_seats'
)
    # "Active" session attribute
active = fields.Boolean(string='Active',default=True)
    # Attribute holding the number of attendees of a session.
atendees_num = fields.Float(
string='Number of Attendees',
compute='_compute_atendees_number',
store=True
)
#This stuff is for the Kanban view
color = fields.Integer()
    # Compute a field.
@api.depends('atendees_ids')
def _compute_taken_seats(self):
for record in self:
if((record.seats is None) or (record.seats <= 0)):
record.percentage_taken_seats = 0
else:
record.percentage_taken_seats = (len(record.atendees_ids)*100)/(record.seats)
    # Compute the number of attendees of a session
@api.depends('atendees_ids')
def _compute_atendees_number(self):
for record in self:
#if(record.atendees_ids):
record.atendees_num = len(record.atendees_ids)
    # Every time one of the fields of the "self" record listed in the onchange decorator changes, this method will be called.
@api.onchange('atendees_ids','seats')
def _onchange_percentage_taken_seats(self):
if (len(self.atendees_ids) > self.seats):
return {
'warning': {
'title': _("That's not possible"),
'message': _('You cannot have more attendees than seats in a session.')
}
}
elif (self.seats < 0):
return {
'warning': {
'title': _("That's not possible"),
'message': _('You cannot have a session with less than 0 persons.')
}
}
    # This is a check constraint, but written in Python rather than SQL.
@api.constrains('instructor_id', 'atendees_ids')
def _check_method(self):
        for record in self:  # iterate over all records
if (record.instructor_id in record.atendees_ids):
raise ValidationError(_('The instructor ') + '%s'%record.instructor_id.name + _(' cannot be in his/her session as an attendee too.'))
|
StarcoderdataPython
|
3333559
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-27 13:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0015_s3upload_is_valid'),
('team', '0075_auto_20180327_1303'),
]
operations = [
migrations.RemoveField(
model_name='historicalinvoicesummary',
name='changed_by',
),
migrations.RemoveField(
model_name='historicalinvoicesummary',
name='created_by',
),
migrations.RemoveField(
model_name='historicalinvoicesummary',
name='history_user',
),
migrations.RemoveField(
model_name='invoicesummary',
name='changed_by',
),
migrations.AddField(
model_name='invoicesummary',
name='s3_upload',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.S3Upload'),
),
migrations.DeleteModel(
name='HistoricalInvoiceSummary',
),
]
|
StarcoderdataPython
|
1700239
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
import os
import uuid
import StringIO
import ConfigParser
from boto.exception import S3CreateError
from boto.s3.connection import S3Connection
class S3Utils(object):
MAX_DELETE = 999
GTFAR_S3_BUCKET_PREFIX = 'pegasus-gtfar'
def __init__(self, s3_cfg=None):
if s3_cfg is None:
s3_cfg = os.path.join(os.path.expanduser('~'), '.s3cfg')
if not os.path.isfile(s3_cfg):
raise Exception('Unable to locate S3 configuration file')
ini_str = open(s3_cfg, 'r').read()
ini_fp = StringIO.StringIO(ini_str)
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
self._access_key = config.get('pegasus@amazon', 'access_key')
self._secret_key = config.get('pegasus@amazon', 'secret_key')
self._conn = S3Connection(self._access_key, self._secret_key)
self._bucket = self._init_bucket(S3Utils.GTFAR_S3_BUCKET_PREFIX)
def _init_bucket(self, bucket_prefix):
buckets = self._conn.get_all_buckets()
for bucket in buckets:
if bucket.name.startswith(bucket_prefix):
return self._conn.get_bucket(bucket.name)
else:
while True:
try:
bucket_name = '%s-%s' % (bucket_prefix, str(uuid.uuid4()).split('-')[-1])
bucket = self._conn.create_bucket(bucket_name)
return bucket
except S3CreateError:
pass
def dir_exists(self, key):
key = key if key.endswith('/') else key + '/'
files = self._bucket.list(key)
for file in files:
return True
return False
def get_download_url(self, workflow_name, file_name):
file_path = 'data/runs/%s/output/%s' % (workflow_name, file_name)
key = self._bucket.get_key(file_path)
if key:
return key.generate_url(expires_in=60)
return None
def get_bucket_name(self):
return self._bucket.name
def delete_run_dir(self, _id):
prefix = 'data/runs/%s/' % _id
self._delete_key(prefix)
def delete_staging_dir(self, _id):
prefix = 'data/runs/%s/scratch/' % _id
self._delete_key(prefix)
def delete_output_dir(self, _id):
prefix = 'data/runs/%s/output/' % _id
self._delete_key(prefix)
def _delete_key(self, key):
# Is the key a directory?
if key.endswith('/'):
keys = self._bucket.list(key)
dir_list = []
del_list = []
for s3_object in keys:
name = s3_object.name
# Directory contains a directory
if name.endswith('/'):
dir_list.insert(0, name)
else:
del_list.append(name)
if len(del_list) == S3Utils.MAX_DELETE:
self._bucket.delete_keys(del_list)
del_list = []
if len(del_list) > 0:
self._bucket.delete_keys(del_list)
for i in range(0, len(dir_list), S3Utils.MAX_DELETE):
l = dir_list[i:i + S3Utils.MAX_DELETE]
self._bucket.delete_keys(l)
else:
self._bucket.delete_key(key)
def get_index_files(self):
prefix = 'data/index'
return self._get_files(prefix)
def get_output_files(self, _id):
prefix = 'data/runs/%s/output' % _id
files = self._get_files(prefix)
return [(os.path.basename(name), size) for name, size in files if size > 0]
def _get_files(self, prefix):
files_rs = self._bucket.list(prefix)
files = []
for key in files_rs:
if not key.name.endswith('/'):
file_size = key.size
files.append((key.name, file_size))
return files
|
StarcoderdataPython
|
3250205
|
from collections import OrderedDict, Counter
import hashlib
from utils import gethostname, getpath
import networkx as nx
import matplotlib.pyplot as plt
class SimpleSiteMap(object):
gr = None
tokens = OrderedDict()
tokens_counter = 0
edges = []
def __init__(self, site, exceptions=None):
self.gr = nx.DiGraph()
self.map = nx.DiGraph()
self.site = site
self.exceptions = exceptions
def append_tokens(self, tuple):
self.tokens_counter += 1
if tuple == ():
if '/' in self.tokens[0]:
self.tokens[0]['/'] += 1
else:
self.tokens[0]['/'] = 1
step = 0
for i in tuple:
if step not in self.tokens.keys():
self.tokens[step] = OrderedDict()
if i in self.tokens[step]:
self.tokens[step][i] += 1
else:
self.tokens[step][i] = 1
step += 1
def reduce_tokens(self):
sorted_tokens = OrderedDict()
for k in self.tokens.keys():
sorted_tokens[k] = OrderedDict(sorted(self.tokens[k].items(), key=lambda t: t[1], reverse=True))
self.tokens = sorted_tokens
def normalize_tokens_weight(self):
for k in self.tokens.keys():
local_tokens_counter = 0
for token in self.tokens[k].keys():
local_tokens_counter += self.tokens[k][token]
for token in self.tokens[k].keys():
self.tokens[k][token] /= float(local_tokens_counter)
def get_simplified_node(self, node, cut_limit):
simplified_node = []
step = 0
if node == ():
node = ('/',)
for token in node:
try:
if self.tokens[step][token] < cut_limit:
simplified_node.append("*")
else:
simplified_node.append(token)
except:
simplified_node.append(token)
step += 1
if Counter(simplified_node)['*'] > 1:
print node, simplified_node
return tuple(simplified_node)
def get_simplified_edge(self, tokens, cut_limit):
nin, nout = tokens
return self.get_simplified_node(nin, cut_limit), self.get_simplified_node(nout, cut_limit)
def reduce_graph(self, cut_limit=0.005):
self.map.clear()
visits = len(self.edges)
for edge in self.edges:
e = self.get_simplified_edge(edge, cut_limit)
self.map.add_edge(e[0], e[1])
try:
self.map[e[0]][e[1]]['weight'] += 1/float(visits)
except KeyError:
self.map[e[0]][e[1]]['weight'] = 1/float(visits)
print(self.map.number_of_nodes())
def add_node(self, uri, referer):
for ex in self.exceptions:
if uri.startswith(ex):
return
if referer and gethostname(referer) == gethostname(self.site):
referer = getpath(referer)
input_chain = filter(None, referer.split('/'))
else:
input_name = 'external'
m = hashlib.md5()
m.update(referer)
input_chain = [input_name]
output_chain = filter(None, getpath(uri).split('/'))
tic, toc = tuple(input_chain), tuple(output_chain)
self.append_tokens(toc)
self.edges.append((tic, toc))
self.gr.add_edge(tic, toc)
def get_graph(self):
self.map = self.gr.copy()
self.reduce_tokens()
self.normalize_tokens_weight()
self.reduce_graph()
return self.map
def get_weight(self, uri, referer):
pass
def draw_graph(self):
G = self.get_graph()
pos=nx.spring_layout(G, iterations=10)
e = G.edges()
weights = [int(1+G[u][v]['weight']*30) for u, v in e]
nx.draw_networkx_edges(G, pos=pos, alpha=0.3, width=weights, edge_color='m')
nx.draw_networkx_labels(G, pos=pos, font_size=12)
plt.show()
|
StarcoderdataPython
|
1795419
|
import importlib
import logging
import os
from contextlib import contextmanager
import yadageschemas
from .steering_object import YadageSteering
from .strategies import get_strategy
log = logging.getLogger(__name__)
def run_workflow(*args, **kwargs):
"""
convenience function around steering context, when no additional settings
are desired.
"""
with steering_ctx(*args, **kwargs):
pass
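# Hedged usage sketch (comments only; paths and values are illustrative, the
# keyword names mirror steering_ctx below):
#
#     run_workflow(
#         "workdir",                # dataarg: working/data directory for the run
#         workflow="workflow.yml",  # workflow spec, resolved relative to toplevel
#         toplevel=".",
#         initdata={"input": "data.txt"},
#     )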
def execute_steering(
steering_object,
updateinterval=0.02,
loginterval=30,
default_trackers=True,
strategy=None,
strategyopts=None,
backend=None,
cache=None,
):
ys = steering_object
ys.adage_argument(
default_trackers=default_trackers,
trackevery=loginterval,
update_interval=updateinterval,
recursive_updates=True,
)
if cache:
if cache == "checksums":
backend.enable_cache(
":".join([cache, os.path.join(ys.metadir, "cache.json")])
)
else:
backend.enable_cache(cache)
custom_tracker = os.environ.get("YADAGE_CUSTOM_TRACKER", None)
if custom_tracker:
modulename, trackerclassname = custom_tracker.split(":")
module = importlib.import_module(modulename)
trackerclass = getattr(module, trackerclassname)
ys.adage_argument(additional_trackers=[trackerclass()])
if strategy is not None:
ys.adage_argument(**get_strategy(strategy, strategyopts))
ys.run_adage(backend)
@contextmanager
def steering_ctx(
dataarg,
workflow=None,
initdata=None,
toplevel=os.getcwd(),
backend=None,
controller="frommodel",
ctrlopts=None,
workflow_json=None,
cache=None,
dataopts=None,
updateinterval=0.02,
loginterval=30,
schemadir=yadageschemas.schemadir,
metadir=None,
strategy=None,
strategyopts=None,
validate=True,
visualize=True,
wflowopts=None,
accept_metadir=False,
modelsetup="inmem",
modelopts=None,
):
ys = YadageSteering.create(
metadir=metadir,
accept_metadir=True if (accept_metadir or cache) else False,
dataarg=dataarg,
dataopts=dataopts,
wflowopts=wflowopts,
workflow_json=workflow_json,
workflow=workflow,
toplevel=toplevel,
schemadir=schemadir,
validate=validate,
initdata=initdata,
modelsetup=modelsetup,
modelopts=modelopts,
controller=controller,
ctrlopts=ctrlopts,
)
yield ys
try:
execute_steering(
steering_object=ys,
updateinterval=updateinterval,
loginterval=loginterval,
default_trackers=visualize,
strategy=strategy,
strategyopts=strategyopts,
backend=backend,
cache=cache,
)
finally:
log.info("done. dumping workflow to disk.")
ys.serialize()
if visualize:
log.info("visualizing workflow.")
ys.visualize()
|
StarcoderdataPython
|
3349664
|
from typing import Optional, List, Union
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from autofit.database import query as q
from .scrape import scrape_directory
from .. import model as m
from ..query.query import AbstractQuery, Attribute
class NullPredicate(AbstractQuery):
@property
def fit_query(self) -> str:
return "SELECT id FROM fit"
def __and__(self, other):
return other
class Query:
"""
API for creating a query on the best fit instance
"""
@staticmethod
def for_name(name: str) -> q.Q:
"""
Create a query for fits based on the name of a
top level instance attribute
Parameters
----------
name
The name of the attribute. e.g. galaxies
Returns
-------
A query generating object
"""
return q.Q(name)
def __getattr__(self, name):
return self.for_name(name)
class FitQuery(Query):
"""
API for creating a query on the attributes of a fit,
such as:
name
unique_tag
path_prefix
is_complete
is_grid_search
"""
@staticmethod
def for_name(name: str) -> Union[
AbstractQuery,
Attribute
]:
"""
Create a query based on some attribute of the Fit.
Parameters
----------
name
The name of an attribute of the Fit class
Returns
-------
A query based on an attribute
Examples
--------
aggregator.fit.name == 'example name'
"""
if name not in m.fit_attributes:
raise AttributeError(
f"Fit has no attribute {name}"
)
if m.fit_attributes[
name
].type.python_type == bool:
return q.BA(name)
return q.A(name)
class Aggregator:
def __init__(
self,
session: Session,
filename: Optional[str] = None,
predicate: AbstractQuery = NullPredicate(),
offset=0,
limit=None
):
"""
Query results from an intermediary SQLite database.
Results can be scraped from a directory structure and stored in the database.
Parameters
----------
session
A session for communicating with the database.
filename
"""
self.session = session
self.filename = filename
self._fits = None
self._predicate = predicate
self._offset = offset
self._limit = limit
def __iter__(self):
return iter(
self.fits
)
@property
def search(self) -> FitQuery:
"""
An object facilitating queries on fit attributes such as:
name
unique_tag
path_prefix
is_complete
is_grid_search
"""
return FitQuery()
@property
def info(self):
"""
Query info associated with the fit in the info dictionary
"""
return q.AnonymousInfo()
def values(self, name: str) -> list:
"""
Retrieve the value associated with each fit with the given
parameter name
Parameters
----------
name
The name of some pickle, such as 'samples'
Returns
-------
A list of objects, one for each fit
"""
return [
fit[name]
for fit
in self
]
def __len__(self):
return len(self.fits)
def __eq__(self, other):
if isinstance(other, list):
return self.fits == other
return super().__eq__(other)
@property
def fits(self) -> List[m.Fit]:
"""
Lazily query the database for a list of Fit objects that
match the aggregator's predicate.
"""
if self._fits is None:
self._fits = self._fits_for_query(
self._predicate.fit_query
)
return self._fits
def map(self, func):
for fit in self.fits:
yield func(fit)
def __repr__(self):
return f"<{self.__class__.__name__} {self.filename} {len(self)}>"
def __getattr__(self, name: str) -> Union[AbstractQuery, q.A]:
"""
Facilitates query construction. If the Fit class has an
attribute with the given name then a predicate is generated
based on that attribute. Otherwise the query is assumed to
apply to the best fit instance.
Parameters
----------
name
The name of an attribute of the Fit class or the model
Returns
-------
A query
"""
return Query.for_name(name)
def __call__(self, predicate) -> "Aggregator":
"""
Concise query syntax
"""
return self.query(predicate)
def query(self, predicate: AbstractQuery) -> "Aggregator":
# noinspection PyUnresolvedReferences
"""
Apply a query on the model.
Parameters
----------
predicate
A predicate constructed to express which models should be included.
Returns
-------
A list of objects that match the predicate
Examples
--------
>>>
>>> aggregator = Aggregator.from_database(
>>> "my_database.sqlite"
>>> )
>>>
>>> lens = aggregator.galaxies.lens
>>>
>>> aggregator.filter((lens.bulge == EllSersicCore) & (lens.disk == EllSersic))
>>> aggregator.filter((lens.bulge == EllSersicCore) | (lens.disk == EllSersic))
"""
return self._new_with(
predicate=self._predicate & predicate
)
def _new_with(
self,
**kwargs
):
kwargs = {
"session": self.session,
"filename": self.filename,
"predicate": self._predicate,
**kwargs
}
return Aggregator(
**kwargs
)
def children(self) -> "Aggregator":
"""
An aggregator comprising the children of the fits encapsulated
by this aggregator. This is used to query children in a grid search.
"""
return Aggregator(
session=self.session,
filename=self.filename,
predicate=q.ChildQuery(
self._predicate
)
)
def __getitem__(self, item):
offset = self._offset
limit = self._limit
if isinstance(
item, int
):
return self.fits[item]
elif isinstance(
item, slice
):
if item.start is not None:
if item.start >= 0:
offset += item.start
else:
offset = len(self) + item.start
if item.stop is not None:
if item.stop >= 0:
limit = len(self) - item.stop - offset
else:
limit = len(self) + item.stop
return self._new_with(
offset=offset,
limit=limit
)
def _fits_for_query(
self,
query: str
) -> List[m.Fit]:
"""
Execute a raw SQL query and return a Fit object
for each Fit id returned by the query
Parameters
----------
query
A SQL query that selects ids from the fit table
Returns
-------
A list of fit objects, one for each id returned by the
query
"""
fit_ids = {
row[0]
for row
in self.session.execute(
query
)
}
return self.session.query(
m.Fit
).filter(
m.Fit.id.in_(
fit_ids
)
).offset(
self._offset
).limit(
self._limit
).all()
def add_directory(
self,
directory: str,
auto_commit=True
):
"""
Recursively search a directory for autofit results
and add them to this database.
Any pickles found in the pickles file are implicitly added
to the fit object.
Warnings
--------
If a directory is added twice then that will result in
duplicate entries in the database.
Parameters
----------
auto_commit
If True the session is committed writing the new objects
to the database
directory
A directory containing autofit results embedded in a
file structure
"""
for fit in scrape_directory(
directory
):
self.session.add(
fit
)
if auto_commit:
self.session.commit()
@classmethod
def from_database(
cls,
filename: str,
completed_only: bool = False
) -> "Aggregator":
"""
Create an instance from a sqlite database file.
If no file exists then one is created with the schema of the database.
Parameters
----------
completed_only
filename
The name of the database file.
Returns
-------
An aggregator connected to the database specified by the file.
"""
engine = create_engine(
f'sqlite:///{filename}'
)
session = sessionmaker(
bind=engine
)()
m.Base.metadata.create_all(
engine
)
aggregator = Aggregator(
session,
filename
)
if completed_only:
return aggregator(
aggregator.search.is_complete
)
return aggregator
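

# Hedged usage sketch (comments only; the file and directory names are
# illustrative, the calls mirror the API defined above):
#
#     agg = Aggregator.from_database("results.sqlite")  # creates the schema if the file is new
#     agg.add_directory("path/to/autofit/output")       # scrape fits from disk and commit
#     completed = agg.query(agg.search.is_complete)     # keep only completed fits
#     samples = completed.values("samples")             # one 'samples' pickle per matching fit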
|
StarcoderdataPython
|
90608
|
"""
LC 6014
You are given a string s and an integer repeatLimit. Construct a new string repeatLimitedString using the characters of s such that no letter appears more than repeatLimit times in a row. You do not have to use all characters from s.
Return the lexicographically largest repeatLimitedString possible.
A string a is lexicographically larger than a string b if in the first position where a and b differ, string a has a letter that appears later in the alphabet than the corresponding letter in b. If the first min(a.length, b.length) characters do not differ, then the longer string is the lexicographically larger one.
Example 1:
Input: s = "cczazcc", repeatLimit = 3
Output: "zzcccac"
Explanation: We use all of the characters from s to construct the repeatLimitedString "zzcccac".
The letter 'a' appears at most 1 time in a row.
The letter 'c' appears at most 3 times in a row.
The letter 'z' appears at most 2 times in a row.
Hence, no letter appears more than repeatLimit times in a row and the string is a valid repeatLimitedString.
The string is the lexicographically largest repeatLimitedString possible so we return "zzcccac".
Note that the string "zzcccca" is lexicographically larger but the letter 'c' appears more than 3 times in a row, so it is not a valid repeatLimitedString.
Example 2:
Input: s = "aababab", repeatLimit = 2
Output: "bbabaa"
Explanation: We use only some of the characters from s to construct the repeatLimitedString "bbabaa".
The letter 'a' appears at most 2 times in a row.
The letter 'b' appears at most 2 times in a row.
Hence, no letter appears more than repeatLimit times in a row and the string is a valid repeatLimitedString.
The string is the lexicographically largest repeatLimitedString possible so we return "bbabaa".
Note that the string "bbabaaa" is lexicographically larger but the letter 'a' appears more than 2 times in a row, so it is not a valid repeatLimitedString.
"""
from collections import Counter


class Solution:
def repeatLimitedString(self, s: str, repeatLimit: int) -> str:
cnt = dict(Counter(s))
cs = sorted(cnt)
ans = []
# print(cnt, cs)
while cs:
self.use_letter(cnt, cs, ans, repeatLimit)
# print(ans)
return "".join(ans)
def use_letter(self, cnt, cs, ans, repeatLimit):
c = cs[-1]
while True:
app_n = min(repeatLimit, cnt[c])
ans.append(c * app_n)
cnt[c] -= app_n
if cnt[c] > 0 and len(cs) > 1:
backup = cs[-2]
ans.append(backup)
cnt[backup] -= 1
if cnt[backup] == 0:
cs.pop(len(cs) - 2)
else:
break
cs.pop()
"""
Time/Space O(N)
"""
|
StarcoderdataPython
|
4818994
|
from Graph.Graph import Graph
class BreadthFirstPaths:
def __init__(self, graph, s):
self._marked = [False] * graph.V()
self.edgeTo = [None] * graph.V()
self.bfs(graph, s)
def bfs(self, graph, v):
queue = []
queue.append(v)
self._marked[v] = True
while len(queue):
v = queue[0]
del queue[0]
for w in graph.adj(v):
if not self._marked[w]:
queue.append(w)
self._marked[w] = True
self.edgeTo[w] = v
def hasPathTo(self, v):
return self._marked[v]
def pathTo(self, v):
path = []
while v != None:
path.append(v)
v = self.edgeTo[v]
return path[::-1]
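

# Hedged usage sketch (not part of the original module). The real Graph class is
# imported from Graph.Graph above; the tiny stand-in below is hypothetical and
# only mimics the two methods BreadthFirstPaths relies on, V() and adj(v).
class _ToyGraph:
    def __init__(self, v, edges):
        self._adj = {i: [] for i in range(v)}
        for a, b in edges:
            self._adj[a].append(b)
            self._adj[b].append(a)

    def V(self):
        return len(self._adj)

    def adj(self, v):
        return self._adj[v]


if __name__ == "__main__":
    g = _ToyGraph(5, [(0, 1), (1, 2), (2, 3), (3, 4)])
    bfs = BreadthFirstPaths(g, 0)
    print(bfs.hasPathTo(4))  # True
    print(bfs.pathTo(4))     # [0, 1, 2, 3, 4]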
|
StarcoderdataPython
|
55966
|
<gh_stars>10-100
from benchmarkstt.segmentation import core
from benchmarkstt.schema import Item
import pytest
@pytest.mark.parametrize('text,expected', [
('hello world! how are you doing?! ', ['hello ', 'world! ', 'how ', 'are ', 'you ', 'doing?! ']),
('\nhello world! how are you doing?! ', ['\nhello ', 'world! ', 'how ', 'are ', 'you ', 'doing?! ']),
('single-word', ['single-word']),
(' test', [' test']),
(' test', [' test']),
(' test ', [' test ']),
('test ', ['test ']),
('test B', ['test ', 'B']),
('test B ', ['test ', 'B ']),
('\n\n', ['\n\n'])
])
def test_simple(text, expected):
result = list(core.Simple(text))
assert ''.join([word['@raw'] for word in result]) == text
assert len(result) == len(expected)
for i in range(0, len(expected)):
expected_raw = expected[i]
gotten = result[i]
assert type(gotten) is Item
assert expected_raw == gotten['@raw']
assert expected_raw.strip() == gotten['item']
|
StarcoderdataPython
|
3252331
|
<reponame>kuldeepaman/tf-pose
# -*- coding: utf-8 -*-
"""
Shows use of PlotWidget to display panning data
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
win = pg.GraphicsLayoutWidget(show=True)
win.setWindowTitle('pyqtgraph example: PanningPlot')
plt = win.addPlot()
#plt.setAutoVisibleOnly(y=True)
curve = plt.plot()
data = []
count = 0
def update():
global data, curve, count
data.append(np.random.normal(size=10) + np.sin(count * 0.1) * 5)
if len(data) > 100:
data.pop(0)
curve.setData(np.hstack(data))
count += 1
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
StarcoderdataPython
|
3201342
|
"""
Draws a window filled with BG_COLOR
"""
import pygame
import constants as con
TITLE = "beasties"
TILES_HORIZONTAL = 4
TILES_VERTICAL = 4
TILESIZE = 128
WINDOW_WIDTH = TILESIZE * TILES_HORIZONTAL
WINDOW_HEIGHT = TILESIZE * TILES_VERTICAL
class Game:
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
pygame.display.set_caption(TITLE)
self.surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
self.BG_COLOR = con.LIGHTGREY
self.keep_looping = True
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
def update(self):
pass
def draw(self):
self.surface.fill(self.BG_COLOR)
pygame.display.update()
def main(self):
while self.keep_looping:
self.events()
self.update()
self.draw()
if __name__ == "__main__":
mygame = Game()
mygame.main()
|
StarcoderdataPython
|
3256860
|
import matplotlib.pyplot as plt
import os
def summarize_performance(history, Model_path):
print(history.history.keys())
if('lr' in history.history.keys()):
plt.plot(history.history['lr'])
plt.title('Model lr')
plt.ylabel('lr')
plt.xlabel('Epoch')
plt.savefig(os.path.join(Model_path,'lr.png'))
plt.clf()
plt.cla()
plt.close()
    # Plot training & validation IoU values
plt.plot(history.history['iou_label'])
plt.plot(history.history['val_iou_label'])
plt.title('Model iou_label')
plt.ylabel('iou_label')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.savefig(os.path.join(Model_path,'iou_label.png'))
plt.clf()
plt.cla()
plt.close()
plt.plot(history.history['per_pixel_acc'])
plt.plot(history.history['val_per_pixel_acc'])
plt.title('Model per_pixel_acc')
plt.ylabel('per_pixel_acc')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.savefig(os.path.join(Model_path,'per_pixel_acc.png'))
plt.clf()
plt.cla()
plt.close()
plt.plot(history.history['Mean_IOU'])
plt.plot(history.history['val_Mean_IOU'])
plt.title('Model Mean_IOU')
plt.ylabel('Mean_IOU')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.savefig(os.path.join(Model_path,'Mean_IOU.png'))
plt.clf()
plt.cla()
plt.close()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.savefig(os.path.join(Model_path,'loss.png'))
plt.clf()
plt.cla()
plt.close()
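# Minimal usage sketch (illustrative; assumes a Keras-style History object whose history dict
# contains the metrics plotted above -- iou_label, per_pixel_acc, Mean_IOU, loss -- plus their
# val_ counterparts, and optionally lr):
# history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=50)
# summarize_performance(history, Model_path="./model_output")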
|
StarcoderdataPython
|
4838410
|
import numpy as np
import argparse
from glob import glob
from copy import copy
import random
import pickle
# network
import torch
import torch.nn.functional as F
torch.manual_seed(0)
# GPU config
GPU = False
device = torch.device("cuda" if GPU else "cpu")
hidden_dim = 128
mb = 32
opt = "Adam" # SGD, Adam
C = 3 # word2vec window size satisfying C >= 1
x_length = 1 + C * 2 # training label length
TopN = 10 # display N similar word in test
# lr, iteration
train_factors = [[0.01, 1000]]
import MeCab
mecab = MeCab.Tagger("-Owakati")
class Word2Vec(torch.nn.Module):
def __init__(self, input_size, dim=512):
super(Word2Vec, self).__init__()
self.embed = torch.nn.Linear(input_size, dim)
        # Register the per-position output layers as submodules (ModuleList) so their
        # parameters are trained, moved by .to(device), and saved in state_dict.
        self.outs = torch.nn.ModuleList(
            [torch.nn.Linear(dim, input_size) for _ in range(C * 2)]
        )
self.out = torch.nn.Linear(dim, input_size)
def forward(self, input):
embed = self.embed(input)
xs = []
for i in range(C * 2):
x = self.outs[i](embed)
x = F.softmax(x, dim=1)
xs.append(x)
#x = self.out(embed)
#x = F.softmax(x, dim=1)
return xs
def get_vec(self, input):
return self.embed(input)
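# Note: get_vec expects one-hot rows of width input_size (the vocabulary size) and returns the
# corresponding (batch, dim) embedding matrix; the similarity search in test() relies on this.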
def data_load():
sentences = []
    # get vocabulary
_chars = "あいうおえかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽぁぃぅぇぉゃゅょっアイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヲンガギグゲゴザジズゼゾダヂヅデドバビブベボパピプペポァィゥェォャュョッー、。「」1234567890!?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz,.@#"
voca = ["<BRANK>"] + [c for c in _chars]
# each file
for file_path in glob("./sandwitchman_*.txt"):
print("read:", file_path)
with open(file_path, 'r') as f:
            # get the lines of one file
lines = [x.strip() for x in f.read().strip().split("\n")]
            # build vocabulary from the mecab-parsed lines
for line in lines:
voca = list(set(voca) | set(mecab.parse(line).strip().split(" ")))
# add sentences
sentences += lines
    # sort vocabulary
voca.sort()
# display sentence number
print("sentence pairs num:", len(sentences))
sentence_index = []
# each sentence
for s in sentences:
# mecab parse
s_parse = mecab.parse(s).strip().split(" ")
        # pad with <BRANK> labels at the start and end
_s = ["<BRANK>"] * C + s_parse + ["<BRANK>"] * C
# make training pairs
for i in range(C, len(s_parse) + C):
s_index = [voca.index(x) for x in _s[i-C : i+C+1]]
sentence_index += [s_index]
return voca, sentence_index
# train
def train():
# data load
voca, sentence_index = data_load()
voca_num = len(voca)
    # write vocabulary list
pickle.dump(voca, open("vocabrary_word2vec.bn", "wb"))
print("vocabrary num:", voca_num)
print("e.x.", voca[:5])
# model
model = Word2Vec(voca_num, dim=hidden_dim).to(device)
# minibatch index
mbi = 0
data_num = len(sentence_index)
train_ind = np.arange(data_num)
np.random.seed(0)
np.random.shuffle(train_ind)
# loss function
loss_fn = torch.nn.NLLLoss()
# each learning rate and iteration
for lr, ite in train_factors:
print("lr", lr, " ite", ite)
# optimizer
if opt == "SGD":
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
elif opt == "Adam":
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
else:
raise Exception("invalid optimizer:", opt)
# each iteration
for ite in range(ite):
# get minibatch index
if mbi + mb > data_num:
mb_ind = copy(train_ind[mbi:])
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(data_num-mbi))]))
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
# get minibatch
X_inds = [sentence_index[i] for i in mb_ind]
loss = 0
accuracy = 0.
total_len = 0
# each data of minibatch
for mb_index in range(mb):
# 1 data of minibatch
Xs = np.array(X_inds[mb_index]).reshape([-1, 1])
input_X = np.zeros([1, voca_num])
input_X[:, Xs[C]] = 1
input_X = torch.tensor(input_X, dtype=torch.float).to(device)
# reset graph
optimizer.zero_grad()
# data length
total_len += x_length
# forward network
ys = model(input_X)
# target label index
t_inds = [_i for _i in range(x_length) if _i != C]
# each target label
for i, y in zip(t_inds, ys):
# target label
t = torch.tensor(Xs[i], dtype=torch.long).to(device)
# get loss
loss += loss_fn(torch.log(y), t)
# count accuracy
if y.argmax() == t:
accuracy += 1
"""
# each target label
for i in range(x_length):
# forward network
y = model(input_X)
# target label
t = torch.tensor(Xs[i], dtype=torch.long).to(device)
#t = torch.tensor(Xs[i], dtype=torch.long).to(device).view(-1, voca_num)
# get loss
loss += loss_fn(torch.log(y), t)
# count accuracy
if y.argmax() == t:
accuracy += 1
"""
# loss backward
loss.backward()
# update weight
optimizer.step()
# get loss
loss = loss.item() / total_len
accuracy = accuracy / total_len
if (ite + 1) % 10 == 0:
print("iter :", ite+1, ",loss >>:", loss, "accuracy:", accuracy)
torch.save(model.state_dict(), 'word2vec.pt')
# test
def test(first_sentence="サンドウィッチマン"):
    # get vocabulary
voca = pickle.load(open("vocabrary_word2vec.bn", "rb"))
voca_num = len(voca)
print("vocabrary num:", voca_num)
# load trained model
model = Word2Vec(voca_num, dim=hidden_dim).to(device)
model.load_state_dict(torch.load('word2vec.pt'))
xs = []
    # if the word is not found in the vocabulary
if first_sentence not in voca:
raise Exception("not found word:", first_sentence)
    # get feature vectors for the whole vocabulary
mb = 32
# feature vectors library
features = np.ndarray([0, hidden_dim])
for i in range(0, voca_num, mb):
# get minibatch
_mb = min(mb, voca_num - i)
# one hot vector
input_X = torch.zeros([_mb, voca_num], dtype=torch.float).to(device)
input_X[np.arange(_mb), np.arange(i, min(i + mb, voca_num))] = 1
# get vector feature
vecs = model.get_vec(input_X)
vecs = vecs.detach().cpu().numpy()
# add feature vectors
features = np.vstack([features, vecs])
print(features.shape)
# make one hot input X
input_X = torch.zeros([1, voca_num], dtype=torch.float).to(device)
input_X[:, voca.index(first_sentence)] = 1
# get target feature vector
vec = model.get_vec(input_X)
vec = vec.detach().cpu().numpy()[0]
# get similarity
#similarity_scores = np.sum(np.abs(features - vec) ** 2, axis=1)
# get cosine similarity
Norm_A = np.linalg.norm(features, axis=1)
Norm_B = np.linalg.norm(vec)
similarity_scores = np.dot(features, vec) / Norm_A / Norm_B
    # sort indices by similarity in descending order; note the top hit is the query word itself
min_inds = similarity_scores.argsort()[::-1]
print("Target:", first_sentence)
# print
for i in range(TopN):
ind = min_inds[i]
print("top{}: {} ({:.4f})".format(i + 1, voca[ind], similarity_scores[ind]))
def arg_parse():
    parser = argparse.ArgumentParser(description='Word2Vec implemented with PyTorch')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--input', dest='input', default="サンドウィッチマン", type=str)
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test(args.input)
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
|
StarcoderdataPython
|
1768453
|
# coding:utf-8
'''
@author = super_fazai
@File : str_utils.py
@Time : 2018/8/4 13:15
@connect : <EMAIL>
'''
|
StarcoderdataPython
|
3363586
|
'''
Note: an example combining multiprocessing with coroutines (asyncio)
'''
import asyncio
from multiprocessing import Process, Queue, current_process
import aiofiles
from aiohttp import ClientSession
async def req1(session: ClientSession, i):
async with session.get(f'http://httpbin.org/get?a={i}') as resp:
async with aiofiles.open(f'data/{i}.log', mode='w', encoding='utf-8') as f:
v = f'{str(current_process().name)} | {(await resp.json())["args"]["a"]}'
await f.write(v)
print(v)
return i
async def main_async(req_list):
async with ClientSession() as session:
res = await asyncio.gather(*[req1(session, i) for i in req_list])
return res
def main(req_list, res_value: Queue):
res = asyncio.get_event_loop().run_until_complete(main_async(req_list))
res_value.put(res)
def split(list, step):
    # split a large list into chunks: split([1,2,3,4,5,6], 2) gives [[1,2],[3,4],[5,6]]
length = len(list)
return [list[i:i+step] for i in range(0, length, step)]
def main_process(req_list, process_count):
raw_list = [i for i in range(req_list)]
split_list = split(raw_list, process_count)
res_value = Queue()
ps = [Process(target=main, args=(i, res_value))
for i in split_list]
for i in ps:
i.start()
for i in ps:
i.join()
res_list = []
for i in range(len(split_list)):
res_list.extend(res_value.get())
diff = set(raw_list) - set(res_list)
    assert not diff, f'unprocessed items: {diff}'
if __name__ == '__main__':
main_process(10, 3)
'''
Output: note that 4 processes were started; Process-1 handled [0,1,2], Process-2 handled [3,4,5], ...
Process-1 | 2
Process-1 | 0
Process-2 | 3
Process-1 | 1
Process-3 | 6
Process-3 | 8
Process-2 | 5
Process-4 | 9
Process-2 | 4
Process-3 | 7
'''
|
StarcoderdataPython
|
3329976
|
import time
import random
import itertools
# import gc
import os
import sys
import datetime
import numpy as np
import yaml
import pickle
from operator import itemgetter
from optparse import OptionParser
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc, average_precision_score
sys.path.insert(0, os.path.join(sys.path[0], ".."))
from tiknib.utils import do_multiprocess, parse_fname
from tiknib.utils import load_func_data
from tiknib.utils import flatten
from tiknib.utils import store_cache
from tiknib.utils import load_cache
from get_roc_graph import plot_roc_all
import logging
import coloredlogs
logger = logging.getLogger(__name__)
coloredlogs.install(level=logging.INFO)
coloredlogs.install(level=logging.DEBUG)
np.seterr(divide="ignore", invalid="ignore")
def debughere():
import ipdb; ipdb.set_trace(sys._getframe().f_back)
def get_package(func_key):
return func_key[0]
def get_binary(func_key):
return func_key[1]
def get_func(func_key):
return (func_key[2], func_key[3])
def get_opti(option_key):
return option_key[0]
def get_arch(option_key):
return option_key[1]
def get_arch_nobits(option_key):
return option_key[1].split("_")[0]
def get_bits(option_key):
return option_key[1].split("_")[1]
def get_compiler(option_key):
return option_key[2]
def get_others(option_key):
return option_key[3]
def parse_other_options(bin_path):
other_options = ["lto", "pie", "noinline"]
for opt in other_options:
if opt in bin_path:
return opt
return "normal"
def get_optionidx_map(options):
return {opt: idx for idx, opt in enumerate(sorted(options))}
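# e.g. get_optionidx_map(["O1", "O0"]) == {"O0": 0, "O1": 1}; the real keys here are option tuples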
def is_valid(dictionary, s):
return s in dictionary and dictionary[s]
def load_options(config):
options = ["opti", "arch", "compiler", "others"]
src_options = []
dst_options = []
fixed_options = []
for idx, opt in enumerate(options):
src_options.append(config["src_options"][opt])
dst_options.append(config["dst_options"][opt])
if is_valid(config, "fixed_options") and opt in config["fixed_options"]:
fixed_options.append(idx)
src_options = set(itertools.product(*src_options))
dst_options = set(itertools.product(*dst_options))
options = sorted(src_options.union(dst_options))
optionidx_map = get_optionidx_map(options)
dst_options_filtered = {}
# Filtering dst options
for src_option in src_options:
def _check_option(opt):
if opt == src_option:
return False
for idx in fixed_options:
if opt[idx] != src_option[idx]:
return False
return True
candidates = list(filter(_check_option, dst_options))
# arch needs more filtering ...
# - 32 vs 64 bits
# - little vs big endian
# need to have same archs without bits
# TODO: move this file name checking into config option.
if "arch_bits" in config["fname"]:
def _check_arch_without_bits(opt):
return get_arch_nobits(opt) == get_arch_nobits(src_option)
candidates = list(filter(_check_arch_without_bits, candidates))
# need to have same bits
elif "arch_endian" in config["fname"]:
def _check_bits(opt):
return get_bits(opt) == get_bits(src_option)
candidates = list(filter(_check_bits, candidates))
candidates = list(set([optionidx_map[opt] for opt in candidates]))
dst_options_filtered[optionidx_map[src_option]] = candidates
logger.info("total %d options.", len(options))
logger.info("%d src options.", len(src_options))
logger.info("%d dst options.", len(dst_options))
logger.info("%d filtered dst options.", len(dst_options_filtered))
return options, dst_options_filtered
def pre_k(ranks, k):
count = 0
for r in ranks:
if r <= k:
count += 1
return count / len(ranks)
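# e.g. pre_k([1, 5, 120], 10) == 2 / 3: two of the three ranks fall within the top 10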
def analyze_top_k_results(config, all_data):
for target_key in all_data:
logger.info("Analyzing %s", target_key)
all_ranks=[]
all_funcs=[]
for target_option in all_data[target_key]:
result_arch, result, scores = all_data[target_key][target_option]
ranks, func_counts, other_ranks = result_arch
ranks = list(ranks.values())
func_counts = list(func_counts.values())
logger.info("Top-K %s(%s)", target_key, target_option)
logger.info("Avg Rank: %0.4f", np.mean(ranks))
logger.info("Std Rank: %0.4f", np.std(ranks))
logger.info("Prec Top 1: %0.4f", pre_k(ranks,1))
logger.info("Prec Top 10: %0.4f", pre_k(ranks,10))
logger.info("Prec Top 100: %0.4f", pre_k(ranks,100))
logger.info("Avg Counts: %0.4f", np.mean(func_counts))
all_ranks.extend(ranks)
all_funcs.extend(func_counts)
logger.info("Top-K %s", target_key)
logger.info("Avg Rank: %0.4f", np.mean(all_ranks))
logger.info("Std Rank: %0.4f", np.std(all_ranks))
logger.info("Prec Top 1: %0.4f", pre_k(all_ranks,1))
logger.info("Prec Top 10: %0.4f", pre_k(all_ranks,10))
logger.info("Prec Top 100: %0.4f", pre_k(all_ranks,100))
logger.info("Avg Counts: %0.4f", np.mean(all_funcs))
logger.info("============= normal feature set=============")
for target_key in all_data:
logger.info("Analyzing %s", target_key)
all_ranks=[]
all_funcs=[]
for target_option in all_data[target_key]:
result_arch, result, scores = all_data[target_key][target_option]
ranks, func_counts, other_ranks = result
ranks = list(ranks.values())
func_counts = list(func_counts.values())
logger.info("Top-K %s(%s)", target_key, target_option)
logger.info("Avg Rank: %0.4f", np.mean(ranks))
logger.info("Std Rank: %0.4f", np.std(ranks))
logger.info("Prec Top 1: %0.4f", pre_k(ranks,1))
logger.info("Prec Top 10: %0.4f", pre_k(ranks,10))
logger.info("Prec Top 100: %0.4f", pre_k(ranks,100))
logger.info("Avg Counts: %0.4f", np.mean(func_counts))
all_ranks.extend(ranks)
all_funcs.extend(func_counts)
logger.info("Top-K %s", target_key)
logger.info("Avg Rank: %0.4f", np.mean(all_ranks))
logger.info("Std Rank: %0.4f", np.std(all_ranks))
logger.info("Prec Top 1: %0.4f", pre_k(all_ranks,1))
logger.info("Prec Top 10: %0.4f", pre_k(all_ranks,10))
logger.info("Prec Top 100: %0.4f", pre_k(all_ranks,100))
logger.info("Avg Counts: %0.4f", np.mean(all_funcs))
def check_opt(ops, option):
if type(ops) is not list:
ops = [ops]
for op in ops:
pass
def analyze_opt(data, op1, op2, arch=True):
global interested_func_keys
all_ranks=[]
all_funcs=[]
all_other={}
max_ranks=[]
if type(op1) is not list:
op1 = [op1]
if type(op2) is not list:
op2 = [op2]
for target_option in data:
if any([o not in str(target_option) for o in op1]):
continue
result_arch, result, scores = data[target_option]
if arch:
ranks, funcs, other_ranks = result_arch
else:
ranks, funcs, other_ranks = result
for src_option in ranks:
tmp_ranks = []
if any([o not in str(src_option) for o in op2]):
continue
all_ranks.append(ranks[src_option])
all_funcs.append(funcs[src_option])
tmp_ranks.append(ranks[src_option])
for f in other_ranks[src_option]:
if f in all_other:
all_other[f].append(other_ranks[src_option][f])
else:
all_other[f] = [other_ranks[src_option][f]]
tmp_ranks.append(other_ranks[src_option][f])
max_ranks.append(min(tmp_ranks))
result=[]
#result.append('%s to %s'%(op1[0],op2[0]))
result.append(op1[0])
result.append(op2[0])
result.append(str(len(all_ranks)))
result.append(np.mean(all_funcs))
result.append(np.mean(all_ranks))
result.append(pre_k(all_ranks, 1))
for f in interested_func_keys:
result.append(np.mean(all_other[f]))
result.append(np.mean(max_ranks))
result.append(pre_k(max_ranks, 1))
'''
print("%0.3f"% np.mean(all_ranks))
print("%0.3f"% np.std(all_ranks))
print("%0.3f"% np.mean(all_funcs))
print("%0.3f"% pre_k(all_ranks,1))
#print("%0.3f"% pre_k(all_ranks,10))
#print("%0.3f"% pre_k(all_ranks,100))
for f in interested_func_keys:
print(f[3])
print("%0.3f"% np.mean(all_other[f]))
#print("%0.3f"% pre_k(all_other[f],1))
#print("%0.3f"% pre_k(all_other[f],10))
#print("%0.3f"% pre_k(all_other[f],100))
print("MAX")
print("%0.3f"% np.mean(max_ranks))
print("%0.3f"% np.std(max_ranks))
print("%0.3f"% pre_k(max_ranks,1))
print("%0.3f"% pre_k(max_ranks,10))
print("%0.3f"% pre_k(max_ranks,100))
'''
return result
def analyze(opts):
global interested_func_keys
config_fname = opts.config
with open(config_fname, "r") as f:
config = yaml.safe_load(f)
config["fname"] = config_fname
#file_handler = logging.FileHandler(os.path.join(outdir, "log.txt"))
#logger.addHandler(file_handler)
logger.info("config file name: %s", config["fname"])
options, dst_options = load_options(config)
features = sorted(config["features"])
target_funcs = config["target_funcs"]
patched_funcs = config["patched_funcs"]
target_func_keys = []
patched_func_keys = []
for target_func in target_funcs:
package, bin_name, src_file, src_line = target_func
func_key = (package, bin_name, src_file, src_line)
logger.info("Target function: %s", func_key)
target_func_keys.append(func_key)
for patched_func in patched_funcs:
package, bin_name, src_file, src_line = patched_func
func_key = (package, bin_name, src_file, src_line)
logger.info("Patched function: %s", func_key)
patched_func_keys.append(func_key)
is_all = False
if "top-k_all" in opts.pickle:
is_all = True
else:
funcname = os.path.basename(opts.pickle)[6:-7]
with open(opts.pickle, 'rb') as f:
if is_all:
all_data = pickle.load(f)
else:
all_data = {}
all_data[funcname] = pickle.load(f)
analyze_top_k_results(config, all_data)
#analyze_opt(all_data[funcname], 'normal', 'normal')
#analyze_opt(all_data[funcname], ['clang-4','x86_64','O1'], ['gcc-4','x86_64','O1'])
interested_func_keys = [k for k in target_func_keys if k[3] != funcname]
interested_func_keys += patched_func_keys
tests = [('norm', 'norm'),
('arm','arm'),
('arm','mips'),
('arm','x86'),
('mips','mips'),
('mips','arm'),
('mips','x86'),
('x86','x86'),
('x86','arm'),
('x86','mips'),
('O2','O3'),
('O3','O2'),
('gcc','clang'),
('gcc-4','gcc-8'),
('gcc-8','gcc-4'),
('clang-4','clang-7'),
('clang-7','clang-4')
]
all_res = []
all_res.append(['X','Y', '# of test', '# of Func', 'Rank', 'Pre@1', 'Rank (dtls)', 'Rank (tls-patched)',
'Rank (dtls-patched)', 'Rank', 'Pre@1'])
idx = 0
for test in tests:
A, B = test
res = analyze_opt(all_data[funcname], A, B)
#res2 = analyze_opt(all_data[funcname], A, B, False)
all_res.append(res)
delim = ','
for j in range(len(all_res[0])):
for i in range(len(all_res)):
if type(all_res[i][j]) is str:
print(all_res[i][j], end=delim)
else:
print("%0.2f"% all_res[i][j], end=delim)
print('')
if __name__ == "__main__":
op = OptionParser()
op.add_option(
"--config",
action="store",
dest="config",
help="give config file (ex) config/config_default.yml",
)
op.add_option(
"--pickle",
type="str",
action="store",
dest="pickle",
help="a file containing pickled result"
)
op.add_option(
"--train_funcs_limit",
type="int",
action="store",
dest="train_funcs_limit",
default=200000,
help="a number to limit the number of functions in training",
)
(opts, args) = op.parse_args()
if not opts.config:
op.print_help()
exit(1)
analyze(opts)
|
StarcoderdataPython
|
124692
|
# attention_to_gif/visualizer.py
# importing matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
# importing moviepy libraries
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
class AttentionVisualizer:
"""
Creates a GIF of the transition of attention weights across the layers.
layer_wise_attention_weights : Tensor : (num_layers,batch_size,num_heads,seq_len_x,seq_len_y)
x_label_toks : labels for indices in the sequence.
y_label_toks : labels for indices in the sequence.
title_message: Message to print in the video/Gif
    chosen_head : int : a particular head to visualise attention for.
                        If None, attention is summed over all heads at each layer.
    seq_len_x_lim : constrain the size of the attention map in the x dimension
    seq_len_y_lim : constrain the size of the attention map in the y dimension
"""
def __init__(self,\
layer_wise_attention_weights:torch.Tensor,
seq_len_x_lim=None,
seq_len_y_lim=None,
chosen_item=0,
chosen_head=None,
x_label_toks=[],
y_label_toks=[],
fig_size=(10,10),
title_message='',
) -> None:
#()
self.num_layers, self.batch_size , self.num_attention_heads , seq_len_x,seq_len_y = layer_wise_attention_weights.size()
        # unpacking the size here also validates the expected 5-D shape
self.seq_len_x_lim = seq_len_x_lim
self.seq_len_y_lim = seq_len_y_lim
self.chosen_item=chosen_item
self.layer_wise_attention_weights = layer_wise_attention_weights
self.fig, self.ax = None,None
self.fig_size=fig_size
self.x_label_toks = x_label_toks
self.y_label_toks = y_label_toks
self.title_message = title_message
self.chosen_head = chosen_head
# self.fig.colorbar()
def get_attention_values(self,layer,chosen_head=None):
if chosen_head is not None:
conv_arr = self.layer_wise_attention_weights[int(layer)][self.chosen_item][chosen_head].cpu().numpy()
else:
conv_arr = self.layer_wise_attention_weights[int(layer)][self.chosen_item].sum(dim=0).cpu().numpy()
if self.seq_len_x_lim is not None:
conv_arr= conv_arr[:,:self.seq_len_x_lim]
if self.seq_len_y_lim is not None:
conv_arr= conv_arr[:self.seq_len_y_lim]
return conv_arr
def __call__(self,t):
# clear
if self.fig is None:
self.fig, self.ax = plt.subplots(figsize=self.fig_size)
if len(self.ax.images) > 0:
self.ax.images[-1].colorbar.remove()
self.ax.clear()
conv_arr = self.get_attention_values(t,chosen_head=self.chosen_head)
cax = self.ax.matshow(conv_arr,origin='lower', cmap='viridis',aspect='auto')
# self.y_label_toks
self.fig.colorbar(cax,ax=self.ax)
if len(self.x_label_toks) > 0:
self.ax.set_xticks([i for i in range(len(self.x_label_toks))])
self.ax.set_xticklabels(self.x_label_toks,)
if len(self.y_label_toks) > 0:
self.ax.set_yticks([len(self.y_label_toks)-i-1 for i in range(len(self.y_label_toks))])
self.ax.set_yticklabels(self.y_label_toks)
default_title = f" Attention At Layer : {int(t)} \n"
if self.chosen_head is not None:
default_title = f" Attention At Layer : {int(t)} And Head : {self.chosen_head}\n"
        if self.title_message == '':
self.ax.set_title(default_title)
else:
self.ax.set_title(f"{self.title_message}\n {default_title}")
return mplfig_to_npimage(self.fig)
def save_visualisation(self,viz_name='attention_viz.gif',fps=20):
animation = VideoClip(make_frame=self,duration=self.num_layers)
animation.write_gif(viz_name,fps=fps)
def show_visualisation(self,viz_name='attention_viz.gif',fps = 20, loop = False, autoplay = False):
# animation = VideoClip(self, duration = self.num_layers)
animation = VideoClip(make_frame=self,duration=self.num_layers)
animation.ipython_display(fps =fps ,loop=loop,autoplay=autoplay)
def create_single_plot(self, fig_size=(10,10)):
fig,axes = plt.subplots(nrows=self.num_layers, ncols=self.num_attention_heads,figsize=fig_size)
for lidx,layerax in enumerate(axes):
for hidx,headax in enumerate(layerax):
conv_arr = self.get_attention_values(lidx,chosen_head=hidx)
cax = headax.matshow(conv_arr,origin='lower', cmap='viridis',aspect='auto')
fig.colorbar(cax,ax=headax)
if len(self.x_label_toks) > 0:
headax.set_xticks([i for i in range(len(self.x_label_toks))])
headax.set_xticklabels(self.x_label_toks,)
if len(self.y_label_toks) > 0:
headax.set_yticks([len(self.y_label_toks)-i-1 for i in range(len(self.y_label_toks))])
headax.set_yticklabels(self.y_label_toks)
default_title = f" Attention At Layer : {int(lidx)} And Head : {hidx}\n"
headax.set_title(default_title)
return fig
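# Minimal usage sketch (illustrative shapes only, not part of the original file):
# attn = torch.rand(4, 1, 2, 6, 6)  # (num_layers, batch_size, num_heads, seq_len_x, seq_len_y)
# viz = AttentionVisualizer(attn, x_label_toks=list("abcdef"), y_label_toks=list("abcdef"))
# viz.save_visualisation('attention_viz.gif', fps=20)    # write the GIF
# fig = viz.create_single_plot(fig_size=(12, 12))        # or a static grid of all layers/heads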
|
StarcoderdataPython
|