code (stringlengths 22 to 1.05M) | apis (listlengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
#!/usr/bin/env python
from __future__ import print_function
import pybullet as p
from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, \
get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, \
multiply, Pose, Point, interpolate_poses, HideOutput
from pybullet_tools.ikfast.franka_panda.ik import PANDA_INFO, FRANKA_URDF
from pybullet_tools.ikfast.ikfast import get_ik_joints, closest_inverse_kinematics
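# Move the tool link by `distance` along its negative z-axis, checking the closest IK solution at each interpolated pose.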
def test_retraction(robot, info, tool_link, distance=0.1, **kwargs):
    ik_joints = get_ik_joints(robot, info, tool_link)
    start_pose = get_link_pose(robot, tool_link)
    end_pose = multiply(start_pose, Pose(Point(z=-distance)))
    for pose in interpolate_poses(start_pose, end_pose, pos_step_size=0.01):
        conf = next(closest_inverse_kinematics(robot, info, tool_link, pose, **kwargs), None)
        if conf is None:
            print('Failure!')
            wait_for_user()
            break
        set_joint_positions(robot, ik_joints, conf)
        wait_for_user()
    # for conf in islice(ikfast_inverse_kinematics(robot, info, tool_link, pose, max_attempts=INF, max_distance=0.5), 1):
    #     set_joint_positions(robot, joints[:len(conf)], conf)
    #     wait_for_user()
#####################################
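# Load the Franka Panda on a plane, sample random arm configurations, and test the retraction motion from each one.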
def main():
    connect(use_gui=True)
    add_data_path()
    plane = p.loadURDF("plane.urdf")
    with HideOutput():
        with LockRenderer():
            robot = load_model(FRANKA_URDF, fixed_base=True)
    dump_body(robot)
    print('Start?')
    wait_for_user()
    tool_link = link_from_name(robot, 'panda_hand')
    joints = get_movable_joints(robot)
    print('Joints', [get_joint_name(robot, joint) for joint in joints])
    sample_fn = get_sample_fn(robot, joints)
    for i in range(10):
        print('Iteration:', i)
        conf = sample_fn()
        set_joint_positions(robot, joints, conf)
        wait_for_user()
        test_retraction(robot, PANDA_INFO, tool_link, max_distance=0.01, max_time=0.05)
    disconnect()

if __name__ == '__main__':
    main()
|
[
"pybullet_tools.utils.get_movable_joints",
"pybullet_tools.utils.get_joint_name",
"pybullet_tools.ikfast.ikfast.get_ik_joints",
"pybullet_tools.utils.LockRenderer",
"pybullet_tools.utils.disconnect",
"pybullet_tools.utils.Point",
"pybullet_tools.utils.interpolate_poses",
"pybullet_tools.ikfast.ikfast.closest_inverse_kinematics",
"pybullet_tools.utils.HideOutput",
"pybullet_tools.utils.link_from_name",
"pybullet_tools.utils.connect",
"pybullet_tools.utils.set_joint_positions",
"pybullet_tools.utils.wait_for_user",
"pybullet_tools.utils.get_sample_fn",
"pybullet_tools.utils.get_link_pose",
"pybullet_tools.utils.dump_body",
"pybullet_tools.utils.add_data_path",
"pybullet_tools.utils.load_model",
"pybullet.loadURDF"
] |
[((618, 655), 'pybullet_tools.ikfast.ikfast.get_ik_joints', 'get_ik_joints', (['robot', 'info', 'tool_link'], {}), '(robot, info, tool_link)\n', (631, 655), False, 'from pybullet_tools.ikfast.ikfast import get_ik_joints, closest_inverse_kinematics\n'), ((673, 704), 'pybullet_tools.utils.get_link_pose', 'get_link_pose', (['robot', 'tool_link'], {}), '(robot, tool_link)\n', (686, 704), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((783, 842), 'pybullet_tools.utils.interpolate_poses', 'interpolate_poses', (['start_pose', 'end_pose'], {'pos_step_size': '(0.01)'}), '(start_pose, end_pose, pos_step_size=0.01)\n', (800, 842), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1392, 1413), 'pybullet_tools.utils.connect', 'connect', ([], {'use_gui': '(True)'}), '(use_gui=True)\n', (1399, 1413), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1418, 1433), 'pybullet_tools.utils.add_data_path', 'add_data_path', ([], {}), '()\n', (1431, 1433), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1447, 1471), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""'], {}), "('plane.urdf')\n", (1457, 1471), True, 'import pybullet as p\n'), ((1589, 1605), 'pybullet_tools.utils.dump_body', 'dump_body', (['robot'], {}), '(robot)\n', (1598, 1605), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1630, 1645), 'pybullet_tools.utils.wait_for_user', 'wait_for_user', ([], {}), '()\n', (1643, 1645), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1663, 1698), 'pybullet_tools.utils.link_from_name', 'link_from_name', (['robot', '"""panda_hand"""'], {}), "(robot, 'panda_hand')\n", (1677, 1698), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1712, 1737), 'pybullet_tools.utils.get_movable_joints', 'get_movable_joints', (['robot'], {}), '(robot)\n', (1730, 1737), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, 
disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1826, 1854), 'pybullet_tools.utils.get_sample_fn', 'get_sample_fn', (['robot', 'joints'], {}), '(robot, joints)\n', (1839, 1854), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((2102, 2114), 'pybullet_tools.utils.disconnect', 'disconnect', ([], {}), '()\n', (2112, 2114), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1047, 1090), 'pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['robot', 'ik_joints', 'conf'], {}), '(robot, ik_joints, conf)\n', (1066, 1090), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1099, 1114), 'pybullet_tools.utils.wait_for_user', 'wait_for_user', ([], {}), '()\n', (1112, 1114), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1481, 1493), 'pybullet_tools.utils.HideOutput', 'HideOutput', ([], {}), '()\n', (1491, 1493), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1945, 1985), 'pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['robot', 'joints', 'conf'], {}), '(robot, joints, conf)\n', (1964, 1985), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1994, 2009), 'pybullet_tools.utils.wait_for_user', 'wait_for_user', ([], {}), '()\n', (2007, 2009), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((746, 764), 'pybullet_tools.utils.Point', 'Point', ([], {'z': '(-distance)'}), '(z=-distance)\n', (751, 764), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((864, 930), 'pybullet_tools.ikfast.ikfast.closest_inverse_kinematics', 
'closest_inverse_kinematics', (['robot', 'info', 'tool_link', 'pose'], {}), '(robot, info, tool_link, pose, **kwargs)\n', (890, 930), False, 'from pybullet_tools.ikfast.ikfast import get_ik_joints, closest_inverse_kinematics\n'), ((1005, 1020), 'pybullet_tools.utils.wait_for_user', 'wait_for_user', ([], {}), '()\n', (1018, 1020), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1508, 1522), 'pybullet_tools.utils.LockRenderer', 'LockRenderer', ([], {}), '()\n', (1520, 1522), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1544, 1584), 'pybullet_tools.utils.load_model', 'load_model', (['FRANKA_URDF'], {'fixed_base': '(True)'}), '(FRANKA_URDF, fixed_base=True)\n', (1554, 1584), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n'), ((1759, 1787), 'pybullet_tools.utils.get_joint_name', 'get_joint_name', (['robot', 'joint'], {}), '(robot, joint)\n', (1773, 1787), False, 'from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, multiply, Pose, Point, interpolate_poses, HideOutput\n')]
|
from django.http.response import JsonResponse
from django.shortcuts import redirect, render
from django.views import generic
from .models import Product
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from .forms import ProductModelForm
import stripe
from django.conf import settings
stripe.api_key = settings.STRIPE_SECRET_KEY
# Create your views here.
class ProductListView(generic.ListView):
    template_name = "discover.html"
    queryset = Product.objects.filter(active=True)

class ProductDetailView(generic.DetailView):
    template_name = "products/product.html"
    queryset = Product.objects.all()
    context_object_name = "product"

    def get_context_data(self, **kwargs):
        context = super(ProductDetailView, self).get_context_data(**kwargs)
        context.update({
            "STRIPE_PUBLIC_KEY": settings.STRIPE_PUBLIC_KEY
        })
        return context

class UserProductListView(LoginRequiredMixin, generic.ListView):
    # shows the user's created products
    template_name = "products.html"

    def get_queryset(self):
        return Product.objects.filter(user=self.request.user)
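# Creation form view: attaches the logged-in user to the new product before saving, then redirects to its detail page.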
class ProductCreateView(LoginRequiredMixin, generic.CreateView):
    template_name = "products/product_create.html"
    form_class = ProductModelForm

    def get_success_url(self) -> str:
        return reverse("products:product-detail", kwargs={
            "slug": self.product.slug
        })

    def form_valid(self, form):
        instance = form.save(commit=False)
        instance.user = self.request.user
        instance.save()
        self.product = instance
        return super(ProductCreateView, self).form_valid(form)

class ProductUpdateView(LoginRequiredMixin, generic.UpdateView):
    template_name = "products/product_update.html"
    form_class = ProductModelForm

    def get_queryset(self):
        return Product.objects.filter(user=self.request.user)

    def get_success_url(self) -> str:
        return reverse("products:product-detail", kwargs={
            "slug": self.get_object().slug
        })

class ProductDeleteView(LoginRequiredMixin, generic.DeleteView):
    template_name = "products/product_delete.html"

    def get_queryset(self):
        return Product.objects.filter(user=self.request.user)

    def get_success_url(self) -> str:
        return reverse("user-products")
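# Checkout endpoint: builds a Stripe Checkout Session for the requested product and returns its id as JSON.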
class CreateCheckoutSessionView(generic.View):
    def post(self, request, *args, **kwargs):
        product = Product.objects.get(slug=self.kwargs["slug"])
        print(product)
        domain = "http://domain.com"
        if settings.DEBUG:
            domain = "http://127.0.0.1:8000"
        session = stripe.checkout.Session.create(
            line_items=[
                {
                    'price_data': {
                        'currency': 'usd',
                        'product_data': {
                            'name': product.name,
                        },
                        'unit_amount': product.price,
                    },
                    'quantity': 1,
                }],
            mode='payment',
            success_url=domain + reverse("success"),
            cancel_url=domain + reverse("discover"),
        )
        return JsonResponse({
            "id": session.id
        })

class SuccessView(generic.TemplateView):
    template_name = "success.html"
|
[
"django.urls.reverse",
"django.http.response.JsonResponse"
] |
[((1371, 1441), 'django.urls.reverse', 'reverse', (['"""products:product-detail"""'], {'kwargs': "{'slug': self.product.slug}"}), "('products:product-detail', kwargs={'slug': self.product.slug})\n", (1378, 1441), False, 'from django.urls import reverse\n'), ((2359, 2383), 'django.urls.reverse', 'reverse', (['"""user-products"""'], {}), "('user-products')\n", (2366, 2383), False, 'from django.urls import reverse\n'), ((3210, 3242), 'django.http.response.JsonResponse', 'JsonResponse', (["{'id': session.id}"], {}), "({'id': session.id})\n", (3222, 3242), False, 'from django.http.response import JsonResponse\n'), ((3120, 3138), 'django.urls.reverse', 'reverse', (['"""success"""'], {}), "('success')\n", (3127, 3138), False, 'from django.urls import reverse\n'), ((3172, 3191), 'django.urls.reverse', 'reverse', (['"""discover"""'], {}), "('discover')\n", (3179, 3191), False, 'from django.urls import reverse\n')]
|
"""Code for setting up the optimization problem for certification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
from scipy.sparse.linalg import eigs, LinearOperator
import tensorflow as tf
from tensorflow.contrib import autograph
from cleverhans.experimental.certification import utils
# Bound on lowest value of certificate to check for numerical errors
LOWER_CERT_BOUND = -10.0
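# Objective threshold below which the penalty and learning rate are updated in run_optimization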
UPDATE_PARAM_CONSTANT = -0.1
class Optimization(object):
    """Class that sets up and runs the optimization of dual_formulation"""

    def __init__(self, dual_formulation_object, sess, optimization_params):
        """Initialize the class variables.

        Args:
          dual_formulation_object: Instance of DualFormulation that contains the
            dual variables and objective
          sess: tf session to be used to run
          optimization_params: Dictionary with the following
            eig_num_iter - Number of iterations to run for computing minimum eigen
              value
            eig_learning_rate - Learning rate for minimum eigen value iterations
            init_smooth - Starting value of the smoothness parameter (typically
              around 0.001)
            smooth_decay - The factor by which to decay after every outer loop epoch
            optimizer - one of gd, adam, momentum or adagrad
            eig_type - The method to compute eigenvalues (TF or SCIPY)
        """
        self.sess = sess
        self.dual_object = dual_formulation_object
        self.params = optimization_params
        self.penalty_placeholder = tf.placeholder(tf.float32, shape=[])
        # The dimensionality of matrix M is the sum of sizes of all layers + 1
        # The + 1 comes due to a row and column of M representing the linear terms
        self.eig_init_vec_placeholder = tf.placeholder(
            tf.float32, shape=[1 + self.dual_object.dual_index[-1], 1])
        self.smooth_placeholder = tf.placeholder(tf.float32, shape=[])
        self.eig_num_iter_placeholder = tf.placeholder(tf.int32, shape=[])
        self.current_eig_val_estimate = None
        # Create graph for optimization
        self.prepare_for_optimization()

    def tf_min_eig_vec(self):
        """Function for min eigen vector using tf's full eigen decomposition."""
        # Full eigen decomposition requires the explicit psd matrix M
        _, matrix_m = self.dual_object.get_full_psd_matrix()
        [eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
        index = tf.argmin(eig_vals)
        return tf.reshape(
            eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])

    def tf_smooth_eig_vec(self):
        """Function that returns smoothed version of min eigen vector."""
        _, matrix_m = self.dual_object.get_full_psd_matrix()
        # Easier to think in terms of max so negating the matrix
        [eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
        exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
        scaling_factor = tf.reduce_sum(exp_eig_vals)
        # Multiplying each eig vector by exponential of corresponding eig value
        # Scaling factor normalizes the vector to be unit norm
        eig_vec_smooth = tf.divide(
            tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
            tf.sqrt(scaling_factor))
        return tf.reshape(
            tf.reduce_sum(eig_vec_smooth, axis=1),
            shape=[eig_vec_smooth.shape[0].value, 1])
    def get_min_eig_vec_proxy(self, use_tf_eig=False):
        """Computes the min eigen value and corresponding vector of matrix M.

        Args:
          use_tf_eig: Whether to use tf's default full eigen decomposition

        Returns:
          eig_vec: Estimated eigen vector corresponding to the minimum
            absolute eigen value
        """
        if use_tf_eig:
            # If smoothness parameter is too small, essentially no smoothing
            # Just output the eigen vector corresponding to min
            return tf.cond(self.smooth_placeholder < 1E-8,
                           self.tf_min_eig_vec,
                           self.tf_smooth_eig_vec)
        # Using autograph to automatically handle
        # the control flow of minimum_eigen_vector
        min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)

        def _vector_prod_fn(x):
            return self.dual_object.get_psd_product(x)

        estimated_eigen_vector = min_eigen_tf(
            x=self.eig_init_vec_placeholder,
            num_steps=self.eig_num_iter_placeholder,
            learning_rate=self.params['eig_learning_rate'],
            vector_prod_fn=_vector_prod_fn)
        return estimated_eigen_vector

    def get_scipy_eig_vec(self):
        """Computes scipy estimate of min eigenvalue for matrix M.

        Returns:
          eig_vec: Estimated eigen vector corresponding to the minimum eigen value
          eig_val: Estimated minimum eigen value
        """
        if not self.params['has_conv']:
            matrix_m = self.sess.run(self.dual_object.matrix_m)
            min_eig_vec_val, estimated_eigen_vector = eigs(matrix_m, k=1, which='SR',
                                                           tol=1E-4)
            min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
            return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
        else:
            dim = self.dual_object.matrix_m_dimension
            input_vector = tf.placeholder(tf.float32, shape=(dim, 1))
            output_vector = self.dual_object.get_psd_product(input_vector)

            def np_vector_prod_fn(np_vector):
                np_vector = np.reshape(np_vector, [-1, 1])
                output_np_vector = self.sess.run(output_vector, feed_dict={input_vector: np_vector})
                return output_np_vector

            linear_operator = LinearOperator((dim, dim), matvec=np_vector_prod_fn)
            # Performing shift invert scipy operation when eig val estimate is available
            min_eig_vec_val, estimated_eigen_vector = eigs(linear_operator,
                                                           k=1, which='SR', tol=1E-4)
            min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
            return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
    def prepare_for_optimization(self):
        """Create tensorflow op for running one step of descent."""
        if self.params['eig_type'] == 'TF':
            self.eig_vec_estimate = self.get_min_eig_vec_proxy()
        else:
            self.eig_vec_estimate = tf.placeholder(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))
        self.stopped_eig_vec_estimate = tf.stop_gradient(self.eig_vec_estimate)
        # Eig value is v^\top M v, where v is eigen vector
        self.eig_val_estimate = tf.matmul(
            tf.transpose(self.stopped_eig_vec_estimate),
            self.dual_object.get_psd_product(self.stopped_eig_vec_estimate))
        # Penalizing negative of min eigen value because we want min eig value
        # to be positive
        self.total_objective = (
            self.dual_object.unconstrained_objective
            + 0.5 * tf.square(
                tf.maximum(-self.penalty_placeholder * self.eig_val_estimate, 0)))
        global_step = tf.Variable(0, trainable=False)
        # Set up learning rate as a placeholder
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        # Set up the optimizer
        if self.params['optimizer'] == 'adam':
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        elif self.params['optimizer'] == 'adagrad':
            self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
        elif self.params['optimizer'] == 'momentum':
            self.optimizer = tf.train.MomentumOptimizer(
                learning_rate=self.learning_rate,
                momentum=self.params['momentum_parameter'],
                use_nesterov=True)
        else:
            self.optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.learning_rate)
        # Write out the projection step
        self.train_step = self.optimizer.minimize(
            self.total_objective, global_step=global_step)
        self.sess.run(tf.global_variables_initializer())
        # Projecting the dual variables
        proj_ops = []
        for i in range(self.dual_object.nn_params.num_hidden_layers + 1):
            # Lambda_pos is non negative for switch indices,
            # Unconstrained for positive indices
            # Zero for negative indices
            proj_ops.append(self.dual_object.lambda_pos[i].assign(
                tf.multiply(self.dual_object.positive_indices[i],
                            self.dual_object.lambda_pos[i]) +
                tf.multiply(self.dual_object.switch_indices[i],
                            tf.nn.relu(self.dual_object.lambda_pos[i]))))
            proj_ops.append(self.dual_object.lambda_neg[i].assign(
                tf.multiply(self.dual_object.negative_indices[i],
                            self.dual_object.lambda_neg[i]) +
                tf.multiply(self.dual_object.switch_indices[i],
                            tf.nn.relu(self.dual_object.lambda_neg[i]))))
            # Lambda_quad is only non zero and positive for switch
            proj_ops.append(self.dual_object.lambda_quad[i].assign(
                tf.multiply(self.dual_object.switch_indices[i],
                            tf.nn.relu(self.dual_object.lambda_quad[i]))))
            # Lambda_lu is always non negative
            proj_ops.append(self.dual_object.lambda_lu[i].assign(
                tf.nn.relu(self.dual_object.lambda_lu[i])))
        self.proj_step = tf.group(proj_ops)
        # Create folder for saving stats if the folder is not None
        if (self.params.get('stats_folder') and
                not tf.gfile.IsDirectory(self.params['stats_folder'])):
            tf.gfile.MkDir(self.params['stats_folder'])
        self.current_scipy_eig_val = None
    def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,
                     penalty_val, learning_rate_val):
        """Run one step of gradient descent for optimization.

        Args:
          eig_init_vec_val: Start value for eigen value computations
          eig_num_iter_val: Number of iterations to run for eigen computations
          smooth_val: Value of smoothness parameter
          penalty_val: Value of penalty for the current step
          learning_rate_val: Value of learning rate

        Returns:
          found_cert: True if a negative certificate is found, False otherwise
        """
        # Project onto feasible set of dual variables
        if self.current_step != 0 and self.current_step % self.params['projection_steps'] == 0:
            current_certificate = self.dual_object.compute_certificate()
            tf.logging.info('Inner step: %d, current value of certificate: %f',
                            self.current_step, current_certificate)
            # Sometimes due to either overflow or instability in inverses,
            # the returned certificate is large and negative -- keeping a check
            if LOWER_CERT_BOUND < current_certificate < 0:
                tf.logging.info('Found certificate of robustness!')
                return True
        # Running step
        step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,
                          self.eig_num_iter_placeholder: eig_num_iter_val,
                          self.smooth_placeholder: smooth_val,
                          self.penalty_placeholder: penalty_val,
                          self.learning_rate: learning_rate_val}
        if self.params['eig_type'] == 'SCIPY':
            current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()
            step_feed_dict.update({
                self.eig_vec_estimate: current_eig_vector
            })
        self.sess.run(self.train_step, feed_dict=step_feed_dict)
        [
            _, self.current_eig_vec_val, self.current_eig_val_estimate
        ] = self.sess.run([
            self.proj_step,
            self.eig_vec_estimate,
            self.eig_val_estimate
        ], feed_dict=step_feed_dict)
        if self.current_step % self.params['print_stats_steps'] == 0:
            [self.current_total_objective, self.current_unconstrained_objective,
             self.current_eig_vec_val,
             self.current_eig_val_estimate,
             self.current_nu] = self.sess.run(
                 [self.total_objective,
                  self.dual_object.unconstrained_objective,
                  self.eig_vec_estimate,
                  self.eig_val_estimate,
                  self.dual_object.nu], feed_dict=step_feed_dict)
            stats = {
                'total_objective':
                    float(self.current_total_objective),
                'unconstrained_objective':
                    float(self.current_unconstrained_objective),
                'min_eig_val_estimate':
                    float(self.current_eig_val_estimate)
            }
            tf.logging.debug('Current inner step: %d, optimization stats: %s',
                             self.current_step, stats)
            if self.params['stats_folder'] is not None:
                stats = json.dumps(stats)
                filename = os.path.join(self.params['stats_folder'],
                                        str(self.current_step) + '.json')
                with tf.gfile.Open(filename) as file_f:
                    file_f.write(stats)
        return False
    def run_optimization(self):
        """Run the optimization, call run_one_step with suitable placeholders.

        Returns:
          True if certificate is found
          False otherwise
        """
        penalty_val = self.params['init_penalty']
        # Don't use smoothing initially - very inaccurate for large dimension
        self.smooth_on = False
        smooth_val = 0
        learning_rate_val = self.params['init_learning_rate']
        self.current_outer_step = 1
        while self.current_outer_step <= self.params['outer_num_steps']:
            tf.logging.info('Running outer step %d with penalty %f',
                            self.current_outer_step, penalty_val)
            # Running inner loop of optimization with current_smooth_val,
            # current_penalty as smoothness parameters and penalty respectively
            self.current_step = 0
            # Run first step with random eig initialization and large number of steps
            found_cert = self.run_one_step(
                np.random.random(size=(1 + self.dual_object.dual_index[-1], 1)),
                self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val)
            if found_cert:
                return True
            while self.current_step < self.params['inner_num_steps']:
                self.current_step = self.current_step + 1
                found_cert = self.run_one_step(self.current_eig_vec_val,
                                               self.params['small_eig_num_steps'],
                                               smooth_val, penalty_val,
                                               learning_rate_val)
                if found_cert:
                    return True
            # Update penalty only if it looks like the current objective is optimized
            if self.current_total_objective < UPDATE_PARAM_CONSTANT:
                penalty_val = penalty_val * self.params['beta']
                learning_rate_val = learning_rate_val * self.params['learning_rate_decay']
            else:
                # To get more accurate gradient estimate
                self.params['small_eig_num_steps'] = (
                    1.5 * self.params['small_eig_num_steps'])
            # If eigen values seem small enough, turn on smoothing
            # useful only when performing full eigen decomposition
            if np.abs(self.current_eig_val_estimate) < 0.01:
                smooth_val = self.params['smoothness_parameter']
            self.current_outer_step = self.current_outer_step + 1
        return False
|
[
"tensorflow.cond",
"tensorflow.reduce_sum",
"numpy.abs",
"tensorflow.logging.info",
"tensorflow.logging.debug",
"tensorflow.maximum",
"tensorflow.reshape",
"json.dumps",
"tensorflow.multiply",
"tensorflow.argmin",
"tensorflow.Variable",
"tensorflow.divide",
"scipy.sparse.linalg.LinearOperator",
"tensorflow.self_adjoint_eig",
"tensorflow.sqrt",
"tensorflow.contrib.autograph.to_graph",
"tensorflow.nn.relu",
"tensorflow.placeholder",
"numpy.reshape",
"numpy.real",
"tensorflow.gfile.IsDirectory",
"tensorflow.global_variables_initializer",
"tensorflow.stop_gradient",
"tensorflow.train.AdagradOptimizer",
"tensorflow.transpose",
"tensorflow.group",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.GradientDescentOptimizer",
"scipy.sparse.linalg.eigs",
"tensorflow.gfile.Open",
"numpy.random.random",
"tensorflow.gfile.MkDir",
"tensorflow.train.AdamOptimizer"
] |
[((1580, 1616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (1594, 1616), True, 'import tensorflow as tf\n'), ((1808, 1882), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1 + self.dual_object.dual_index[-1], 1]'}), '(tf.float32, shape=[1 + self.dual_object.dual_index[-1], 1])\n', (1822, 1882), True, 'import tensorflow as tf\n'), ((1922, 1958), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (1936, 1958), True, 'import tensorflow as tf\n'), ((1995, 2029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[]'}), '(tf.int32, shape=[])\n', (2009, 2029), True, 'import tensorflow as tf\n'), ((2403, 2432), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['matrix_m'], {}), '(matrix_m)\n', (2422, 2432), True, 'import tensorflow as tf\n'), ((2445, 2464), 'tensorflow.argmin', 'tf.argmin', (['eig_vals'], {}), '(eig_vals)\n', (2454, 2464), True, 'import tensorflow as tf\n'), ((2476, 2548), 'tensorflow.reshape', 'tf.reshape', (['eig_vectors[:, index]'], {'shape': '[eig_vectors.shape[0].value, 1]'}), '(eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])\n', (2486, 2548), True, 'import tensorflow as tf\n'), ((2808, 2838), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['(-matrix_m)'], {}), '(-matrix_m)\n', (2827, 2838), True, 'import tensorflow as tf\n'), ((2932, 2959), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exp_eig_vals'], {}), '(exp_eig_vals)\n', (2945, 2959), True, 'import tensorflow as tf\n'), ((4062, 4108), 'tensorflow.contrib.autograph.to_graph', 'autograph.to_graph', (['utils.minimum_eigen_vector'], {}), '(utils.minimum_eigen_vector)\n', (4080, 4108), False, 'from tensorflow.contrib import autograph\n'), ((6247, 6286), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.eig_vec_estimate'], {}), '(self.eig_vec_estimate)\n', (6263, 6286), True, 'import tensorflow as tf\n'), ((6805, 6836), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6816, 6836), True, 'import tensorflow as tf\n'), ((6906, 6942), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (6920, 6942), True, 'import tensorflow as tf\n'), ((9045, 9063), 'tensorflow.group', 'tf.group', (['proj_ops'], {}), '(proj_ops)\n', (9053, 9063), True, 'import tensorflow as tf\n'), ((2865, 2909), 'tensorflow.divide', 'tf.divide', (['eig_vals', 'self.smooth_placeholder'], {}), '(eig_vals, self.smooth_placeholder)\n', (2874, 2909), True, 'import tensorflow as tf\n'), ((3199, 3222), 'tensorflow.sqrt', 'tf.sqrt', (['scaling_factor'], {}), '(scaling_factor)\n', (3206, 3222), True, 'import tensorflow as tf\n'), ((3255, 3292), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['eig_vec_smooth'], {'axis': '(1)'}), '(eig_vec_smooth, axis=1)\n', (3268, 3292), True, 'import tensorflow as tf\n'), ((3822, 3912), 'tensorflow.cond', 'tf.cond', (['(self.smooth_placeholder < 1e-08)', 'self.tf_min_eig_vec', 'self.tf_smooth_eig_vec'], {}), '(self.smooth_placeholder < 1e-08, self.tf_min_eig_vec, self.\n tf_smooth_eig_vec)\n', (3829, 3912), True, 'import tensorflow as tf\n'), ((4796, 4839), 'scipy.sparse.linalg.eigs', 'eigs', (['matrix_m'], {'k': '(1)', 'which': '"""SR"""', 'tol': '(0.0001)'}), "(matrix_m, k=1, which='SR', tol=0.0001)\n", (4800, 4839), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((5113, 5155), 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32'], {'shape': '(dim, 1)'}), '(tf.float32, shape=(dim, 1))\n', (5127, 5155), True, 'import tensorflow as tf\n'), ((5465, 5517), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(dim, dim)'], {'matvec': 'np_vector_prod_fn'}), '((dim, dim), matvec=np_vector_prod_fn)\n', (5479, 5517), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((5649, 5699), 'scipy.sparse.linalg.eigs', 'eigs', (['linear_operator'], {'k': '(1)', 'which': '"""SR"""', 'tol': '(0.0001)'}), "(linear_operator, k=1, which='SR', tol=0.0001)\n", (5653, 5699), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((6136, 6210), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(self.dual_object.matrix_m_dimension, 1)'}), '(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))\n', (6150, 6210), True, 'import tensorflow as tf\n'), ((6389, 6432), 'tensorflow.transpose', 'tf.transpose', (['self.stopped_eig_vec_estimate'], {}), '(self.stopped_eig_vec_estimate)\n', (6401, 6432), True, 'import tensorflow as tf\n'), ((7037, 7093), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7059, 7093), True, 'import tensorflow as tf\n'), ((7722, 7755), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7753, 7755), True, 'import tensorflow as tf\n'), ((9242, 9285), 'tensorflow.gfile.MkDir', 'tf.gfile.MkDir', (["self.params['stats_folder']"], {}), "(self.params['stats_folder'])\n", (9256, 9285), True, 'import tensorflow as tf\n'), ((10120, 10232), 'tensorflow.logging.info', 'tf.logging.info', (['"""Inner step: %d, current value of certificate: %f"""', 'self.current_step', 'current_certificate'], {}), "('Inner step: %d, current value of certificate: %f', self.\n current_step, current_certificate)\n", (10135, 10232), True, 'import tensorflow as tf\n'), ((12129, 12226), 'tensorflow.logging.debug', 'tf.logging.debug', (['"""Current inner step: %d, optimization stats: %s"""', 'self.current_step', 'stats'], {}), "('Current inner step: %d, optimization stats: %s', self.\n current_step, stats)\n", (12145, 12226), True, 'import tensorflow as tf\n'), ((13069, 13168), 'tensorflow.logging.info', 'tf.logging.info', (['"""Running outer step %d with penalty %f"""', 'self.current_outer_step', 'penalty_val'], {}), "('Running outer step %d with penalty %f', self.\n current_outer_step, penalty_val)\n", (13084, 13168), True, 'import tensorflow as tf\n'), ((4926, 4950), 'numpy.real', 'np.real', (['min_eig_vec_val'], {}), '(min_eig_vec_val)\n', (4933, 4950), True, 'import numpy as np\n'), ((4973, 5016), 'numpy.reshape', 'np.reshape', (['estimated_eigen_vector', '[-1, 1]'], {}), '(estimated_eigen_vector, [-1, 1])\n', (4983, 5016), True, 'import numpy as np\n'), ((5286, 5316), 'numpy.reshape', 'np.reshape', (['np_vector', '[-1, 1]'], {}), '(np_vector, [-1, 1])\n', (5296, 5316), True, 'import numpy as np\n'), ((5786, 5810), 'numpy.real', 'np.real', (['min_eig_vec_val'], {}), '(min_eig_vec_val)\n', (5793, 5810), True, 'import numpy as np\n'), ((5833, 5876), 'numpy.reshape', 'np.reshape', (['estimated_eigen_vector', '[-1, 1]'], {}), '(estimated_eigen_vector, [-1, 1])\n', (5843, 5876), True, 'import numpy as np\n'), ((7165, 7224), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7190, 7224), True, 'import tensorflow as tf\n'), ((9184, 
9233), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (["self.params['stats_folder']"], {}), "(self.params['stats_folder'])\n", (9204, 9233), True, 'import tensorflow as tf\n'), ((10455, 10506), 'tensorflow.logging.info', 'tf.logging.info', (['"""Found certificate of robustness!"""'], {}), "('Found certificate of robustness!')\n", (10470, 10506), True, 'import tensorflow as tf\n'), ((12311, 12328), 'json.dumps', 'json.dumps', (['stats'], {}), '(stats)\n', (12321, 12328), False, 'import json\n'), ((13484, 13547), 'numpy.random.random', 'np.random.random', ([], {'size': '(1 + self.dual_object.dual_index[-1], 1)'}), '(size=(1 + self.dual_object.dual_index[-1], 1))\n', (13500, 13547), True, 'import numpy as np\n'), ((14669, 14706), 'numpy.abs', 'np.abs', (['self.current_eig_val_estimate'], {}), '(self.current_eig_val_estimate)\n', (14675, 14706), True, 'import numpy as np\n'), ((3166, 3187), 'tensorflow.sqrt', 'tf.sqrt', (['exp_eig_vals'], {}), '(exp_eig_vals)\n', (3173, 3187), True, 'import tensorflow as tf\n'), ((6720, 6784), 'tensorflow.maximum', 'tf.maximum', (['(-self.penalty_placeholder * self.eig_val_estimate)', '(0)'], {}), '(-self.penalty_placeholder * self.eig_val_estimate, 0)\n', (6730, 6784), True, 'import tensorflow as tf\n'), ((7297, 7425), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'self.learning_rate', 'momentum': "self.params['momentum_parameter']", 'use_nesterov': '(True)'}), "(learning_rate=self.learning_rate, momentum=self.\n params['momentum_parameter'], use_nesterov=True)\n", (7323, 7425), True, 'import tensorflow as tf\n'), ((7485, 7552), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7518, 7552), True, 'import tensorflow as tf\n'), ((8979, 9020), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_lu[i]'], {}), '(self.dual_object.lambda_lu[i])\n', (8989, 9020), True, 'import tensorflow as tf\n'), ((12469, 12492), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['filename'], {}), '(filename)\n', (12482, 12492), True, 'import tensorflow as tf\n'), ((8085, 8171), 'tensorflow.multiply', 'tf.multiply', (['self.dual_object.positive_indices[i]', 'self.dual_object.lambda_pos[i]'], {}), '(self.dual_object.positive_indices[i], self.dual_object.\n lambda_pos[i])\n', (8096, 8171), True, 'import tensorflow as tf\n'), ((8387, 8473), 'tensorflow.multiply', 'tf.multiply', (['self.dual_object.negative_indices[i]', 'self.dual_object.lambda_neg[i]'], {}), '(self.dual_object.negative_indices[i], self.dual_object.\n lambda_neg[i])\n', (8398, 8473), True, 'import tensorflow as tf\n'), ((8821, 8864), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_quad[i]'], {}), '(self.dual_object.lambda_quad[i])\n', (8831, 8864), True, 'import tensorflow as tf\n'), ((8270, 8312), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_pos[i]'], {}), '(self.dual_object.lambda_pos[i])\n', (8280, 8312), True, 'import tensorflow as tf\n'), ((8572, 8614), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_neg[i]'], {}), '(self.dual_object.lambda_neg[i])\n', (8582, 8614), True, 'import tensorflow as tf\n')]
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["ExplanationOfBenefitStatus"]
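# The CodeSystem definition is loaded from the JSON file that sits next to this module.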
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class ExplanationOfBenefitStatus:
    """
    ExplanationOfBenefitStatus
    A code specifying the state of the resource instance.
    Status: draft - Version: 4.0.1
    Copyright None
    http://hl7.org/fhir/explanationofbenefit-status
    """

    active = CodeSystemConcept(
        {
            "code": "active",
            "definition": "The resource instance is currently in-force.",
            "display": "Active",
        }
    )
    """
    Active
    The resource instance is currently in-force.
    """

    cancelled = CodeSystemConcept(
        {
            "code": "cancelled",
            "definition": "The resource instance is withdrawn, rescinded or reversed.",
            "display": "Cancelled",
        }
    )
    """
    Cancelled
    The resource instance is withdrawn, rescinded or reversed.
    """

    draft = CodeSystemConcept(
        {
            "code": "draft",
            "definition": "A new resource instance the contents of which is not complete.",
            "display": "Draft",
        }
    )
    """
    Draft
    A new resource instance the contents of which is not complete.
    """

    entered_in_error = CodeSystemConcept(
        {
            "code": "entered-in-error",
            "definition": "The resource instance was entered in error.",
            "display": "Entered In Error",
        }
    )
    """
    Entered In Error
    The resource instance was entered in error.
    """

    class Meta:
        resource = _resource
|
[
"pathlib.Path",
"oops_fhir.utils.CodeSystemConcept"
] |
[((502, 626), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'active', 'definition':\n 'The resource instance is currently in-force.', 'display': 'Active'}"], {}), "({'code': 'active', 'definition':\n 'The resource instance is currently in-force.', 'display': 'Active'})\n", (519, 626), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((778, 926), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'cancelled', 'definition':\n 'The resource instance is withdrawn, rescinded or reversed.', 'display':\n 'Cancelled'}"], {}), "({'code': 'cancelled', 'definition':\n 'The resource instance is withdrawn, rescinded or reversed.', 'display':\n 'Cancelled'})\n", (795, 926), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((1087, 1231), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'draft', 'definition':\n 'A new resource instance the contents of which is not complete.',\n 'display': 'Draft'}"], {}), "({'code': 'draft', 'definition':\n 'A new resource instance the contents of which is not complete.',\n 'display': 'Draft'})\n", (1104, 1231), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((1403, 1550), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'entered-in-error', 'definition':\n 'The resource instance was entered in error.', 'display':\n 'Entered In Error'}"], {}), "({'code': 'entered-in-error', 'definition':\n 'The resource instance was entered in error.', 'display':\n 'Entered In Error'})\n", (1420, 1550), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((200, 214), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (204, 214), False, 'from pathlib import Path\n')]
|
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram import Bot, Dispatcher
from config import TOKEN
import logging
# logging level
logging.basicConfig(level=logging.INFO)
# FSM data storage
storage = MemoryStorage()
# bot initialization
bot = Bot(token=TOKEN, parse_mode='html')
dp = Dispatcher(bot, storage=storage)
|
[
"aiogram.contrib.fsm_storage.memory.MemoryStorage",
"aiogram.Bot",
"aiogram.Dispatcher",
"logging.basicConfig"
] |
[((168, 207), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (187, 207), False, 'import logging\n'), ((250, 265), 'aiogram.contrib.fsm_storage.memory.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (263, 265), False, 'from aiogram.contrib.fsm_storage.memory import MemoryStorage\n'), ((297, 332), 'aiogram.Bot', 'Bot', ([], {'token': 'TOKEN', 'parse_mode': '"""html"""'}), "(token=TOKEN, parse_mode='html')\n", (300, 332), False, 'from aiogram import Bot, Dispatcher\n'), ((339, 371), 'aiogram.Dispatcher', 'Dispatcher', (['bot'], {'storage': 'storage'}), '(bot, storage=storage)\n', (349, 371), False, 'from aiogram import Bot, Dispatcher\n')]
|
# -*- coding:utf-8 -*-
# __author__ = '<NAME>'
# Admin backend entry point
from flask import Blueprint
admin = Blueprint('admin', __name__, template_folder='./templates', static_folder='./static')
theme = 'default'
from . import views
|
[
"flask.Blueprint"
] |
[((96, 186), 'flask.Blueprint', 'Blueprint', (['"""admin"""', '__name__'], {'template_folder': '"""./templates"""', 'static_folder': '"""./static"""'}), "('admin', __name__, template_folder='./templates', static_folder=\n './static')\n", (105, 186), False, 'from flask import Blueprint\n')]
|
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_count_except(counter, exception):
    keys = []
    results = []
    for key in counter:
        if key == exception:
            continue
        keys.append(key)
        results.append(counter[key])
    return keys, results

def count_cols(series):
    counter = Counter()
    for item in series:
        counter.update(item.split(', '))
    return counter

def generate_hbar(x, y, xlabel, ylabel, title):
    plt.figure(figsize=(20, 10))
    x_pos = [i for i, _ in enumerate(x)]
    plt.barh(x_pos, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.yticks(x_pos, x)
    plt.title(title)
    plt.show()

def generate_hbar_subplot(x, y, xlabel, ylabel, title, ax):
    x_pos = [i for i, _ in enumerate(x)]
    ax.barh(x_pos, y)
    ax.set(xlabel=xlabel)
    ax.set(ylabel=ylabel)
    ax.set_yticks(x_pos)
    ax.set_yticklabels(x)
    ax.set_title(title)

def get_nested_values(series):
    results = set()
    for item in series:
        results.update(item.split(', '))
    return list(results)
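# The filter_by_* helpers build a boolean mask selecting rows whose comma-separated column contains the given value.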
def filter_by_language(df, value):
    result = []
    for col in df['language_used']:
        if value in col.split(', '):
            result.append(True)
        else:
            result.append(False)
    return result

def filter_by_ide(df, value):
    result = []
    for col in df['ide_used']:
        if value in col.split(', '):
            result.append(True)
        else:
            result.append(False)
    return result

def filter_by_field(field, df, value):
    result = []
    for col in df[field]:
        if value in col.split(', '):
            result.append(True)
        else:
            result.append(False)
    return result
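# Read the survey TSV, rename the long question headings to short column names, and fill missing answers with 'n/a'.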
def load_data(file_name):
    df = pd.read_csv("devkami_survey.tsv", sep="\t")
    remap_columns = {
        'What language do you use': 'language_used',
        'What kind of application do you build?': 'application_built',
        'What OS you deployed to': 'os_deployed',
        'What OS you write your code on': 'os_coded',
        'What IDE do you use': 'ide_used',
        'What Version control do you use': 'vcs_used',
        'How do you test your application?': 'app_test',
        'Tell us more about your development setup. Tell us things like the plugin you use on your IDE, whether you use docker or kubernetes, do you code using remote development tools etc.': 'dev_setup',
        'Tell us about your computer. Tell us about the spec, which model etc': 'computer_model',
        'How do you deploy your application? Tell us whether you build an docker image or use a script etc.': 'deploy_method',
        'What issue tracker you use in your team?': 'tracker_used',
        'Do you do standup in your work place': 'standup',
        'Do your team do sprint planning': 'sprint_planning',
        'Tell us more about your development process. What else your team do other than standup and sprint planning': 'dev_process',
    }
    df.rename(columns=remap_columns, inplace=True)
    df.replace(np.nan, 'n/a', inplace=True)
    return df
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.figure",
"collections.Counter",
"matplotlib.pyplot.xlabel"
] |
[((379, 388), 'collections.Counter', 'Counter', ([], {}), '()\n', (386, 388), False, 'from collections import Counter\n'), ((527, 555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (537, 555), True, 'import matplotlib.pyplot as plt\n'), ((600, 618), 'matplotlib.pyplot.barh', 'plt.barh', (['x_pos', 'y'], {}), '(x_pos, y)\n', (608, 618), True, 'import matplotlib.pyplot as plt\n'), ((623, 641), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (633, 641), True, 'import matplotlib.pyplot as plt\n'), ((646, 664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (656, 664), True, 'import matplotlib.pyplot as plt\n'), ((669, 689), 'matplotlib.pyplot.yticks', 'plt.yticks', (['x_pos', 'x'], {}), '(x_pos, x)\n', (679, 689), True, 'import matplotlib.pyplot as plt\n'), ((694, 710), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (703, 710), True, 'import matplotlib.pyplot as plt\n'), ((715, 725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (723, 725), True, 'import matplotlib.pyplot as plt\n'), ((1811, 1854), 'pandas.read_csv', 'pd.read_csv', (['"""devkami_survey.tsv"""'], {'sep': '"""\t"""'}), "('devkami_survey.tsv', sep='\\t')\n", (1822, 1854), True, 'import pandas as pd\n')]
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_health.test_run_aggregator import Status
from openstack_health.tests import base
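# Status maps a subunit result string onto is_success / is_failure / is_skip; anything unrecognised counts as a skip.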
class TestStatus(base.TestCase):

    def test_that_success_string_translates_to_success(self):
        status = Status('success')
        self.assertEqual(True, status.is_success)
        self.assertEqual(False, status.is_failure)
        self.assertEqual(False, status.is_skip)

    def test_that_xfail_string_translates_to_success(self):
        status = Status('xfail')
        self.assertEqual(True, status.is_success)
        self.assertEqual(False, status.is_failure)
        self.assertEqual(False, status.is_skip)

    def test_that_fail_string_translates_to_failure(self):
        status = Status('fail')
        self.assertEqual(False, status.is_success)
        self.assertEqual(True, status.is_failure)
        self.assertEqual(False, status.is_skip)

    def test_that_unxsuccess_string_translates_to_failure(self):
        status = Status('unxsuccess')
        self.assertEqual(False, status.is_success)
        self.assertEqual(True, status.is_failure)
        self.assertEqual(False, status.is_skip)

    def test_that_null_translates_to_skip(self):
        status = Status(None)
        self.assertEqual(False, status.is_success)
        self.assertEqual(False, status.is_failure)
        self.assertEqual(True, status.is_skip)

    def test_that_an_empty_string_translates_to_skip(self):
        status = Status('')
        self.assertEqual(False, status.is_success)
        self.assertEqual(False, status.is_failure)
        self.assertEqual(True, status.is_skip)

    def test_that_a_random_string_translates_to_skip(self):
        status = Status('$random1234')
        self.assertEqual(False, status.is_success)
        self.assertEqual(False, status.is_failure)
        self.assertEqual(True, status.is_skip)
|
[
"openstack_health.test_run_aggregator.Status"
] |
[((816, 833), 'openstack_health.test_run_aggregator.Status', 'Status', (['"""success"""'], {}), "('success')\n", (822, 833), False, 'from openstack_health.test_run_aggregator import Status\n'), ((1061, 1076), 'openstack_health.test_run_aggregator.Status', 'Status', (['"""xfail"""'], {}), "('xfail')\n", (1067, 1076), False, 'from openstack_health.test_run_aggregator import Status\n'), ((1303, 1317), 'openstack_health.test_run_aggregator.Status', 'Status', (['"""fail"""'], {}), "('fail')\n", (1309, 1317), False, 'from openstack_health.test_run_aggregator import Status\n'), ((1550, 1570), 'openstack_health.test_run_aggregator.Status', 'Status', (['"""unxsuccess"""'], {}), "('unxsuccess')\n", (1556, 1570), False, 'from openstack_health.test_run_aggregator import Status\n'), ((1787, 1799), 'openstack_health.test_run_aggregator.Status', 'Status', (['None'], {}), '(None)\n', (1793, 1799), False, 'from openstack_health.test_run_aggregator import Status\n'), ((2027, 2037), 'openstack_health.test_run_aggregator.Status', 'Status', (['""""""'], {}), "('')\n", (2033, 2037), False, 'from openstack_health.test_run_aggregator import Status\n'), ((2265, 2286), 'openstack_health.test_run_aggregator.Status', 'Status', (['"""$random1234"""'], {}), "('$random1234')\n", (2271, 2286), False, 'from openstack_health.test_run_aggregator import Status\n')]
|
"""Test NMS.
Run the examples described in `ONNX docs`_.
.. _ONNX docs: https://github.com/onnx/onnx/blob/main/docs/Operators.md#NonMaxSuppression
"""
# import pytest
import numpy as np
import box_utils._c.box_nms as box_nms
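# Each test mirrors an ONNX NonMaxSuppression example: build boxes and scores, run box_nms, and compare the selected indices.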
def test_nms_suppress_by_iou():
    """Test NMS - suppress by IoU."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.1, 1.0, 1.1],
        [0.0, -0.1, 1.0, 0.9],
        [0.0, 10.0, 1.0, 11.0],
        [0.0, 10.1, 1.0, 11.1],
        [0.0, 100.0, 1.0, 101.0]
    ]]).astype(np.float32)
    scores = np.array([[[
        0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([3]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array(
        [[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)

def test_nms_suppress_by_IOU_and_scores():
    """Test NMS - suppress by IoU and scores."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.1, 1.0, 1.1],
        [0.0, -0.1, 1.0, 0.9],
        [0.0, 10.0, 1.0, 11.0],
        [0.0, 10.1, 1.0, 11.1],
        [0.0, 100.0, 1.0, 101.0]
    ]]).astype(np.float32)
    scores = np.array(
        [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([3]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.4]).astype(np.float32)
    selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)

def test_nms_single_box():
    """Test NMS - single box."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0]
    ]]).astype(np.float32)
    scores = np.array([[[0.9]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([3]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)

def test_nms_identical_boxes():
    """Test NMS - identical boxes."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0]
    ]]).astype(np.float32)
    scores = np.array([[[
        0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9
    ]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([3]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)
def test_nms_limit_output_size():
    """Test NMS - limit output size."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.1, 1.0, 1.1],
        [0.0, -0.1, 1.0, 0.9],
        [0.0, 10.0, 1.0, 11.0],
        [0.0, 10.1, 1.0, 11.1],
        [0.0, 100.0, 1.0, 101.0]
    ]]).astype(np.float32)
    scores = np.array([[[
        0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([2]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)

def test_nms_two_batches():
    """Test NMS - two batches."""
    # --
    boxes = np.array([[[0.0, 0.0, 1.0, 1.0],
                       [0.0, 0.1, 1.0, 1.1],
                       [0.0, -0.1, 1.0, 0.9],
                       [0.0, 10.0, 1.0, 11.0],
                       [0.0, 10.1, 1.0, 11.1],
                       [0.0, 100.0, 1.0, 101.0]],
                      [[0.0, 0.0, 1.0, 1.0],
                       [0.0, 0.1, 1.0, 1.1],
                       [0.0, -0.1, 1.0, 0.9],
                       [0.0, 10.0, 1.0, 11.0],
                       [0.0, 10.1, 1.0, 11.1],
                       [0.0, 100.0, 1.0, 101.0]]]).astype(np.float32)
    scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],
                       [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([2]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array([
        [0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)

def test_nms_two_classes():
    """Test NMS - two classes."""
    # --
    boxes = np.array([[
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.1, 1.0, 1.1],
        [0.0, -0.1, 1.0, 0.9],
        [0.0, 10.0, 1.0, 11.0],
        [0.0, 10.1, 1.0, 11.1],
        [0.0, 100.0, 1.0, 101.0]
    ]]).astype(np.float32)
    scores = np.array([[
        [0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
        [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([2]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.0]).astype(np.float32)
    selected_indices = np.array([
        [0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]).astype(np.int64)
    # --
    result = box_nms.ltrb_nms(
        boxes, scores,
        score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
    np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format():
"""Test NMS - center-point box format."""
# --
boxes = np.array([[
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_flipped_coordinates():
"""Test NMS - flipped coordinates."""
# --
boxes = np.array([[
[1.0, 1.0, 0.0, 0.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, 0.9, 1.0, -0.1],
[0.0, 10.0, 1.0, 11.0],
[1.0, 10.1, 0.0, 11.1],
[1.0, 101.0, 0.0, 100.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
# ---------------------------------------------------------
# box_nms can also be called with inputs that omit the batch and/or class dimensions.
# ---------------------------------------------------------
def test_nms_suppress_by_iou_nobatch():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array(
[[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_noclass():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_notopk():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([-1]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_classes_nobatch():
"""Test NMS - two classes."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [1, 3], [1, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_nobatch():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_noclass():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array(
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
|
[
"box_utils._c.box_nms.ltrb_nms",
"numpy.testing.assert_array_equal",
"box_utils._c.box_nms.xywh_nms",
"numpy.array"
] |
[((928, 1032), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (944, 1032), True, 'import box_utils._c.box_nms as box_nms\n'), ((1051, 1106), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (1080, 1106), True, 'import numpy as np\n'), ((1809, 1913), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (1825, 1913), True, 'import box_utils._c.box_nms as box_nms\n'), ((1932, 1987), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (1961, 1987), True, 'import numpy as np\n'), ((2452, 2556), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (2468, 2556), True, 'import box_utils._c.box_nms as box_nms\n'), ((2575, 2630), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (2604, 2630), True, 'import numpy as np\n'), ((3435, 3539), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (3451, 3539), True, 'import box_utils._c.box_nms as box_nms\n'), ((3558, 3613), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (3587, 3613), True, 'import numpy as np\n'), ((4298, 4402), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (4314, 4402), True, 'import box_utils._c.box_nms as box_nms\n'), ((4421, 4476), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (4450, 4476), True, 'import numpy as np\n'), ((5536, 5640), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (5552, 5640), True, 'import box_utils._c.box_nms as box_nms\n'), ((5659, 5714), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (5688, 5714), True, 'import numpy as np\n'), ((6460, 6564), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (6476, 6564), True, 'import box_utils._c.box_nms as box_nms\n'), ((6583, 6638), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (6612, 6638), True, 'import numpy as np\n'), ((7350, 7454), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (7366, 7454), True, 'import box_utils._c.box_nms as box_nms\n'), ((7473, 7528), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (7502, 7528), True, 'import numpy as np\n'), ((8219, 8323), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (8235, 8323), True, 'import box_utils._c.box_nms as box_nms\n'), ((8342, 8397), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (8371, 8397), True, 'import numpy as np\n'), ((9257, 9361), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (9273, 9361), True, 'import box_utils._c.box_nms as box_nms\n'), ((9380, 9435), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (9409, 9435), True, 'import numpy as np\n'), ((10105, 10209), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (10121, 10209), True, 'import box_utils._c.box_nms as box_nms\n'), ((10228, 10283), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (10257, 10283), True, 'import numpy as np\n'), ((10953, 11057), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (10969, 11057), True, 'import box_utils._c.box_nms as box_nms\n'), ((11076, 11131), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (11105, 11131), True, 'import numpy as np\n'), ((11869, 11973), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (11885, 11973), True, 'import box_utils._c.box_nms as box_nms\n'), ((11992, 12047), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (12021, 12047), True, 'import numpy as np\n'), ((12754, 12858), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], 
iou_threshold[0],\n max_output_boxes_per_class[0])\n', (12770, 12858), True, 'import box_utils._c.box_nms as box_nms\n'), ((12877, 12932), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (12906, 12932), True, 'import numpy as np\n'), ((13613, 13717), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (13629, 13717), True, 'import box_utils._c.box_nms as box_nms\n'), ((13736, 13791), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (13765, 13791), True, 'import numpy as np\n'), ((321, 484), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (329, 484), True, 'import numpy as np\n'), ((561, 607), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (569, 607), True, 'import numpy as np\n'), ((669, 682), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (677, 682), True, 'import numpy as np\n'), ((720, 735), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (728, 735), True, 'import numpy as np\n'), ((777, 792), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (785, 792), True, 'import numpy as np\n'), ((835, 878), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (843, 878), True, 'import numpy as np\n'), ((1222, 1385), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (1230, 1385), True, 'import numpy as np\n'), ((1462, 1508), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (1470, 1508), True, 'import numpy as np\n'), ((1570, 1583), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1578, 1583), True, 'import numpy as np\n'), ((1621, 1636), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1629, 1636), True, 'import numpy as np\n'), ((1678, 1693), 'numpy.array', 'np.array', (['[0.4]'], {}), '([0.4])\n', (1686, 1693), True, 'import numpy as np\n'), ((1736, 1768), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0]])\n', (1744, 1768), True, 'import numpy as np\n'), ((2071, 2105), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0]]])\n', (2079, 2105), True, 'import numpy as np\n'), ((2152, 2171), 'numpy.array', 'np.array', (['[[[0.9]]]'], {}), '([[[0.9]]])\n', (2160, 2171), True, 'import numpy as np\n'), ((2224, 2237), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2232, 2237), True, 'import numpy as np\n'), ((2275, 2290), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (2283, 2290), True, 'import numpy as np\n'), ((2332, 2347), 'numpy.array', 
'np.array', (['[0.0]'], {}), '([0.0])\n', (2340, 2347), True, 'import numpy as np\n'), ((2390, 2411), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2398, 2411), True, 'import numpy as np\n'), ((2724, 2969), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, \n 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, \n 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, \n 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0,\n 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, \n 1.0, 1.0]]])\n', (2732, 2969), True, 'import numpy as np\n'), ((3076, 3140), 'numpy.array', 'np.array', (['[[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]'], {}), '([[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]])\n', (3084, 3140), True, 'import numpy as np\n'), ((3207, 3220), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (3215, 3220), True, 'import numpy as np\n'), ((3258, 3273), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (3266, 3273), True, 'import numpy as np\n'), ((3315, 3330), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (3323, 3330), True, 'import numpy as np\n'), ((3373, 3394), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (3381, 3394), True, 'import numpy as np\n'), ((3711, 3874), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (3719, 3874), True, 'import numpy as np\n'), ((3951, 3997), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (3959, 3997), True, 'import numpy as np\n'), ((4059, 4072), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (4067, 4072), True, 'import numpy as np\n'), ((4110, 4125), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (4118, 4125), True, 'import numpy as np\n'), ((4167, 4182), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4175, 4182), True, 'import numpy as np\n'), ((4225, 4257), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0]])\n', (4233, 4257), True, 'import numpy as np\n'), ((4562, 4877), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]], [[\n 0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0,\n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]], [[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, \n 0.9], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,\n 101.0]]])\n', (4570, 4877), True, 'import numpy as np\n'), ((5111, 5198), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, \n 0.3]]])\n', (5119, 5198), True, 'import numpy as np\n'), ((5266, 5279), 'numpy.array', 'np.array', 
(['[2]'], {}), '([2])\n', (5274, 5279), True, 'import numpy as np\n'), ((5317, 5332), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (5325, 5332), True, 'import numpy as np\n'), ((5374, 5389), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (5382, 5389), True, 'import numpy as np\n'), ((5432, 5486), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]])\n', (5440, 5486), True, 'import numpy as np\n'), ((5800, 5963), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (5808, 5963), True, 'import numpy as np\n'), ((6040, 6125), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n )\n', (6048, 6125), True, 'import numpy as np\n'), ((6190, 6203), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (6198, 6203), True, 'import numpy as np\n'), ((6241, 6256), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (6249, 6256), True, 'import numpy as np\n'), ((6298, 6313), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (6306, 6313), True, 'import numpy as np\n'), ((6356, 6410), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]])\n', (6364, 6410), True, 'import numpy as np\n'), ((6748, 6900), 'numpy.array', 'np.array', (['[[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]]'], {}), '([[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]])\n', (6756, 6900), True, 'import numpy as np\n'), ((6983, 7029), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (6991, 7029), True, 'import numpy as np\n'), ((7091, 7104), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (7099, 7104), True, 'import numpy as np\n'), ((7142, 7157), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (7150, 7157), True, 'import numpy as np\n'), ((7199, 7214), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (7207, 7214), True, 'import numpy as np\n'), ((7257, 7300), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (7265, 7300), True, 'import numpy as np\n'), ((7630, 7793), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 0.0, 0.0], [0.0, 0.1, 1.0, 1.1], [0.0, 0.9, 1.0, -0.1], [0.0, \n 10.0, 1.0, 11.0], [1.0, 10.1, 0.0, 11.1], [1.0, 101.0, 0.0, 100.0]]]'], {}), '([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.1, 1.0, 1.1], [0.0, 0.9, 1.0, -0.1\n ], [0.0, 10.0, 1.0, 11.0], [1.0, 10.1, 0.0, 11.1], [1.0, 101.0, 0.0, \n 100.0]]])\n', (7638, 7793), True, 'import numpy as np\n'), ((7870, 7916), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (7878, 7916), True, 'import numpy as np\n'), ((7969, 7982), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (7977, 7982), True, 'import numpy as np\n'), ((8020, 8035), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (8028, 
8035), True, 'import numpy as np\n'), ((8077, 8092), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8085, 8092), True, 'import numpy as np\n'), ((8135, 8178), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (8143, 8178), True, 'import numpy as np\n'), ((8663, 8818), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (8671, 8818), True, 'import numpy as np\n'), ((8901, 8945), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (8909, 8945), True, 'import numpy as np\n'), ((9007, 9020), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (9015, 9020), True, 'import numpy as np\n'), ((9058, 9073), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (9066, 9073), True, 'import numpy as np\n'), ((9115, 9130), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9123, 9130), True, 'import numpy as np\n'), ((9173, 9207), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [0, 5]]'], {}), '([[0, 3], [0, 0], [0, 5]])\n', (9181, 9207), True, 'import numpy as np\n'), ((9537, 9692), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (9545, 9692), True, 'import numpy as np\n'), ((9775, 9817), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (9783, 9817), True, 'import numpy as np\n'), ((9879, 9892), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (9887, 9892), True, 'import numpy as np\n'), ((9930, 9945), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (9938, 9945), True, 'import numpy as np\n'), ((9987, 10002), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9995, 10002), True, 'import numpy as np\n'), ((10045, 10064), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (10053, 10064), True, 'import numpy as np\n'), ((10384, 10539), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (10392, 10539), True, 'import numpy as np\n'), ((10622, 10664), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (10630, 10664), True, 'import numpy as np\n'), ((10726, 10740), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (10734, 10740), True, 'import numpy as np\n'), ((10778, 10793), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (10786, 10793), True, 'import numpy as np\n'), ((10835, 10850), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (10843, 10850), True, 'import numpy as np\n'), ((10893, 10912), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (10901, 10912), True, 'import numpy as np\n'), ((11225, 11380), 'numpy.array', 'np.array', 
(['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (11233, 11380), True, 'import numpy as np\n'), ((11463, 11541), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (11471, 11541), True, 'import numpy as np\n'), ((11611, 11624), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (11619, 11624), True, 'import numpy as np\n'), ((11662, 11677), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (11670, 11677), True, 'import numpy as np\n'), ((11719, 11734), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (11727, 11734), True, 'import numpy as np\n'), ((11777, 11819), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [1, 3], [1, 0]]'], {}), '([[0, 3], [0, 0], [1, 3], [1, 0]])\n', (11785, 11819), True, 'import numpy as np\n'), ((12165, 12315), 'numpy.array', 'np.array', (['[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]'], {}), '([[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]])\n', (12173, 12315), True, 'import numpy as np\n'), ((12398, 12442), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (12406, 12442), True, 'import numpy as np\n'), ((12504, 12517), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (12512, 12517), True, 'import numpy as np\n'), ((12555, 12570), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (12563, 12570), True, 'import numpy as np\n'), ((12612, 12627), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (12620, 12627), True, 'import numpy as np\n'), ((12670, 12704), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [0, 5]]'], {}), '([[0, 3], [0, 0], [0, 5]])\n', (12678, 12704), True, 'import numpy as np\n'), ((13050, 13200), 'numpy.array', 'np.array', (['[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]'], {}), '([[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]])\n', (13058, 13200), True, 'import numpy as np\n'), ((13283, 13325), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (13291, 13325), True, 'import numpy as np\n'), ((13387, 13400), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (13395, 13400), True, 'import numpy as np\n'), ((13438, 13453), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (13446, 13453), True, 'import numpy as np\n'), ((13495, 13510), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (13503, 13510), True, 'import numpy as np\n'), ((13553, 13572), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (13561, 13572), True, 'import numpy as np\n')]
|
from collections import OrderedDict
def genListCombinaison(P4Graph):
""" Generate a set of tuples from graph.
Each tuple represents possible active headers at the same time
"""
combinaison = []
return combinaison
def genListHeaders(P4Graph):
""" Generate the dictionnary of headers.
P4Graph : JSon imported file
Name, size
"""
headers = OrderedDict()
return headers
def genBitPos(combinaison, Entete, bus_width=64, muxNum=0, Payload=False):
""" Gen list of unique tuple (name, pos, [Etat]) of the muxNum.
Each tuple correspond to the bit of a protocol that have to be connected
to the mux
"""
def GetPosTuple(nom, pos, liste):
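        """Return the index of the (nom, pos) entry in liste, or 0 if it is absent."""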
        for e, v in enumerate(liste):
if v[0] == nom and v[1] == pos:
return e
return 0
if bus_width <= muxNum:
raise ValueError("bus width {} smaller than mux number :{}".format(
bus_width, muxNum))
listeEntree = []
EtatAssocie = []
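    # listeEntree collects the unique (header name, bit position) inputs of this mux;
    # EtatAssocie[i] lists the combination indices in which input i is used.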
for combeNum, comb in enumerate(combinaison):
pos = muxNum
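        # walk the bits of each header in steps of bus_width, starting at this mux's offset on the bus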
for j in comb:
while pos < Entete[j]:
if (j, pos) not in listeEntree:
listeEntree.append((j, pos))
EtatAssocie.append([])
EtatAssocie[-1].append(combeNum)
else:
e = GetPosTuple(j, pos, listeEntree)
if combeNum not in EtatAssocie[e]:
EtatAssocie[e].append(combeNum)
else:
print("{}, {}".format(j, pos))
pos += bus_width
pos -= Entete[j]
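        # after the last header, the remaining offset points into the payload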
if Payload:
if ("Payload", pos) not in listeEntree:
listeEntree.append(("Payload", pos))
EtatAssocie.append([])
EtatAssocie[-1].append(combeNum)
else:
e = GetPosTuple("Payload", pos, listeEntree)
if combeNum not in EtatAssocie[e]:
EtatAssocie[e].append(combeNum)
else:
print("{}, {}".format(j, pos))
for i, tup in enumerate(listeEntree):
newTup = (tup[0], tup[1], EtatAssocie[i])
listeEntree[i] = newTup
return listeEntree
def sortListBitTuple(liste, headers):
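    """Sort the (name, pos, Etat) tuples by bit position, keeping headers in the order given."""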
output = []
def takeSecond(elem):
return elem[1]
for entete in headers:
tmp = []
for nom, pos, etat in liste:
if nom == entete:
tmp.append((nom, pos, etat))
tmp.sort(key=takeSecond)
output.extend(tmp)
return output
|
[
"collections.OrderedDict"
] |
[((381, 394), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (392, 394), False, 'from collections import OrderedDict\n')]
|
import pytest
from rut_chile import rut_chile
class TestIsValidRutTests:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError),
("1.111K", ValueError),
(".1", ValueError),
("123.K", ValueError),
("123.12-K", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.is_valid_rut(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("9868503-1", False),
("21518268-2", False),
("17175325-3", False),
("20930576-4", False),
("13402128-5", False),
("20737522-6", False),
("6842256-7", False),
("14983005-8", False),
("20247667-9", False),
("17832479-k", False),
("12667869-0", False)
])
def test_invalid_rut(self, test_input, expected_value):
assert rut_chile.is_valid_rut(test_input) == expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("00", True),
("0-0", True),
("1-9", True),
("98685030", True),
("9868503-0", True),
("9.868.503-0", True),
("21518268-1", True),
("17175325-2", True),
("20930576-3", True),
("13402128-4", True),
("20737522-5", True),
("6842256-6", True),
("14983005-7", True),
("20247667-8", True),
("17832479-9", True),
("12667869-k", True),
("12667869-K", True),
("12.667.869-K", True),
("12.667.869-k", True)
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.is_valid_rut(test_input) == expected_value
class TestGetVerificationDigit:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1k", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("12312-K", ValueError),
("12.312-K", ValueError),
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.get_verification_digit(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("0", "0"),
("1", "9"),
("9868503", "0"),
("21518268", "1"),
("17175325", "2"),
("20930576", "3"),
("13402128", "4"),
("20737522", "5"),
("6842256", "6"),
("14983005", "7"),
("20247667", "8"),
("17832479", "9"),
("12667869", "k")
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.get_verification_digit(test_input) == expected_value
class TestGetCapitalizedVerificationDigit:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1k", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("12312-K", ValueError),
("12.312-K", ValueError),
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.get_capitalized_verification_digit(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("0", "0"),
("1", "9"),
("9868503", "0"),
("21518268", "1"),
("17175325", "2"),
("20930576", "3"),
("13402128", "4"),
("20737522", "5"),
("6842256", "6"),
("14983005", "7"),
("20247667", "8"),
("17832479", "9"),
("12667869", "K")
])
def test_valid_rut(self, test_input, expected_value):
digit = rut_chile.get_capitalized_verification_digit(test_input)
assert digit == expected_value
class TestFormatRutWithDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_rut_with_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1.234-5"),
("123456", "12.345-6"),
("1234567", "123.456-7"),
("12345678", "1.234.567-8"),
("123456789", "12.345.678-9"),
("123456789k", "123.456.789-k"),
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.format_rut_with_dots(test_input) == expected_value
class TestFormatCapitalizedRutWithDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_capitalized_rut_with_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1.234-5"),
("123456", "12.345-6"),
("1234567", "123.456-7"),
("12345678", "1.234.567-8"),
("123456789", "12.345.678-9"),
("123456789k", "123.456.789-K"),
])
def test_valid_rut(self, test_input, expected_value):
rut = rut_chile.format_capitalized_rut_with_dots(test_input)
assert rut == expected_value
class TestFormatRutWithoutDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_rut_without_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1234-5"),
("123456", "12345-6"),
("1234567", "123456-7"),
("12345678", "1234567-8"),
("123456789", "12345678-9"),
("123456789k", "123456789-k"),
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.format_rut_without_dots(test_input) == expected_value
class TestFormatCapitalizedRutWithoutDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_capitalized_rut_without_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1234-5"),
("123456", "12345-6"),
("1234567", "123456-7"),
("12345678", "1234567-8"),
("123456789", "12345678-9"),
("123456789k", "123456789-K"),
])
def test_valid_rut(self, test_input, expected_value):
rut = rut_chile.format_capitalized_rut_without_dots(test_input)
assert rut == expected_value
|
[
"rut_chile.rut_chile.get_capitalized_verification_digit",
"rut_chile.rut_chile.format_capitalized_rut_without_dots",
"rut_chile.rut_chile.format_capitalized_rut_with_dots",
"rut_chile.rut_chile.format_rut_with_dots",
"rut_chile.rut_chile.format_rut_without_dots",
"pytest.raises",
"rut_chile.rut_chile.get_verification_digit",
"rut_chile.rut_chile.is_valid_rut",
"pytest.mark.parametrize"
] |
[((81, 441), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('1', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('1.', ValueError), ('1.11', ValueError), ('1.111K',\n ValueError), ('.1', ValueError), ('123.K', ValueError), ('123.12-K',\n ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('1',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('1.', ValueError), ('1.11', ValueError), ('1.111K', ValueError), ('.1',\n ValueError), ('123.K', ValueError), ('123.12-K', ValueError)])\n", (104, 441), False, 'import pytest\n'), ((762, 1087), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('9868503-1', False), ('21518268-2', False), ('17175325-3', False), (\n '20930576-4', False), ('13402128-5', False), ('20737522-6', False), (\n '6842256-7', False), ('14983005-8', False), ('20247667-9', False), (\n '17832479-k', False), ('12667869-0', False)]"], {}), "('test_input, expected_value', [('9868503-1', False),\n ('21518268-2', False), ('17175325-3', False), ('20930576-4', False), (\n '13402128-5', False), ('20737522-6', False), ('6842256-7', False), (\n '14983005-8', False), ('20247667-9', False), ('17832479-k', False), (\n '12667869-0', False)])\n", (785, 1087), False, 'import pytest\n'), ((1297, 1778), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('00', True), ('0-0', True), ('1-9', True), ('98685030', True), (\n '9868503-0', True), ('9.868.503-0', True), ('21518268-1', True), (\n '17175325-2', True), ('20930576-3', True), ('13402128-4', True), (\n '20737522-5', True), ('6842256-6', True), ('14983005-7', True), (\n '20247667-8', True), ('17832479-9', True), ('12667869-k', True), (\n '12667869-K', True), ('12.667.869-K', True), ('12.667.869-k', True)]"], {}), "('test_input, expected_value', [('00', True), ('0-0',\n True), ('1-9', True), ('98685030', True), ('9868503-0', True), (\n '9.868.503-0', True), ('21518268-1', True), ('17175325-2', True), (\n '20930576-3', True), ('13402128-4', True), ('20737522-5', True), (\n '6842256-6', True), ('14983005-7', True), ('20247667-8', True), (\n '17832479-9', True), ('12667869-k', True), ('12667869-K', True), (\n '12.667.869-K', True), ('12.667.869-k', True)])\n", (1320, 1778), False, 'import pytest\n'), ((2073, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('1k', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('12312-K', ValueError), ('12.312-K', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('1k',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('12312-K', ValueError), ('12.312-K', ValueError)])\n", (2096, 2346), False, 'import pytest\n'), ((2650, 2954), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('0', '0'), ('1', '9'), ('9868503', '0'), ('21518268', '1'), ('17175325',\n '2'), ('20930576', '3'), ('13402128', '4'), ('20737522', '5'), (\n '6842256', '6'), ('14983005', '7'), ('20247667', '8'), ('17832479', '9'\n ), ('12667869', 'k')]"], {}), "('test_input, expected_value', [('0', '0'), ('1',\n 
'9'), ('9868503', '0'), ('21518268', '1'), ('17175325', '2'), (\n '20930576', '3'), ('13402128', '4'), ('20737522', '5'), ('6842256', '6'\n ), ('14983005', '7'), ('20247667', '8'), ('17832479', '9'), ('12667869',\n 'k')])\n", (2673, 2954), False, 'import pytest\n'), ((3233, 3506), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('1k', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('12312-K', ValueError), ('12.312-K', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('1k',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('12312-K', ValueError), ('12.312-K', ValueError)])\n", (3256, 3506), False, 'import pytest\n'), ((3822, 4126), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('0', '0'), ('1', '9'), ('9868503', '0'), ('21518268', '1'), ('17175325',\n '2'), ('20930576', '3'), ('13402128', '4'), ('20737522', '5'), (\n '6842256', '6'), ('14983005', '7'), ('20247667', '8'), ('17832479', '9'\n ), ('12667869', 'K')]"], {}), "('test_input, expected_value', [('0', '0'), ('1',\n '9'), ('9868503', '0'), ('21518268', '1'), ('17175325', '2'), (\n '20930576', '3'), ('13402128', '4'), ('20737522', '5'), ('6842256', '6'\n ), ('14983005', '7'), ('20247667', '8'), ('17832479', '9'), ('12667869',\n 'K')])\n", (3845, 4126), False, 'import pytest\n'), ((4425, 4689), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('ab', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('1.', ValueError), ('1.11', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('ab',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('1.', ValueError), ('1.11', ValueError)])\n", (4448, 4689), False, 'import pytest\n'), ((4990, 5275), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('12', '1-2'), ('123', '12-3'), ('1234', '123-4'), ('12345', '1.234-5'), (\n '123456', '12.345-6'), ('1234567', '123.456-7'), ('12345678',\n '1.234.567-8'), ('123456789', '12.345.678-9'), ('123456789k',\n '123.456.789-k')]"], {}), "('test_input, expected_value', [('12', '1-2'), (\n '123', '12-3'), ('1234', '123-4'), ('12345', '1.234-5'), ('123456',\n '12.345-6'), ('1234567', '123.456-7'), ('12345678', '1.234.567-8'), (\n '123456789', '12.345.678-9'), ('123456789k', '123.456.789-k')])\n", (5013, 5275), False, 'import pytest\n'), ((5522, 5786), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('ab', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('1.', ValueError), ('1.11', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('ab',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('1.', ValueError), ('1.11', ValueError)])\n", (5545, 5786), False, 'import pytest\n'), ((6099, 6384), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('12', '1-2'), ('123', '12-3'), ('1234', 
'123-4'), ('12345', '1.234-5'), (\n '123456', '12.345-6'), ('1234567', '123.456-7'), ('12345678',\n '1.234.567-8'), ('123456789', '12.345.678-9'), ('123456789k',\n '123.456.789-K')]"], {}), "('test_input, expected_value', [('12', '1-2'), (\n '123', '12-3'), ('1234', '123-4'), ('12345', '1.234-5'), ('123456',\n '12.345-6'), ('1234567', '123.456-7'), ('12345678', '1.234.567-8'), (\n '123456789', '12.345.678-9'), ('123456789k', '123.456.789-K')])\n", (6122, 6384), False, 'import pytest\n'), ((6653, 6917), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('ab', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('1.', ValueError), ('1.11', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('ab',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('1.', ValueError), ('1.11', ValueError)])\n", (6676, 6917), False, 'import pytest\n'), ((7221, 7497), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('12', '1-2'), ('123', '12-3'), ('1234', '123-4'), ('12345', '1234-5'), (\n '123456', '12345-6'), ('1234567', '123456-7'), ('12345678', '1234567-8'\n ), ('123456789', '12345678-9'), ('123456789k', '123456789-k')]"], {}), "('test_input, expected_value', [('12', '1-2'), (\n '123', '12-3'), ('1234', '123-4'), ('12345', '1234-5'), ('123456',\n '12345-6'), ('1234567', '123456-7'), ('12345678', '1234567-8'), (\n '123456789', '12345678-9'), ('123456789k', '123456789-k')])\n", (7244, 7497), False, 'import pytest\n'), ((7750, 8014), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[(None, ValueError), ('', ValueError), (' ', ValueError), ('k', ValueError),\n ('ab', ValueError), ('*', ValueError), ('1-', ValueError), ('.-',\n ValueError), ('1.', ValueError), ('1.11', ValueError)]"], {}), "('test_input, expected_value', [(None, ValueError),\n ('', ValueError), (' ', ValueError), ('k', ValueError), ('ab',\n ValueError), ('*', ValueError), ('1-', ValueError), ('.-', ValueError),\n ('1.', ValueError), ('1.11', ValueError)])\n", (7773, 8014), False, 'import pytest\n'), ((8330, 8606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_value"""', "[('12', '1-2'), ('123', '12-3'), ('1234', '123-4'), ('12345', '1234-5'), (\n '123456', '12345-6'), ('1234567', '123456-7'), ('12345678', '1234567-8'\n ), ('123456789', '12345678-9'), ('123456789k', '123456789-K')]"], {}), "('test_input, expected_value', [('12', '1-2'), (\n '123', '12-3'), ('1234', '123-4'), ('12345', '1234-5'), ('123456',\n '12345-6'), ('1234567', '123456-7'), ('12345678', '1234567-8'), (\n '123456789', '12345678-9'), ('123456789k', '123456789-K')])\n", (8353, 8606), False, 'import pytest\n'), ((4293, 4349), 'rut_chile.rut_chile.get_capitalized_verification_digit', 'rut_chile.get_capitalized_verification_digit', (['test_input'], {}), '(test_input)\n', (4337, 4349), False, 'from rut_chile import rut_chile\n'), ((6522, 6576), 'rut_chile.rut_chile.format_capitalized_rut_with_dots', 'rut_chile.format_capitalized_rut_with_dots', (['test_input'], {}), '(test_input)\n', (6564, 6576), False, 'from rut_chile import rut_chile\n'), ((8744, 8801), 'rut_chile.rut_chile.format_capitalized_rut_without_dots', 'rut_chile.format_capitalized_rut_without_dots', (['test_input'], {}), '(test_input)\n', (8789, 8801), 
False, 'from rut_chile import rut_chile\n'), ((622, 647), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (635, 647), False, 'import pytest\n'), ((670, 704), 'rut_chile.rut_chile.is_valid_rut', 'rut_chile.is_valid_rut', (['test_input'], {}), '(test_input)\n', (692, 704), False, 'from rut_chile import rut_chile\n'), ((1238, 1272), 'rut_chile.rut_chile.is_valid_rut', 'rut_chile.is_valid_rut', (['test_input'], {}), '(test_input)\n', (1260, 1272), False, 'from rut_chile import rut_chile\n'), ((1981, 2015), 'rut_chile.rut_chile.is_valid_rut', 'rut_chile.is_valid_rut', (['test_input'], {}), '(test_input)\n', (2003, 2015), False, 'from rut_chile import rut_chile\n'), ((2500, 2525), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2513, 2525), False, 'import pytest\n'), ((2548, 2592), 'rut_chile.rut_chile.get_verification_digit', 'rut_chile.get_verification_digit', (['test_input'], {}), '(test_input)\n', (2580, 2592), False, 'from rut_chile import rut_chile\n'), ((3120, 3164), 'rut_chile.rut_chile.get_verification_digit', 'rut_chile.get_verification_digit', (['test_input'], {}), '(test_input)\n', (3152, 3164), False, 'from rut_chile import rut_chile\n'), ((3660, 3685), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3673, 3685), False, 'import pytest\n'), ((3708, 3764), 'rut_chile.rut_chile.get_capitalized_verification_digit', 'rut_chile.get_capitalized_verification_digit', (['test_input'], {}), '(test_input)\n', (3752, 3764), False, 'from rut_chile import rut_chile\n'), ((4842, 4867), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4855, 4867), False, 'import pytest\n'), ((4890, 4932), 'rut_chile.rut_chile.format_rut_with_dots', 'rut_chile.format_rut_with_dots', (['test_input'], {}), '(test_input)\n', (4920, 4932), False, 'from rut_chile import rut_chile\n'), ((5414, 5456), 'rut_chile.rut_chile.format_rut_with_dots', 'rut_chile.format_rut_with_dots', (['test_input'], {}), '(test_input)\n', (5444, 5456), False, 'from rut_chile import rut_chile\n'), ((5939, 5964), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5952, 5964), False, 'import pytest\n'), ((5987, 6041), 'rut_chile.rut_chile.format_capitalized_rut_with_dots', 'rut_chile.format_capitalized_rut_with_dots', (['test_input'], {}), '(test_input)\n', (6029, 6041), False, 'from rut_chile import rut_chile\n'), ((7070, 7095), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7083, 7095), False, 'import pytest\n'), ((7118, 7163), 'rut_chile.rut_chile.format_rut_without_dots', 'rut_chile.format_rut_without_dots', (['test_input'], {}), '(test_input)\n', (7151, 7163), False, 'from rut_chile import rut_chile\n'), ((7636, 7681), 'rut_chile.rut_chile.format_rut_without_dots', 'rut_chile.format_rut_without_dots', (['test_input'], {}), '(test_input)\n', (7669, 7681), False, 'from rut_chile import rut_chile\n'), ((8167, 8192), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8180, 8192), False, 'import pytest\n'), ((8215, 8272), 'rut_chile.rut_chile.format_capitalized_rut_without_dots', 'rut_chile.format_capitalized_rut_without_dots', (['test_input'], {}), '(test_input)\n', (8260, 8272), False, 'from rut_chile import rut_chile\n')]
|
# ------------------------------------------------------------------
# PyTorch implementation of
# "ROAM: Recurrently Optimizing Tracking Model", CVPR, 2020
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------
import config
import numpy as np
import os
import torch
import torchvision.transforms as trans
import json
from torch.utils.data import Dataset
from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader
import cv2
class BaseDataset(Dataset):
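    """Base dataset that samples video clips and builds (patch, label map, gt bbox) training examples."""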
def __init__(self, train_dir, val_dir=None, is_train=True):
if is_train:
self._seq_dir = train_dir
with open(os.path.join(train_dir, 'train.json'), 'r') as f:
self._seqs = json.load(f)
else:
np.random.seed(123)
self._seq_dir = val_dir
with open(os.path.join(val_dir, 'val.json'), 'r') as f:
self._seqs = json.load(f)
self.n_seq = len(self._seqs)
print(self.n_seq)
self.is_train = is_train
self.seq_id = -1
self.preprocessor = trans.Compose(
[trans.ToTensor(),
trans.Normalize(mean=config.mean, std=config.std)])
self.pad_value = np.array(config.mean)
base_target_sz = np.array([config.base_target_sz, config.base_target_sz])
self.base_window_sz = get_search_size(base_target_sz, config.search_scale)
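        # label-map size in feature cells, forced to be odd so the target can sit on the center cell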
window_cell_sz = np.ceil(self.base_window_sz / config.cell_sz)
self.window_cell_sz = window_cell_sz - (window_cell_sz % 2) + 1
def __len__(self):
return len(self._seqs)
def __getitem__(self, seq_id):
# uniformly choosing video frames
seq_name = self._seqs[seq_id]['seq_name']
start_frame = self._seqs[seq_id]['start_frame']
end_frame = self._seqs[seq_id]['end_frame']
bboxes = self._seqs[seq_id]['gt_bboxes']
visible = self._seqs[seq_id]['visible']
visible_frame_idxes = np.arange(0, end_frame - start_frame + 1)[visible]
seq_len = len(visible_frame_idxes)
clip_len = config.look_ahead * config.time_step + 1
assert seq_len >= clip_len
start_idx = np.random.randint(seq_len - clip_len + 1) if self.is_train else 0
selected_idxes = [visible_frame_idxes[idx] for idx in range(start_idx, start_idx + clip_len)]
# build training examples for initial frame
patches, label_maps, gt_bboxes = [], [], []
img_path = self._get_img_path(seq_name, start_frame, selected_idxes[0])
init_image = default_loader(img_path)
init_bbox = np.array(bboxes[selected_idxes[0]])
for ratio in config.aug_init_ratios:
for scale in config.aug_init_scales:
# aspect ratio augmentation
height, width = init_image.shape[0: 2]
sw, sh = int(width * ratio), int(height / ratio)
image_resized = cv2.resize(init_image, (sw, sh))
                bbox_resized = init_bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
                # scale changes augmentation
                search_scale = config.search_scale / scale
                # generate training examples
                patch, label_map, bbox_on_patch = self._generate_training_examples(image_resized, bbox_resized, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
# build training examples for subsequent frames.
for i, idx in enumerate(selected_idxes[1:]):
img_path = self._get_img_path(seq_name, start_frame, idx)
image = default_loader(img_path)
bbox = np.array(bboxes[idx])
# aspect ratio augmentation
height, width = image.shape[0: 2]
ratio = np.random.uniform(config.aug_ratios_range[0], config.aug_ratios_range[1])
sw, sh = int(width * ratio), int(height / ratio)
image = cv2.resize(image, (sw, sh))
bbox = bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
# scale changes augmentation
obj_scale = np.random.uniform(config.aug_scales_range[0], config.aug_scales_range[1])
search_scale = config.search_scale/obj_scale
# generate training examples
patch, label_map, bbox_on_patch = self._generate_training_examples(image, bbox, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
patches = torch.stack(patches, 0)
label_maps = torch.stack(label_maps, 0)
gt_bboxes = torch.stack(gt_bboxes, 0)
return patches, label_maps, gt_bboxes
def _get_img_path(self, seq_name, start_frame, index):
raise NotImplementedError
def _generate_training_examples(self, image, bbox, search_scale):
# get roi patches
window_sz = get_search_size(bbox[2:], search_scale)
scale = self.base_window_sz[0] / window_sz[0]
offset = np.random.randint(-config.offset_range, config.offset_range, 2)
center = bbox[:2] + np.floor(bbox[2:] / 2) - offset/scale
patch = get_search_patch(image, center, self.pad_value, window_sz, self.base_window_sz)
patch = self.preprocessor(patch)
# get label maps
scaled_bbox = np.round(bbox * scale)
target_cell_sz = np.ceil(scaled_bbox[2:] / config.cell_sz)
output_sigma = target_cell_sz * config.output_sigma_factor
map_center = np.floor(self.window_cell_sz / 2) + offset //config.cell_sz
label_map = gaussian_shaped_labels(output_sigma, self.window_cell_sz, map_center)
label_map = torch.from_numpy(label_map[None, :]).float()
# get gt bboxes
bbox_center = np.zeros(2, np.float32) + offset
bbox_on_patch = np.concatenate([bbox_center, scaled_bbox[2:]])
bbox_on_patch = torch.from_numpy(bbox_on_patch).float()
return patch, label_map, bbox_on_patch
class VIDataset(BaseDataset):
def __init__(self, root_dir, is_train):
train_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/train')
val_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/val')
super(VIDataset, self).__init__(train_dir, val_dir, is_train)
def _get_img_path(self, seq_name, start_frame, index):
img_path = self._seq_dir + ('/%s/%06d.JPEG' % (seq_name, (start_frame - 1) + index))
return img_path
|
[
"numpy.random.seed",
"numpy.floor",
"utils.get_search_patch",
"numpy.random.randint",
"numpy.arange",
"torchvision.transforms.Normalize",
"os.path.join",
"numpy.round",
"cv2.resize",
"numpy.ceil",
"utils.get_search_size",
"utils.gaussian_shaped_labels",
"utils.default_loader",
"numpy.concatenate",
"torch.from_numpy",
"numpy.random.uniform",
"json.load",
"torch.stack",
"numpy.zeros",
"numpy.array",
"torchvision.transforms.ToTensor"
] |
[((1309, 1330), 'numpy.array', 'np.array', (['config.mean'], {}), '(config.mean)\n', (1317, 1330), True, 'import numpy as np\n'), ((1357, 1413), 'numpy.array', 'np.array', (['[config.base_target_sz, config.base_target_sz]'], {}), '([config.base_target_sz, config.base_target_sz])\n', (1365, 1413), True, 'import numpy as np\n'), ((1444, 1496), 'utils.get_search_size', 'get_search_size', (['base_target_sz', 'config.search_scale'], {}), '(base_target_sz, config.search_scale)\n', (1459, 1496), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((1522, 1567), 'numpy.ceil', 'np.ceil', (['(self.base_window_sz / config.cell_sz)'], {}), '(self.base_window_sz / config.cell_sz)\n', (1529, 1567), True, 'import numpy as np\n'), ((2643, 2667), 'utils.default_loader', 'default_loader', (['img_path'], {}), '(img_path)\n', (2657, 2667), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((2688, 2723), 'numpy.array', 'np.array', (['bboxes[selected_idxes[0]]'], {}), '(bboxes[selected_idxes[0]])\n', (2696, 2723), True, 'import numpy as np\n'), ((4653, 4676), 'torch.stack', 'torch.stack', (['patches', '(0)'], {}), '(patches, 0)\n', (4664, 4676), False, 'import torch\n'), ((4698, 4724), 'torch.stack', 'torch.stack', (['label_maps', '(0)'], {}), '(label_maps, 0)\n', (4709, 4724), False, 'import torch\n'), ((4745, 4770), 'torch.stack', 'torch.stack', (['gt_bboxes', '(0)'], {}), '(gt_bboxes, 0)\n', (4756, 4770), False, 'import torch\n'), ((5029, 5068), 'utils.get_search_size', 'get_search_size', (['bbox[2:]', 'search_scale'], {}), '(bbox[2:], search_scale)\n', (5044, 5068), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5140, 5203), 'numpy.random.randint', 'np.random.randint', (['(-config.offset_range)', 'config.offset_range', '(2)'], {}), '(-config.offset_range, config.offset_range, 2)\n', (5157, 5203), True, 'import numpy as np\n'), ((5286, 5365), 'utils.get_search_patch', 'get_search_patch', (['image', 'center', 'self.pad_value', 'window_sz', 'self.base_window_sz'], {}), '(image, center, self.pad_value, window_sz, self.base_window_sz)\n', (5302, 5365), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5455, 5477), 'numpy.round', 'np.round', (['(bbox * scale)'], {}), '(bbox * scale)\n', (5463, 5477), True, 'import numpy as np\n'), ((5503, 5544), 'numpy.ceil', 'np.ceil', (['(scaled_bbox[2:] / config.cell_sz)'], {}), '(scaled_bbox[2:] / config.cell_sz)\n', (5510, 5544), True, 'import numpy as np\n'), ((5713, 5782), 'utils.gaussian_shaped_labels', 'gaussian_shaped_labels', (['output_sigma', 'self.window_cell_sz', 'map_center'], {}), '(output_sigma, self.window_cell_sz, map_center)\n', (5735, 5782), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5952, 5998), 'numpy.concatenate', 'np.concatenate', (['[bbox_center, scaled_bbox[2:]]'], {}), '([bbox_center, scaled_bbox[2:]])\n', (5966, 5998), True, 'import numpy as np\n'), ((6207, 6259), 'os.path.join', 'os.path.join', (['root_dir', '"""Data/ILSVRC/Data/VID/train"""'], {}), "(root_dir, 'Data/ILSVRC/Data/VID/train')\n", (6219, 6259), False, 'import os\n'), ((6278, 6328), 'os.path.join', 'os.path.join', (['root_dir', '"""Data/ILSVRC/Data/VID/val"""'], {}), "(root_dir, 'Data/ILSVRC/Data/VID/val')\n", (6290, 6328), False, 'import os\n'), ((857, 876), 'numpy.random.seed', 'np.random.seed', 
(['(123)'], {}), '(123)\n', (871, 876), True, 'import numpy as np\n'), ((2060, 2101), 'numpy.arange', 'np.arange', (['(0)', '(end_frame - start_frame + 1)'], {}), '(0, end_frame - start_frame + 1)\n', (2069, 2101), True, 'import numpy as np\n'), ((2269, 2310), 'numpy.random.randint', 'np.random.randint', (['(seq_len - clip_len + 1)'], {}), '(seq_len - clip_len + 1)\n', (2286, 2310), True, 'import numpy as np\n'), ((3744, 3768), 'utils.default_loader', 'default_loader', (['img_path'], {}), '(img_path)\n', (3758, 3768), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((3788, 3809), 'numpy.array', 'np.array', (['bboxes[idx]'], {}), '(bboxes[idx])\n', (3796, 3809), True, 'import numpy as np\n'), ((3916, 3989), 'numpy.random.uniform', 'np.random.uniform', (['config.aug_ratios_range[0]', 'config.aug_ratios_range[1]'], {}), '(config.aug_ratios_range[0], config.aug_ratios_range[1])\n', (3933, 3989), True, 'import numpy as np\n'), ((4071, 4098), 'cv2.resize', 'cv2.resize', (['image', '(sw, sh)'], {}), '(image, (sw, sh))\n', (4081, 4098), False, 'import cv2\n'), ((4237, 4310), 'numpy.random.uniform', 'np.random.uniform', (['config.aug_scales_range[0]', 'config.aug_scales_range[1]'], {}), '(config.aug_scales_range[0], config.aug_scales_range[1])\n', (4254, 4310), True, 'import numpy as np\n'), ((5633, 5666), 'numpy.floor', 'np.floor', (['(self.window_cell_sz / 2)'], {}), '(self.window_cell_sz / 2)\n', (5641, 5666), True, 'import numpy as np\n'), ((5895, 5918), 'numpy.zeros', 'np.zeros', (['(2)', 'np.float32'], {}), '(2, np.float32)\n', (5903, 5918), True, 'import numpy as np\n'), ((818, 830), 'json.load', 'json.load', (['f'], {}), '(f)\n', (827, 830), False, 'import json\n'), ((1010, 1022), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1019, 1022), False, 'import json\n'), ((1201, 1217), 'torchvision.transforms.ToTensor', 'trans.ToTensor', ([], {}), '()\n', (1215, 1217), True, 'import torchvision.transforms as trans\n'), ((1232, 1281), 'torchvision.transforms.Normalize', 'trans.Normalize', ([], {'mean': 'config.mean', 'std': 'config.std'}), '(mean=config.mean, std=config.std)\n', (1247, 1281), True, 'import torchvision.transforms as trans\n'), ((3014, 3046), 'cv2.resize', 'cv2.resize', (['init_image', '(sw, sh)'], {}), '(init_image, (sw, sh))\n', (3024, 3046), False, 'import cv2\n'), ((4125, 4171), 'numpy.array', 'np.array', (['[ratio, 1 / ratio, ratio, 1 / ratio]'], {}), '([ratio, 1 / ratio, ratio, 1 / ratio])\n', (4133, 4171), True, 'import numpy as np\n'), ((5232, 5254), 'numpy.floor', 'np.floor', (['(bbox[2:] / 2)'], {}), '(bbox[2:] / 2)\n', (5240, 5254), True, 'import numpy as np\n'), ((5803, 5839), 'torch.from_numpy', 'torch.from_numpy', (['label_map[None, :]'], {}), '(label_map[None, :])\n', (5819, 5839), False, 'import torch\n'), ((6023, 6054), 'torch.from_numpy', 'torch.from_numpy', (['bbox_on_patch'], {}), '(bbox_on_patch)\n', (6039, 6054), False, 'import torch\n'), ((739, 776), 'os.path.join', 'os.path.join', (['train_dir', '"""train.json"""'], {}), "(train_dir, 'train.json')\n", (751, 776), False, 'import os\n'), ((935, 968), 'os.path.join', 'os.path.join', (['val_dir', '"""val.json"""'], {}), "(val_dir, 'val.json')\n", (947, 968), False, 'import os\n'), ((3090, 3136), 'numpy.array', 'np.array', (['[ratio, 1 / ratio, ratio, 1 / ratio]'], {}), '([ratio, 1 / ratio, ratio, 1 / ratio])\n', (3098, 3136), True, 'import numpy as np\n')]
|
"""Creates a custom kinematics body with two links and one joint
"""
from openravepy import *
from numpy import eye, array, zeros
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
with env:
robot=RaveCreateRobot(env,'')
robot.SetName('camera')
linkinfo=KinBody.LinkInfo()
linkinfo._name='camerabase'
ginfo=KinBody.GeometryInfo()
ginfo._type=GeometryType.Box
ginfo._vGeomData=[0.1,0.1,0.1] # box extents
ginfo._vDiffuseColor=[0,0,1]
ginfo._t = eye(4)
linkinfo._vgeometryinfos = [ginfo]
camera1info=Robot.AttachedSensorInfo()
camera1info._linkname='camerabase'
camera1info._name = 'ensenson10'
camera1info._sensorname = 'base_pinhole_camera'
camera1info._trelative = eye(4)
camera1info._trelative[0:3,3] = [0,0,0.1]
camera1info._sensorgeometry = CameraGeomData()
camera1info._sensorgeometry.width = 640
camera1info._sensorgeometry.height = 480
camera1info._sensorgeometry.intrinsics.K = array([[640.0,0,320],[0,640,240],[0,0,1]])
camera1info._sensorgeometry.intrinsics.distortion_coeffs = zeros(5)
camera1info._sensorgeometry.intrinsics.distortion_model = 'opencv'
camera1info._sensorgeometry.intrinsics.focal_length = 0.05
robot.Init([linkinfo],[],[],[])
env.Add(robot)
robot.AddAttachedSensor(camera1info,True)
|
[
"numpy.zeros",
"numpy.eye",
"numpy.array"
] |
[((533, 539), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (536, 539), False, 'from numpy import eye, array, zeros\n'), ((780, 786), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (783, 786), False, 'from numpy import eye, array, zeros\n'), ((1020, 1070), 'numpy.array', 'array', (['[[640.0, 0, 320], [0, 640, 240], [0, 0, 1]]'], {}), '([[640.0, 0, 320], [0, 640, 240], [0, 0, 1]])\n', (1025, 1070), False, 'from numpy import eye, array, zeros\n'), ((1126, 1134), 'numpy.zeros', 'zeros', (['(5)'], {}), '(5)\n', (1131, 1134), False, 'from numpy import eye, array, zeros\n')]
|
# Check whether the label contains the last time plus one second; if not, add one row of data
# The image list is also added inside the function
# Generate a single label at a time
# Given the location (full path) of an old label, generate a new label
import os
import numpy as np
import json
from create_v3_012 import create_json_record
# from create_v2 import create_json_record
# path_txt_for_check = r'Y:\dataset\inroad_parking_videos\pics\2019_08_12\DDT2G1907ZMY00008SY_label.txt'
# path_txt_for_check = r'Y:\dataset\inroad_parking_videos\pics\2019_12_28\DDT2G1907ZMY00142SY_1211_label.txt'
# path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2019_12_14\DDT2G1907ZMY00057SY_label.txt'
path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2020_01_19\DDT2G1907ZMY00082SY_label.txt'
# path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2019_12_31\DDT2G1907ZMY00142SY_1211_label.txt'
imgs_dir = os.path.dirname(path_txt_for_check)
folder_name = os.path.basename(path_txt_for_check).split('_')
if len(folder_name) == 2:
folder_name = folder_name[0]
elif len(folder_name) == 3:
folder_name = '_'.join(folder_name[0:2])
elif len(folder_name) == 4:
folder_name = '_'.join(folder_name[0:3])
imgs_folder_path = f'{imgs_dir}\\{folder_name}'
imgs_list = os.listdir(imgs_folder_path)
imgs_list = [i for i in imgs_list if i.endswith('jpg')]
imgs_list.sort()  # sort
imgs_list_only_time = ['_'.join(i.split('_')[:3]) for i in imgs_list]
# # get the time of the last image
imgs_last_time = os.path.splitext(imgs_list[-1])[0]
hh, mm, ss = imgs_last_time.split('_')[:3]
sec_last_plus_one = 3600 * int(hh) + 60 * int(mm) + int(ss) + 1  # # last time plus 1 second
imgs_last_time_plus_one = f'{sec_last_plus_one//3600:02d}_{sec_last_plus_one%3600//60:02d}_{sec_last_plus_one%60:02d}'
# path_local = r'C:\Users\tongxin\Desktop\label_test_666'
# path_local = r'C:\Users\tongxin\Desktop\1'
path_local = r'C:\Users\tongxin\Desktop\label_test_2020_01_07'
path_json_converted = path_local + '\\' + \
path_txt_for_check.split('\\')[-2] + '\\' + \
os.path.splitext(os.path.basename(path_txt_for_check))[0] + '_v2.json'
# os.path.splitext(os.path.basename(path_txt_for_check))[0]+'_d05.10.json'
if not os.path.isdir(os.path.dirname(path_json_converted)):
os.makedirs(os.path.dirname(path_json_converted))
# 1. Read the txt file
data_raw = []
with open(path_txt_for_check, 'r', encoding='UTF-8') as file_to_read:
    lines = file_to_read.readlines()  # read the data
for line in lines:
if line != '\n':
data_raw.append(line)
parking_space = [i.split(':')[0] for i in data_raw[-1].split(' ')[1:]]  # count how many parking spaces there are
# 2. Convert the file
data_raw_np = []
for i in data_raw:
for idx, j in enumerate(i.split(' ')):
        if idx == len(parking_space) and (j[-1] == '\n'):  # last column
            data_raw_np.append(j[:-1])  # strip the trailing "\n"
else:
data_raw_np.append(j)
record_for_json = create_json_record(data_raw_np, parking_space, imgs_last_time_plus_one, imgs_list_only_time)
file = open(path_json_converted, 'w', encoding='utf-8')
json.dump(record_for_json, file, ensure_ascii=False, indent=4)
file.close()
print(f'save new label at:{path_json_converted}')
|
[
"json.dump",
"os.path.basename",
"os.path.dirname",
"os.path.splitext",
"create_v3_012.create_json_record",
"os.listdir"
] |
[((757, 792), 'os.path.dirname', 'os.path.dirname', (['path_txt_for_check'], {}), '(path_txt_for_check)\n', (772, 792), False, 'import os\n'), ((1121, 1149), 'os.listdir', 'os.listdir', (['imgs_folder_path'], {}), '(imgs_folder_path)\n', (1131, 1149), False, 'import os\n'), ((2712, 2808), 'create_v3_012.create_json_record', 'create_json_record', (['data_raw_np', 'parking_space', 'imgs_last_time_plus_one', 'imgs_list_only_time'], {}), '(data_raw_np, parking_space, imgs_last_time_plus_one,\n imgs_list_only_time)\n', (2730, 2808), False, 'from create_v3_012 import create_json_record\n'), ((2861, 2923), 'json.dump', 'json.dump', (['record_for_json', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(record_for_json, file, ensure_ascii=False, indent=4)\n', (2870, 2923), False, 'import json\n'), ((1331, 1362), 'os.path.splitext', 'os.path.splitext', (['imgs_list[-1]'], {}), '(imgs_list[-1])\n', (1347, 1362), False, 'import os\n'), ((807, 843), 'os.path.basename', 'os.path.basename', (['path_txt_for_check'], {}), '(path_txt_for_check)\n', (823, 843), False, 'import os\n'), ((2039, 2075), 'os.path.dirname', 'os.path.dirname', (['path_json_converted'], {}), '(path_json_converted)\n', (2054, 2075), False, 'import os\n'), ((2094, 2130), 'os.path.dirname', 'os.path.dirname', (['path_json_converted'], {}), '(path_json_converted)\n', (2109, 2130), False, 'import os\n'), ((1887, 1923), 'os.path.basename', 'os.path.basename', (['path_txt_for_check'], {}), '(path_txt_for_check)\n', (1903, 1923), False, 'import os\n')]
|
# main imports
import numpy as np
import pandas as pd
import json
import os, sys, argparse, subprocess
# model imports
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from joblib import dump, load
# image processing imports
from PIL import Image
import ipfml.iqa.fr as fr
from ipfml import metrics
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
n_samples_image_name_postfix = "_samples_mean.png"
reference_image_name_postfix = "_1000_samples_mean.png"
def write_result(_scene_name, _data_file, _model_path, _n, _reconstructed_path, _iqa):
# prepare data to get score information
dataset=np.loadtxt(_data_file, delimiter=';')
y = dataset[:,0]
X = dataset[:,1:]
y=np.reshape(y, (-1,1))
scaler = MinMaxScaler()
scaler.fit(X)
scaler.fit(y)
xscale=scaler.transform(X)
yscale=scaler.transform(y)
_, X_test, _, y_test = train_test_split(xscale, yscale)
# prepare image path to compare
n_samples_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + '_' + _n + n_samples_image_name_postfix)
reference_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + reference_image_name_postfix)
if not os.path.exists(n_samples_image_path):
# call sub process to create 'n' samples img
print("Creation of 'n' samples image : ", n_samples_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", _n, "--image_name", n_samples_image_path.split('/')[-1]])
if not os.path.exists(reference_image_path):
# call sub process to create 'reference' img
print("Creation of reference image : ", reference_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", str(1000), "--image_name", reference_image_path.split('/')[-1]])
# load the trained model
with open(_model_path, 'r') as f:
json_model = json.load(f)
model = model_from_json(json_model)
model.load_weights(_model_path.replace('.json', '.h5'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# get coefficient of determination score on test set
y_predicted = model.predict(X_test)
len_shape, _ = y_predicted.shape
y_predicted = y_predicted.reshape(len_shape)
coeff = metrics.coefficient_of_determination(y_test, y_predicted)
# Get data information
reference_image = Image.open(reference_image_path)
reconstructed_image = Image.open(_reconstructed_path)
n_samples_image = Image.open(n_samples_image_path)
# Load expected IQA comparison
try:
fr_iqa = getattr(fr, _iqa)
except AttributeError:
raise NotImplementedError("FR IQA `{}` not implement `{}`".format(fr.__name__, _iqa))
mse_ref_reconstructed_samples = fr_iqa(reference_image, reconstructed_image)
mse_reconstructed_n_samples = fr_iqa(n_samples_image, reconstructed_image)
model_name = _model_path.split('/')[-1].replace('.json', '')
if not os.path.exists(cfg.results_information_folder):
os.makedirs(cfg.results_information_folder)
# save score into models_comparisons_keras.csv file
with open(cfg.global_result_filepath_keras, "a") as f:
f.write(model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';' + str(mse_reconstructed_n_samples) + ';' + str(mse_ref_reconstructed_samples) + '\n')
def main():
parser = argparse.ArgumentParser(description="Train model and saved it")
parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
parser.add_argument('--data', type=str, help='Filename of dataset')
parser.add_argument('--model_path', type=str, help='Json model file path')
parser.add_argument('--n', type=str, help='Number of pixel values approximated to keep')
parser.add_argument('--image_path', type=str, help="The image reconstructed to compare with")
parser.add_argument('--iqa', type=str, help='Image to compare', choices=['ssim', 'mse', 'rmse', 'mae', 'psnr'])
args = parser.parse_args()
param_scene_name = args.scene
param_data_file = args.data
param_n = args.n
param_model_path = args.model_path
param_image_path = args.image_path
param_iqa = args.iqa
write_result(param_scene_name, param_data_file, param_model_path, param_n, param_image_path, param_iqa)
if __name__== "__main__":
main()
|
[
"ipfml.metrics.coefficient_of_determination",
"json.load",
"argparse.ArgumentParser",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"os.path.exists",
"sys.path.insert",
"PIL.Image.open",
"keras.models.model_from_json",
"numpy.loadtxt",
"numpy.reshape",
"os.path.join"
] |
[((423, 445), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (438, 445), False, 'import os, sys, argparse, subprocess\n'), ((779, 816), 'numpy.loadtxt', 'np.loadtxt', (['_data_file'], {'delimiter': '""";"""'}), "(_data_file, delimiter=';')\n", (789, 816), True, 'import numpy as np\n'), ((868, 890), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (878, 890), True, 'import numpy as np\n'), ((903, 917), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (915, 917), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1046, 1078), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xscale', 'yscale'], {}), '(xscale, yscale)\n', (1062, 1078), False, 'from sklearn.model_selection import train_test_split\n'), ((1143, 1240), 'os.path.join', 'os.path.join', (['cfg.reconstructed_folder', "(_scene_name + '_' + _n + n_samples_image_name_postfix)"], {}), "(cfg.reconstructed_folder, _scene_name + '_' + _n +\n n_samples_image_name_postfix)\n", (1155, 1240), False, 'import os, sys, argparse, subprocess\n'), ((1264, 1350), 'os.path.join', 'os.path.join', (['cfg.reconstructed_folder', '(_scene_name + reference_image_name_postfix)'], {}), '(cfg.reconstructed_folder, _scene_name +\n reference_image_name_postfix)\n', (1276, 1350), False, 'import os, sys, argparse, subprocess\n'), ((2574, 2631), 'ipfml.metrics.coefficient_of_determination', 'metrics.coefficient_of_determination', (['y_test', 'y_predicted'], {}), '(y_test, y_predicted)\n', (2610, 2631), False, 'from ipfml import metrics\n'), ((2682, 2714), 'PIL.Image.open', 'Image.open', (['reference_image_path'], {}), '(reference_image_path)\n', (2692, 2714), False, 'from PIL import Image\n'), ((2741, 2772), 'PIL.Image.open', 'Image.open', (['_reconstructed_path'], {}), '(_reconstructed_path)\n', (2751, 2772), False, 'from PIL import Image\n'), ((2795, 2827), 'PIL.Image.open', 'Image.open', (['n_samples_image_path'], {}), '(n_samples_image_path)\n', (2805, 2827), False, 'from PIL import Image\n'), ((3675, 3738), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model and saved it"""'}), "(description='Train model and saved it')\n", (3698, 3738), False, 'import os, sys, argparse, subprocess\n'), ((1359, 1395), 'os.path.exists', 'os.path.exists', (['n_samples_image_path'], {}), '(n_samples_image_path)\n', (1373, 1395), False, 'import os, sys, argparse, subprocess\n'), ((1698, 1734), 'os.path.exists', 'os.path.exists', (['reference_image_path'], {}), '(reference_image_path)\n', (1712, 1734), False, 'import os, sys, argparse, subprocess\n'), ((2120, 2132), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2129, 2132), False, 'import json\n'), ((2149, 2176), 'keras.models.model_from_json', 'model_from_json', (['json_model'], {}), '(json_model)\n', (2164, 2176), False, 'from keras.models import model_from_json\n'), ((3268, 3314), 'os.path.exists', 'os.path.exists', (['cfg.results_information_folder'], {}), '(cfg.results_information_folder)\n', (3282, 3314), False, 'import os, sys, argparse, subprocess\n'), ((3324, 3367), 'os.makedirs', 'os.makedirs', (['cfg.results_information_folder'], {}), '(cfg.results_information_folder)\n', (3335, 3367), False, 'import os, sys, argparse, subprocess\n')]
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from nlp.chatbot.dataset import data_utils
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
import os,sys
import numpy as np
from nlp.chatbot import model as s2s_model
def test_bleu(count, args):
    print('Preparing data')
bucket_dbs = data_utils.read_bucket_dbs(args.buckets_dir)
buckets = data_utils.buckets
bucket_sizes = []
for i in range(len(buckets)):
bucket_size = bucket_dbs[i].size
bucket_sizes.append(bucket_size)
        print('bucket {} contains {} records'.format(i, bucket_size))
total_size = sum(bucket_sizes)
    print('{} records in total'.format(total_size))
if count <= 0:
count = total_size
buckets_scale = [
sum(bucket_sizes[:i + 1]) / total_size
for i in range(len(bucket_sizes))
]
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
total_score = 0.0
for i in tqdm(range(count)):
random_number = np.random.random_sample()
bucket_id = min([
i for i in range(len(buckets_scale))
if buckets_scale[i] > random_number
])
data, _ = model.get_batch_data(
bucket_dbs,
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
bucket_dbs,
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ask, _ = data[0]
all_answers = bucket_dbs[bucket_id].all_answers(ask)
ret = data_utils.indice_sentence(outputs)
if not ret:
continue
references = [list(x) for x in all_answers]
score = sentence_bleu(
references,
list(ret),
weights=(1.0,)
)
total_score += score
print('BLUE: {:.2f} in {} samples'.format(total_score / count * 10, count))
def test(args):
class TestBucket(object):
def __init__(self, sentence):
self.sentence = sentence
def random(self):
            return self.sentence, ''
buckets = data_utils.buckets
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
bucket_id = min([
b for b in range(len(buckets))
if buckets[b][0] > len(sentence)
])
data, _ = model.get_batch_data(
{bucket_id: TestBucket(sentence)},
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
{bucket_id: TestBucket(sentence)},
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ret = data_utils.indice_sentence(outputs)
print(ret)
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
|
[
"nlp.chatbot.model.create_model",
"sys.stdout.write",
"numpy.random.random_sample",
"numpy.argmax",
"tensorflow.Session",
"nlp.chatbot.dataset.data_utils.indice_sentence",
"nlp.chatbot.dataset.data_utils.read_bucket_dbs",
"sys.stdout.flush",
"tensorflow.initialize_all_variables",
"os.path.join",
"sys.stdin.readline"
] |
[((307, 351), 'nlp.chatbot.dataset.data_utils.read_bucket_dbs', 'data_utils.read_bucket_dbs', (['args.buckets_dir'], {}), '(args.buckets_dir)\n', (333, 351), False, 'from nlp.chatbot.dataset import data_utils\n'), ((833, 845), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (843, 845), True, 'import tensorflow as tf\n'), ((871, 905), 'nlp.chatbot.model.create_model', 's2s_model.create_model', (['sess', '(True)'], {}), '(sess, True)\n', (893, 905), True, 'from nlp.chatbot import model as s2s_model\n'), ((2650, 2662), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2660, 2662), True, 'import tensorflow as tf\n'), ((2688, 2722), 'nlp.chatbot.model.create_model', 's2s_model.create_model', (['sess', '(True)'], {}), '(sess, True)\n', (2710, 2722), True, 'from nlp.chatbot import model as s2s_model\n'), ((2889, 2911), 'sys.stdout.write', 'sys.stdout.write', (['"""> """'], {}), "('> ')\n", (2905, 2911), False, 'import os, sys\n'), ((2920, 2938), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2936, 2938), False, 'import os, sys\n'), ((2958, 2978), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2976, 2978), False, 'import os, sys\n'), ((952, 981), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (979, 981), True, 'import tensorflow as tf\n'), ((1017, 1062), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (1029, 1062), False, 'import os, sys\n'), ((1156, 1181), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (1179, 1181), True, 'import numpy as np\n'), ((2033, 2068), 'nlp.chatbot.dataset.data_utils.indice_sentence', 'data_utils.indice_sentence', (['outputs'], {}), '(outputs)\n', (2059, 2068), False, 'from nlp.chatbot.dataset import data_utils\n'), ((2769, 2798), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2796, 2798), True, 'import tensorflow as tf\n'), ((2834, 2879), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (2846, 2879), False, 'import os, sys\n'), ((3797, 3832), 'nlp.chatbot.dataset.data_utils.indice_sentence', 'data_utils.indice_sentence', (['outputs'], {}), '(outputs)\n', (3823, 3832), False, 'from nlp.chatbot.dataset import data_utils\n'), ((3900, 3918), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3916, 3918), False, 'import os, sys\n'), ((3942, 3962), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (3960, 3962), False, 'import os, sys\n'), ((1867, 1891), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (1876, 1891), True, 'import numpy as np\n'), ((3725, 3749), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (3734, 3749), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-14 09:29
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0036_merge_20170831_1449'),
]
operations = [
migrations.AlterField(
model_name='project',
name='place',
field=models.CharField(blank=True, help_text='Geographical location', max_length=200, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((434, 529), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Geographical location"""', 'max_length': '(200)', 'null': '(True)'}), "(blank=True, help_text='Geographical location', max_length=\n 200, null=True)\n", (450, 529), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import pyaudio
from pyaudio import PyAudio
from queue import Queue
import struct
from time import sleep
def get_steinberg_device_idx(pa: PyAudio) -> int:
"""
looks up the steinberg device index
"""
for i in range(pa.get_device_count()):
name = pa.get_device_info_by_index(i)['name']
if 'steinberg' in name.lower():
return i
raise Exception("Couldn't find steinberg audio device")
class Recorder:
def __init__(self, chunk_size=512, channels=1):
# for some reason, when chunk size is 1024 we observe some
        # non-random discontinuities in the signal every 1024*3 samples,
# which leads to very noticeable transients in the spectrogram
self.format = pyaudio.paFloat32
self.chunk_size = chunk_size
self.channels = channels
self.pa = PyAudio()
self.frame_queue = Queue()
self.device_idx = get_steinberg_device_idx(self.pa)
self.sr = int(self.pa.get_device_info_by_index(self.device_idx)['defaultSampleRate'])
def _get_callback(self):
def cb(input_data, frame_cnt, time_info, status_flags):
self.frame_queue.put(input_data)
return (None, pyaudio.paContinue)
return cb
def start_record(self):
self.stream = self.pa.open(
input_device_index=self.device_idx,
rate=self.sr,
format=self.format,
channels=self.channels,
input=True,
stream_callback=self._get_callback(),
frames_per_buffer=self.chunk_size)
def stop_record(self):
self.stream.stop_stream()
# unpacker = struct.Struct('f' * self.chunk_size)
# input_data = None # TODO
# output = []
# output += unpacker.unpack(input_data)
def read_queue(self):
s = struct.Struct('f'*self.chunk_size)
y = []
while not self.frame_queue.empty():
y += s.unpack(self.frame_queue.get())
return np.array(y)
if __name__ == '__main__':
r = Recorder()
r.start_record()
sleep(2)
r.stop_record()
print(r.read_queue())
|
[
"struct.Struct",
"time.sleep",
"numpy.array",
"pyaudio.PyAudio",
"queue.Queue"
] |
[((2126, 2134), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2131, 2134), False, 'from time import sleep\n'), ((859, 868), 'pyaudio.PyAudio', 'PyAudio', ([], {}), '()\n', (866, 868), False, 'from pyaudio import PyAudio\n'), ((896, 903), 'queue.Queue', 'Queue', ([], {}), '()\n', (901, 903), False, 'from queue import Queue\n'), ((1882, 1918), 'struct.Struct', 'struct.Struct', (["('f' * self.chunk_size)"], {}), "('f' * self.chunk_size)\n", (1895, 1918), False, 'import struct\n'), ((2041, 2052), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2049, 2052), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
import math
import operator
def putval(x1, x2, demand_per_tick, result):
"""
Helper function to calculate the demand over time (stored in result)
    based on an interval (x1=start, x2=end) and the given demand_per_tick
"""
for i in range(math.floor(x1)-1, math.floor(x2)+2):
demand = 0
if i-x1 >= 1 and x2-i >= 0:
demand = demand_per_tick
if i-x1 < 1 and i-x1 > 0:
demand = (i-x1)*demand_per_tick
if i-x2 < 1 and i-x2 > 0:
demand = (1-(i-x2))*demand_per_tick
if not result.get(i): result[i] = 0
result[i] += demand
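# Illustrative usage only (not part of the original module): a flow active from t=1.5
# to t=3.5 at 10 units per tick spreads half a tick of demand onto each boundary tick.
if __name__ == '__main__':
    _demo = {}
    putval(1.5, 3.5, 10, _demo)
    # _demo == {0: 0, 1: 0, 2: 5.0, 3: 10, 4: 5.0}; the total (20) equals 2 ticks * 10 units
    print(_demo)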
def plot(ctx):
"Volume that was delegated"
result = dict()
result2 = dict()
# extract total demand over time
events2 = []
for flow in ctx.flows:
events2.append((flow.start, flow.demand_per_tick))
events2.append((flow.start+flow.duration, -flow.demand_per_tick))
putval(flow.start, flow.start+flow.duration, flow.demand_per_tick, result2)
# extract delegated demand over time
events = []
demand_per_port = {}
demand_delegated = 0
demand_total = 0
per_tick = {}
for flow in ctx.flows:
demand_total += flow.duration * flow.demand_per_tick
if len(flow.delegation.reports) > 1:
for r1, r2 in zip(flow.delegation.reports, flow.delegation.reports[1:]):
# start and end time of delegation are recorded
if r1.action == 1 and r2.action == 0:
demand = (r2.tick-r1.tick)*flow.demand_per_tick
demand_delegated += demand
putval(r1.tick, r2.tick, flow.demand_per_tick, result)
assert(demand >= 0)
events.append((r1.tick, demand))
events.append((r2.tick, -demand))
rlast = flow.delegation.reports[-1]
if rlast.action == 1:
demand = (flow.finished_at-rlast.tick)*flow.demand_per_tick
demand_delegated += demand
assert(demand >= 0)
putval(rlast.tick, flow.finished_at, flow.demand_per_tick, result)
events.append((rlast.tick, demand))
events.append((flow.finished_at, -demand))
if len(flow.delegation.reports) == 1:
r1 = flow.delegation.reports[0]
demand = (flow.finished_at-r1.tick)*flow.demand_per_tick
demand_delegated += demand
assert(demand >= 0)
putval(r1.tick, flow.finished_at, flow.demand_per_tick, result)
events.append((r1.tick, demand))
events.append((flow.finished_at, -demand))
fig, ax = plt.subplots(figsize=(8, 3))
fig.tight_layout(pad=2.7)
xvalues = []
yvalues = []
for t, v in sorted(result.items()):
xvalues.append(int(t))
yvalues.append(v/1000.0)
#fig, ax = plt.subplots(figsize=(16, 8))
ax.plot(xvalues, yvalues, color='black', linewidth=1)
ax.set_xlabel('time (s)')
ax.set_ylabel('delegated (Mbit/s)')
#fill_underutil = [True if x < threshold and x+e > threshold else False for x, e in zip(cnt_active_flows, cnt_active_flows_evicted)]
ax.fill_between(xvalues, yvalues, 0, color='orange', alpha=0.3)
#ax.set_title('%s (%s)' % (names[solver], metrics[objective]), fontsize=10, fontweight='bold')
ax.set_xlim(0,450)
#ax.set_ylim(0,350)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.3)
ax.xaxis.grid(True, color='red', linestyle='--', linewidth=1, alpha=0.5)
plt.show()
|
[
"matplotlib.pyplot.subplots",
"math.floor",
"matplotlib.pyplot.show"
] |
[((2876, 2904), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (2888, 2904), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3771, 3773), True, 'import matplotlib.pyplot as plt\n'), ((441, 455), 'math.floor', 'math.floor', (['x1'], {}), '(x1)\n', (451, 455), False, 'import math\n'), ((459, 473), 'math.floor', 'math.floor', (['x2'], {}), '(x2)\n', (469, 473), False, 'import math\n')]
|
# Generated by Django 3.1.13 on 2021-08-14 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pwas', '0015_auto_20210814_1622'),
]
operations = [
migrations.RemoveField(
model_name='pwa',
name='tags',
),
migrations.AddField(
model_name='pwa',
name='tags',
field=models.ManyToManyField(related_name='tags', to='pwas.Tag'),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.ManyToManyField"
] |
[((233, 286), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""pwa"""', 'name': '"""tags"""'}), "(model_name='pwa', name='tags')\n", (255, 286), False, 'from django.db import migrations, models\n'), ((425, 483), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""tags"""', 'to': '"""pwas.Tag"""'}), "(related_name='tags', to='pwas.Tag')\n", (447, 483), False, 'from django.db import migrations, models\n')]
|
import tensorflow as tf
from nets.network import Network
import numpy as np
# !! The default data format used here is NHWC !!
# TODO: scope
def conv_bn(X, inChannel, outChannel, kernel, istrain, stride=1, name=None):
out = tf.layers.conv2d(X, outChannel, kernel, stride, 'same', use_bias=False, name=name)
out = tf.layers.batch_normalization(out, training=istrain,
name=name.replace('res', 'bn').replace('conv1', 'bn_conv1'))
return out
def conv_bn_relu(X, inChannel, outChannel, kernel, istrain, stride=1, use_bias=False, name=None):
out = conv_bn(X, inChannel, outChannel, kernel, istrain, stride=stride, name=name)
out = tf.nn.relu(out)
return out
def residual_conv(X, inChannel, interChannel, outputChannel, transition, istrain=False, name=None):
if transition:
init_stride = 2
else:
init_stride = 1
if inChannel == outputChannel:
skip_out = X
else:
skip_out = conv_bn(X, inChannel, outputChannel, 1, istrain,
stride=init_stride, name=name+'_branch1')
conv_out = conv_bn_relu(X, inChannel, interChannel, 1, istrain, name=name+'_branch2a')
conv_out = conv_bn_relu(conv_out, interChannel, interChannel, 3, istrain,
stride=init_stride, name=name+'_branch2b')
conv_out = conv_bn(conv_out, interChannel, outputChannel, 1, istrain, name=name+'_branch2c')
out = tf.nn.relu(skip_out + conv_out)
return out
def residual_block(X, inChannel, interChannel, outputChannel, numLayers,
transition=True, istrain=False, number_name=True, name=None):
if number_name and numLayers > 3:
names = [name+'a'] + [name+'b'+str(i+1) for i in range(numLayers-1)]
else:
names = [name+chr(ord('a')+i) for i in range(numLayers)]
out = residual_conv(X, inChannel, interChannel, outputChannel,
transition=transition, istrain=istrain, name=names[0])
for i in range(numLayers - 1):
out = residual_conv(out, outputChannel, interChannel, outputChannel,
transition=False, istrain=istrain, name=names[i+1])
return out
class ResNet(Network):
def __init__(self, scope=None, istrain=False, reuse=False):
super(ResNet, self).__init__(scope)
self.reuse = reuse
self.istrain = istrain
def _build_resnet(self, numBlock1, numBlock2, numBlock3, numBlock4):
number_name = (self._scope != 'resnet50')
self.conv1 = conv_bn_relu(self.input, 3, 64, 7, istrain=self.istrain, stride=2, name='conv1')
self.pool1 = tf.layers.max_pooling2d(self.conv1, 3, 2, padding='same')
self.conv2 = residual_block(self.pool1, 64, 64, 256, numBlock1, transition=False,
istrain=self.istrain, number_name=number_name, name='res2')
self.conv3 = residual_block(self.conv2, 256, 128, 512, numBlock2,
istrain=self.istrain, number_name=number_name, name='res3')
self.conv4 = residual_block(self.conv3, 512, 256, 1024, numBlock3,
istrain=self.istrain, number_name=number_name, name='res4')
self.conv5 = residual_block(self.conv4, 1024, 512, 2048, numBlock4,
istrain=self.istrain, number_name=number_name, name='res5')
self.pool5 = tf.layers.average_pooling2d(self.conv5, 7, 1)
self.pool5_flat = tf.layers.flatten(self.pool5)
self.scores = tf.layers.dense(self.pool5_flat, 1000, name='fc1000')
return self.scores
def find_key_name(self, var):
key, name = var.name.split('/')[-2:]
if 'kernel' in name or 'weight' in name:
name = 'weights'
elif 'bias' in name:
name = 'biases'
elif 'mean' in name:
name = 'mean'
elif 'variance' in name:
name = 'variance'
elif 'gamma' in name:
name = 'scale'
elif 'beta' in name:
name = 'offset'
else:
raise Exception('%s is not defined in official resnet deploy.txt'%name)
return key, name
def load(self, sess, pretrained_file):
data = np.load(pretrained_file).item()
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope)
for var in variables:
key, name = self.find_key_name(var)
sess.run(var.assign(data[key][name]))
class ResNet50(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet50, self).__init__('resnet50', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 6, 3)
class ResNet101(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet101, self).__init__('resnet101', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 23, 3)
class ResNet152(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet152, self).__init__('resnet152', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 8, 36, 3)
|
[
"numpy.load",
"tensorflow.nn.relu",
"tensorflow.get_collection",
"tensorflow.layers.dense",
"tensorflow.layers.flatten",
"tensorflow.layers.average_pooling2d",
"tensorflow.layers.conv2d",
"tensorflow.layers.max_pooling2d"
] |
[((228, 314), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['X', 'outChannel', 'kernel', 'stride', '"""same"""'], {'use_bias': '(False)', 'name': 'name'}), "(X, outChannel, kernel, stride, 'same', use_bias=False,\n name=name)\n", (244, 314), True, 'import tensorflow as tf\n'), ((661, 676), 'tensorflow.nn.relu', 'tf.nn.relu', (['out'], {}), '(out)\n', (671, 676), True, 'import tensorflow as tf\n'), ((1424, 1455), 'tensorflow.nn.relu', 'tf.nn.relu', (['(skip_out + conv_out)'], {}), '(skip_out + conv_out)\n', (1434, 1455), True, 'import tensorflow as tf\n'), ((2607, 2664), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['self.conv1', '(3)', '(2)'], {'padding': '"""same"""'}), "(self.conv1, 3, 2, padding='same')\n", (2630, 2664), True, 'import tensorflow as tf\n'), ((3386, 3431), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', (['self.conv5', '(7)', '(1)'], {}), '(self.conv5, 7, 1)\n', (3413, 3431), True, 'import tensorflow as tf\n'), ((3458, 3487), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['self.pool5'], {}), '(self.pool5)\n', (3475, 3487), True, 'import tensorflow as tf\n'), ((3510, 3563), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.pool5_flat', '(1000)'], {'name': '"""fc1000"""'}), "(self.pool5_flat, 1000, name='fc1000')\n", (3525, 3563), True, 'import tensorflow as tf\n'), ((4272, 4339), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self._scope'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope)\n', (4289, 4339), True, 'import tensorflow as tf\n'), ((4220, 4244), 'numpy.load', 'np.load', (['pretrained_file'], {}), '(pretrained_file)\n', (4227, 4244), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# This is the part of the codes used for the article entitled "A Deep Learning
# Approach for Assessment of Regional Wall Motion Abnormality from
# Echocardiographic Images" for JACC CV imaging.
#
# Before using this code, please prepare image data at "./data_folder" dir.
#
# ./data_folder/train/Norm
# ./data_folder/train/LAD
# ./data_folder/train/LCX
# ./data_folder/train/RCA
#
# ./data_folder/test/Norm
# ./data_folder/test/LAD
# ./data_folder/test/LCX
# ./data_folder/test/RCA
#
# Each dir should have echocardiographic images (.png is recommended and .jpg
# acceptable) that contain endo-diastolic, mid-systolic, and endo-systolic
# phases. We put endo-diastolic for red color image channel, mid-systolic for
# Green and endo-systolic for Blue image channel with Python 3.5 programming
# language with PIL and numpy libraries.
#
# This code was used with
# OS: Ubuntu 14.04LTS
# Programming language: Python 3.5 Anaconda
# Deep Learning library: tensorflow-gpu 1.4.1, Keras 2.1.5
# CUDA toolkit 8.0, CuDNN v5.1
# Python libraries: numpy 1.14.2, Pillow 5.0.0
#
#
# If NeuralNet == "Xception":
#   this code takes about 4 min for training (100 epochs, 320 train/valid)
# with core i7 6850K, RAM 256GB, NVMe SSD w 3.5" HDD, 1080ti.
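# Illustrative sketch only (not part of the original article's code): one possible way to
# build the three-phase composite described above with PIL and numpy, assuming one
# grayscale frame per cardiac phase. The helper name and its arguments are hypothetical.
from PIL import Image
import numpy as np

def compose_phase_image(ed_path, ms_path, es_path, size=(224, 224)):
    # end-diastolic -> red channel, mid-systolic -> green, end-systolic -> blue
    phases = [np.array(Image.open(p).convert('L').resize(size))
              for p in (ed_path, ms_path, es_path)]
    return Image.fromarray(np.stack(phases, axis=-1).astype(np.uint8))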
import os, keras
import numpy as np
from datetime import datetime
from PIL import Image
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
# to select which neuralnetwork to use
#NeuralNet = 'VGG16' # ILSVRC image classification top-1 accuracy of 0.715
#NeuralNet = 'VGG19' # ILSVRC image classification top-1 accuracy of 0.727
NeuralNet = 'ResNet50' # ILSVRC image classification top-1 accuracy of 0.759
#NeuralNet = 'DenseNet201' # ILSVRC image classification top-1 accuracy of 0.770
#NeuralNet = 'InceptionV3' # ILSVRC image classification top-1 accuracy of 0.788
#NeuralNet = 'Xception' # ILSVRC image classification top-1 accuracy of 0.790
#NeuralNet = 'IncResV2' # ILSVRC image classification top-1 accuracy of 0.804
# making training data
image_list = []
label_list = []
for dir_name in os.listdir("data_folder/train"):
dir_train = "data_folder/train/" + dir_name
label = 0
if dir_name == "LAD":
label = 0
elif dir_name == "LCX":
label = 1
elif dir_name == "RCA":
label = 2
elif dir_name == "Norm":
label = 3
for file_name in os.listdir(dir_train):
label_list.append(label)
filepath = dir_train + "/" + file_name
if NeuralNet == 'Xception':
image = np.array(Image.open(filepath).resize((128, 128)))
else:
image = np.array(Image.open(filepath).resize((224, 224)))
image_list.append(image / 255)
image_list = np.array(image_list)
label_list = to_categorical(label_list)
#making neural network
if NeuralNet == 'VGG16':
print('NeuralNetwork: VGG16.\nILSVRC top-1 accuracy of 0.715')
DCNN = keras.applications.vgg16.VGG16(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'VGG19':
    print('NeuralNetwork: VGG19.\nILSVRC top-1 accuracy of 0.727')
DCNN = keras.applications.vgg19.VGG19(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'ResNet50':
print('NeuralNetwork: ResNet50.\nILSVRC top-1 accuracy of 0.759')
DCNN = keras.applications.resnet50.ResNet50(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'DenseNet201':
print('NeuralNetwork: DenseNet201.\nILSVRC top-1 accuracy of 0.770')
DCNN = keras.applications.densenet.DenseNet201(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'InceptionV3':
print('NeuralNetwork: InceptionV3.\nILSVRC top-1 accuracy of 0.788')
DCNN = keras.applications.inception_v3.InceptionV3(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'Xception':
print('NeuralNetwork: Xception.\nILSVRC top-1 accuracy of 0.790')
DCNN = keras.applications.xception.Xception(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'IncResV2':
print('NeuralNetwork: Inception-ResNet-V2.\nILSVRC top-1 accuracy of 0.804')
DCNN = keras.applications.inception_resnet_v2.InceptionResNetV2(
include_top=True, input_tensor=None, pooling=None, classes=1000)
else:
print('error, no neural network.')
opt = Adam(lr = 0.0001)
model = Sequential()
model.add((DCNN))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
optimizer=opt, metrics=["accuracy"])
#training
print('training')
model.fit(image_list, label_list,
epochs=100, batch_size=16, validation_split=0.2)
#saving post-trained model
prefix = datetime.now().strftime("%Y"+"_"+"%m%d"+"_"+"%H%M")
save_name = NeuralNet + '_' + prefix + '.h5'
model.save_weights(save_name)
print('saving post-trained model:', save_name)
print('finished training.')
print('finished: train_DCNN.py')
|
[
"keras.applications.xception.Xception",
"keras.applications.inception_resnet_v2.InceptionResNetV2",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"PIL.Image.open",
"keras.applications.vgg19.VGG19",
"keras.applications.resnet50.ResNet50",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"numpy.array",
"keras.applications.densenet.DenseNet201",
"keras.applications.inception_v3.InceptionV3",
"keras.applications.vgg16.VGG16",
"keras.models.Sequential",
"datetime.datetime.now",
"os.listdir"
] |
[((2248, 2279), 'os.listdir', 'os.listdir', (['"""data_folder/train"""'], {}), "('data_folder/train')\n", (2258, 2279), False, 'import os, keras\n'), ((2895, 2915), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (2903, 2915), True, 'import numpy as np\n'), ((2929, 2955), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['label_list'], {}), '(label_list)\n', (2943, 2955), False, 'from keras.utils.np_utils import to_categorical\n'), ((4609, 4624), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (4613, 4624), False, 'from keras.optimizers import Adam\n'), ((4636, 4648), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4646, 4648), False, 'from keras.models import Sequential\n'), ((2549, 2570), 'os.listdir', 'os.listdir', (['dir_train'], {}), '(dir_train)\n', (2559, 2570), False, 'import os, keras\n'), ((3083, 3183), 'keras.applications.vgg16.VGG16', 'keras.applications.vgg16.VGG16', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None, pooling\n =None, classes=1000)\n', (3113, 3183), False, 'import os, keras\n'), ((4677, 4689), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4684, 4689), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((4701, 4709), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (4706, 4709), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((4721, 4742), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (4731, 4742), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((3293, 3393), 'keras.applications.vgg19.VGG19', 'keras.applications.vgg19.VGG19', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None, pooling\n =None, classes=1000)\n', (3323, 3393), False, 'import os, keras\n'), ((5001, 5015), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5013, 5015), False, 'from datetime import datetime\n'), ((3509, 3614), 'keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, classes=1000)\n', (3545, 3614), False, 'import os, keras\n'), ((3737, 3845), 'keras.applications.densenet.DenseNet201', 'keras.applications.densenet.DenseNet201', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, classes=1000)\n', (3776, 3845), False, 'import os, keras\n'), ((3968, 4081), 'keras.applications.inception_v3.InceptionV3', 'keras.applications.inception_v3.InceptionV3', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=\n None, pooling=None, classes=1000)\n', (4011, 4081), False, 'import os, keras\n'), ((2717, 2737), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (2727, 2737), False, 'from PIL import Image\n'), ((2801, 2821), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (2811, 2821), False, 'from PIL import Image\n'), ((4197, 4302), 'keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, 
classes=1000)\n', (4233, 4302), False, 'import os, keras\n'), ((4430, 4555), 'keras.applications.inception_resnet_v2.InceptionResNetV2', 'keras.applications.inception_resnet_v2.InceptionResNetV2', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True,\n input_tensor=None, pooling=None, classes=1000)\n', (4486, 4555), False, 'import os, keras\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unittest for pex_loader'''
import os
import unittest
import re
import sys
import heron.common.src.python.pex_loader as pex_loader
import heron.common.tests.python.pex_loader.constants as constants
# pylint: disable=missing-docstring
class PexLoaderTest(unittest.TestCase):
def test_deps_regex(self):
# Testing egg_regex to find dependencies
pass_test_cases = [".deps/sample_egg.egg/",
".deps/sample_egg_1234.egg/",
".deps/sample_egg.egg.egg/",
".deps/sample_egg.whl/",
".deps/sample.egg.whl/"]
for test in pass_test_cases:
# should match without the trailing slash
self.assertEqual(re.match(pex_loader.egg_regex, test).group(1), test[:-1])
fail_test_cases = [".deps/sample_egg/",
".deps/sample_egg.egg", # no trailing slash
".deps/sample/egg.egg/", # contains slash
".deps/sample_ egg.egg/", # contains space
"deps/sample_egg.egg/", # not starting from .deps
"/.deps/sample_egg.egg/", # starting from slash
".deps/sample_whl/",
".deps/sample.egg.wh/",
".deps/sample.whl.egg"]
for test in fail_test_cases:
self.assertIsNone(re.match(pex_loader.egg_regex, test))
def test_load_pex(self):
# Testing load_pex without including deps (including deps requires an actual zip file)
test_path = ['sample.pex', 'sample_123.pex', '/tmp/path.pex']
for path in test_path:
pex_loader.load_pex(path, include_deps=False)
abs_path = os.path.abspath(path)
self.assertIn(os.path.dirname(abs_path), sys.path)
def test_sample(self):
path = self.get_path_of_sample(constants.SAMPLE_PEX)
print(path)
pex_loader.load_pex(path)
cls = pex_loader.import_and_get_class(path, constants.SAMPLE_PEX_CLASSPATH)
self.assertIsNotNone(cls)
self.assertEqual(cls.name, "sample class")
self.assertEqual(cls.age, 100)
@staticmethod
def get_path_of_sample(sample):
file_dir = "/".join(os.path.realpath(__file__).split('/')[:-1])
testdata_dir = os.path.join(file_dir, constants.TEST_DATA_PATH)
sample_pex_path = os.path.join(testdata_dir, sample)
return sample_pex_path
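# Illustrative sketch: the pass/fail cases above pin down the shape of pex_loader.egg_regex
# without showing it. The pattern below is only an assumption that satisfies those cases --
# group(1) must be the ".deps/<name>.egg|.whl" prefix without its trailing slash, and entries
# containing '/', spaces, or lacking the leading ".deps/" must not match. The regex actually
# defined in pex_loader may differ. Reuses the module's `import re`.
if __name__ == '__main__':
  candidate_egg_regex = re.compile(r'(\.deps/[^\s/]+\.(?:egg|whl))/')
  assert candidate_egg_regex.match('.deps/sample_egg.egg/').group(1) == '.deps/sample_egg.egg'
  assert candidate_egg_regex.match('.deps/sample_egg.egg') is None    # no trailing slash
  assert candidate_egg_regex.match('/.deps/sample_egg.egg/') is None   # leading slash disallowed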
|
[
"os.path.abspath",
"os.path.dirname",
"os.path.realpath",
"re.match",
"heron.common.src.python.pex_loader.import_and_get_class",
"os.path.join",
"heron.common.src.python.pex_loader.load_pex"
] |
[((2496, 2521), 'heron.common.src.python.pex_loader.load_pex', 'pex_loader.load_pex', (['path'], {}), '(path)\n', (2515, 2521), True, 'import heron.common.src.python.pex_loader as pex_loader\n'), ((2532, 2601), 'heron.common.src.python.pex_loader.import_and_get_class', 'pex_loader.import_and_get_class', (['path', 'constants.SAMPLE_PEX_CLASSPATH'], {}), '(path, constants.SAMPLE_PEX_CLASSPATH)\n', (2563, 2601), True, 'import heron.common.src.python.pex_loader as pex_loader\n'), ((2852, 2900), 'os.path.join', 'os.path.join', (['file_dir', 'constants.TEST_DATA_PATH'], {}), '(file_dir, constants.TEST_DATA_PATH)\n', (2864, 2900), False, 'import os\n'), ((2923, 2957), 'os.path.join', 'os.path.join', (['testdata_dir', 'sample'], {}), '(testdata_dir, sample)\n', (2935, 2957), False, 'import os\n'), ((2251, 2296), 'heron.common.src.python.pex_loader.load_pex', 'pex_loader.load_pex', (['path'], {'include_deps': '(False)'}), '(path, include_deps=False)\n', (2270, 2296), True, 'import heron.common.src.python.pex_loader as pex_loader\n'), ((2314, 2335), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (2329, 2335), False, 'import os\n'), ((1995, 2031), 're.match', 're.match', (['pex_loader.egg_regex', 'test'], {}), '(pex_loader.egg_regex, test)\n', (2003, 2031), False, 'import re\n'), ((2356, 2381), 'os.path.dirname', 'os.path.dirname', (['abs_path'], {}), '(abs_path)\n', (2371, 2381), False, 'import os\n'), ((1345, 1381), 're.match', 're.match', (['pex_loader.egg_regex', 'test'], {}), '(pex_loader.egg_regex, test)\n', (1353, 1381), False, 'import re\n'), ((2789, 2815), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2805, 2815), False, 'import os\n')]
|
from cloudinary.models import CloudinaryField
from customers.models import Customer
from django.db import models
from django.utils.translation import gettext as _
from model.common_fields import BaseModel
from product.models import Product
class Review(BaseModel):
""" Review model for products """
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True)
product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
class Star(models.IntegerChoices):
VS = 5, _('Very satisfied')
ST = 4, _('Satisfied')
NT = 3, _('Neutral')
US = 2, _('Unsatisfied')
VN = 1, _('Very unsatisfied')
star = models.PositiveSmallIntegerField(
_("stars"), choices=Star.choices, default=5)
reviewImage = CloudinaryField('image', null=True, blank=True)
feedback = models.TextField(
help_text="Please share your feedback about the product was the product as described? What is the quality like?",
)
riderReview = models.TextField(
help_text="How was your overall experience with our rider?",
null=True,
blank=True,
)
def __str__(self):
return f"Customer: {self.customer} - Product: {self.product} Rating: - {self.star}"
class Meta:
ordering = ('-star',)
verbose_name_plural = _("Customer feedback")
|
[
"django.db.models.ForeignKey",
"django.db.models.TextField",
"django.utils.translation.gettext",
"cloudinary.models.CloudinaryField"
] |
[((332, 397), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Customer, on_delete=models.SET_NULL, null=True)\n', (349, 397), False, 'from django.db import models\n'), ((413, 477), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Product, on_delete=models.SET_NULL, null=True)\n', (430, 477), False, 'from django.db import models\n'), ((815, 862), 'cloudinary.models.CloudinaryField', 'CloudinaryField', (['"""image"""'], {'null': '(True)', 'blank': '(True)'}), "('image', null=True, blank=True)\n", (830, 862), False, 'from cloudinary.models import CloudinaryField\n'), ((881, 1021), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Please share your feedback about the product was the product as described? What is the quality like?"""'}), "(help_text=\n 'Please share your feedback about the product was the product as described? What is the quality like?'\n )\n", (897, 1021), False, 'from django.db import models\n'), ((1048, 1153), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""How was your overall experience with our rider?"""', 'null': '(True)', 'blank': '(True)'}), "(help_text=\n 'How was your overall experience with our rider?', null=True, blank=True)\n", (1064, 1153), False, 'from django.db import models\n'), ((749, 759), 'django.utils.translation.gettext', '_', (['"""stars"""'], {}), "('stars')\n", (750, 759), True, 'from django.utils.translation import gettext as _\n'), ((1384, 1406), 'django.utils.translation.gettext', '_', (['"""Customer feedback"""'], {}), "('Customer feedback')\n", (1385, 1406), True, 'from django.utils.translation import gettext as _\n'), ((537, 556), 'django.utils.translation.gettext', '_', (['"""Very satisfied"""'], {}), "('Very satisfied')\n", (538, 556), True, 'from django.utils.translation import gettext as _\n'), ((574, 588), 'django.utils.translation.gettext', '_', (['"""Satisfied"""'], {}), "('Satisfied')\n", (575, 588), True, 'from django.utils.translation import gettext as _\n'), ((606, 618), 'django.utils.translation.gettext', '_', (['"""Neutral"""'], {}), "('Neutral')\n", (607, 618), True, 'from django.utils.translation import gettext as _\n'), ((636, 652), 'django.utils.translation.gettext', '_', (['"""Unsatisfied"""'], {}), "('Unsatisfied')\n", (637, 652), True, 'from django.utils.translation import gettext as _\n'), ((670, 691), 'django.utils.translation.gettext', '_', (['"""Very unsatisfied"""'], {}), "('Very unsatisfied')\n", (671, 691), True, 'from django.utils.translation import gettext as _\n')]
|
# -*- coding: utf-8 -*-
from pollirio.modules import expose
from pollirio import choose_dest
from pollirio import conf
@expose('mass')
def mass(bot, ievent, msg=None):
if ievent.channel == conf.nickname:
return
modes = bot.userlist[ievent.channel.lower()][ievent.nick]
if '~' in modes or \
'&' in modes or \
'@' in modes or \
'%' in modes or \
ievent.nick == 'dapal':
users = sorted(bot.userlist[ievent.channel.lower()].keys())
bot.msg(ievent.channel, ' '.join(users))
if msg:
bot.msg(ievent.channel, '\x02%s\x0F' % msg)
@expose('call', 1)
def call(bot, ievent):
""" call <messaggio> """
if ievent.channel == conf.nickname:
return
args = ievent.msg.split(' ', 1)
mass(bot, ievent, msg=args[1])
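# For reference: the mode check in mass() corresponds to the usual IRC rank prefixes
# ('~' owner, '&' admin, '@' operator, '%' half-op), with 'dapal' whitelisted regardless of
# rank. A standalone restatement of that gate (illustrative helper, not used by the plugin):
def _can_trigger_mass(modes, nick):
    return nick == 'dapal' or any(flag in modes for flag in '~&@%')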
|
[
"pollirio.modules.expose"
] |
[((122, 136), 'pollirio.modules.expose', 'expose', (['"""mass"""'], {}), "('mass')\n", (128, 136), False, 'from pollirio.modules import expose\n'), ((605, 622), 'pollirio.modules.expose', 'expose', (['"""call"""', '(1)'], {}), "('call', 1)\n", (611, 622), False, 'from pollirio.modules import expose\n')]
|
import hosts.fleaflicker as site
from hosts.db import overwrite_league
from pandas import DataFrame
import sqlite3
from utilities import DB_PATH
LEAGUE_ID = 34958
TEAM_ID = 217960
LEAGUE_NAME = "Family League"
HOST = 'fleaflicker'
SCORING = {'qb': 'pass6', 'skill': 'ppr', 'dst': 'high'}
# open up our database connection
conn = sqlite3.connect(DB_PATH)
# team list
teams = site.get_teams_in_league(LEAGUE_ID)
overwrite_league(teams, 'teams', conn, LEAGUE_ID)
# schedule info
schedule = site.get_league_schedule(LEAGUE_ID)
overwrite_league(schedule, 'schedule', conn, LEAGUE_ID)
# league info
league = DataFrame([{'league_id': LEAGUE_ID, 'team_id': TEAM_ID, 'host':
HOST.lower(), 'name': LEAGUE_NAME, 'qb_scoring':
SCORING['qb'], 'skill_scoring': SCORING['skill'],
'dst_scoring': SCORING['dst']}])
overwrite_league(league, 'league', conn, LEAGUE_ID)
|
[
"hosts.db.overwrite_league",
"sqlite3.connect",
"hosts.fleaflicker.get_league_schedule",
"hosts.fleaflicker.get_teams_in_league"
] |
[((332, 356), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (347, 356), False, 'import sqlite3\n'), ((378, 413), 'hosts.fleaflicker.get_teams_in_league', 'site.get_teams_in_league', (['LEAGUE_ID'], {}), '(LEAGUE_ID)\n', (402, 413), True, 'import hosts.fleaflicker as site\n'), ((414, 463), 'hosts.db.overwrite_league', 'overwrite_league', (['teams', '"""teams"""', 'conn', 'LEAGUE_ID'], {}), "(teams, 'teams', conn, LEAGUE_ID)\n", (430, 463), False, 'from hosts.db import overwrite_league\n'), ((492, 527), 'hosts.fleaflicker.get_league_schedule', 'site.get_league_schedule', (['LEAGUE_ID'], {}), '(LEAGUE_ID)\n', (516, 527), True, 'import hosts.fleaflicker as site\n'), ((528, 583), 'hosts.db.overwrite_league', 'overwrite_league', (['schedule', '"""schedule"""', 'conn', 'LEAGUE_ID'], {}), "(schedule, 'schedule', conn, LEAGUE_ID)\n", (544, 583), False, 'from hosts.db import overwrite_league\n'), ((867, 918), 'hosts.db.overwrite_league', 'overwrite_league', (['league', '"""league"""', 'conn', 'LEAGUE_ID'], {}), "(league, 'league', conn, LEAGUE_ID)\n", (883, 918), False, 'from hosts.db import overwrite_league\n')]
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("users", "0023_auto_20170820_1623")]
operations = [
migrations.AddField(
model_name="userprofile",
name="lang",
field=models.CharField(
blank=True,
max_length=128,
null=True,
help_text="Language",
choices=[(b"en", "English"), (b"fi", "Finnish"), (b"fr", "French")],
),
)
]
|
[
"django.db.models.CharField"
] |
[((297, 452), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(128)', 'null': '(True)', 'help_text': '"""Language"""', 'choices': "[(b'en', 'English'), (b'fi', 'Finnish'), (b'fr', 'French')]"}), "(blank=True, max_length=128, null=True, help_text=\n 'Language', choices=[(b'en', 'English'), (b'fi', 'Finnish'), (b'fr',\n 'French')])\n", (313, 452), False, 'from django.db import migrations, models\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
from .pu_net import PUNet
class SORDefense(nn.Module):
"""Statistical outlier removal as defense.
"""
def __init__(self, k=2, alpha=1.1):
"""SOR defense.
Args:
k (int, optional): kNN. Defaults to 2.
            alpha (float, optional): \mu + \alpha * std. Defaults to 1.1.
"""
super(SORDefense, self).__init__()
self.k = k
self.alpha = alpha
def outlier_removal(self, x):
"""Removes large kNN distance points.
Args:
x (torch.FloatTensor): batch input pc, [B, K, 3]
Returns:
            list of torch.FloatTensor: per-cloud points after outlier removal, each [N_i, 3]
"""
pc = x.clone().detach().double()
B, K = pc.shape[:2]
pc = pc.transpose(2, 1) # [B, 3, K]
inner = -2. * torch.matmul(pc.transpose(2, 1), pc) # [B, K, K]
xx = torch.sum(pc ** 2, dim=1, keepdim=True) # [B, 1, K]
dist = xx + inner + xx.transpose(2, 1) # [B, K, K]
assert dist.min().item() >= -1e-6
# the min is self so we take top (k + 1)
neg_value, _ = (-dist).topk(k=self.k + 1, dim=-1) # [B, K, k + 1]
value = -(neg_value[..., 1:]) # [B, K, k]
value = torch.mean(value, dim=-1) # [B, K]
mean = torch.mean(value, dim=-1) # [B]
std = torch.std(value, dim=-1) # [B]
threshold = mean + self.alpha * std # [B]
bool_mask = (value <= threshold[:, None]) # [B, K]
sel_pc = [x[i][bool_mask[i]] for i in range(B)]
return sel_pc
def forward(self, x):
with torch.no_grad():
x = self.outlier_removal(x)
return x
class DUPNet(nn.Module):
def __init__(self, sor_k=2, sor_alpha=1.1,
npoint=1024, up_ratio=4):
super(DUPNet, self).__init__()
self.npoint = npoint
self.sor = SORDefense(k=sor_k, alpha=sor_alpha)
self.pu_net = PUNet(npoint=self.npoint, up_ratio=up_ratio,
use_normal=False, use_bn=False, use_res=False)
def process_data(self, pc, npoint=None):
"""Process point cloud data to be suitable for
PU-Net input.
We do two things:
sample npoint or duplicate to npoint.
Args:
pc (torch.FloatTensor): list input, [(N_i, 3)] from SOR.
Need to pad or trim to [B, self.npoint, 3].
"""
if npoint is None:
npoint = self.npoint
B = len(pc)
proc_pc = torch.zeros((B, npoint, 3)).float().cuda()
for pc_idx in range(B):
one_pc = pc[pc_idx]
# [N_i, 3]
N = len(one_pc)
if N > npoint:
# random sample some of them
idx = np.random.choice(N, npoint, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = one_pc[idx]
elif N < npoint:
# just duplicate to the number
duplicated_pc = one_pc
num = npoint // N - 1
for i in range(num):
duplicated_pc = torch.cat([
duplicated_pc, one_pc
], dim=0)
num = npoint - len(duplicated_pc)
# random sample the remaining
idx = np.random.choice(N, num, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = torch.cat([
duplicated_pc, one_pc[idx]
], dim=0)
proc_pc[pc_idx] = one_pc
return proc_pc
def forward(self, x):
# import pdb; pdb.set_trace()
with torch.no_grad():
x = self.sor(x) # a list of pc
x = self.process_data(x) # to batch input
x = self.pu_net(x) # [B, N * r, 3]
return x
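# Minimal CPU-only usage sketch of the SOR step on synthetic data (DUPNet is left out because
# its process_data()/forward() call .cuda() and expect trained PU-Net weights):
if __name__ == '__main__':
    torch.manual_seed(0)
    clean = torch.rand(2, 1024, 3)                  # two synthetic point clouds
    noisy = clean.clone()
    noisy[:, :16] = torch.rand(2, 16, 3) * 10.      # scatter a few points far from the unit cube
    kept = SORDefense(k=2, alpha=1.1)(noisy)        # list of [N_i, 3] tensors
    print([tuple(pc.shape) for pc in kept])         # roughly (1008, 3) each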
|
[
"torch.mean",
"torch.cat",
"torch.std",
"numpy.random.choice",
"torch.zeros",
"torch.no_grad",
"torch.sum",
"torch.from_numpy"
] |
[((938, 977), 'torch.sum', 'torch.sum', (['(pc ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pc ** 2, dim=1, keepdim=True)\n', (947, 977), False, 'import torch\n'), ((1284, 1309), 'torch.mean', 'torch.mean', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1294, 1309), False, 'import torch\n'), ((1335, 1360), 'torch.mean', 'torch.mean', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1345, 1360), False, 'import torch\n'), ((1382, 1406), 'torch.std', 'torch.std', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1391, 1406), False, 'import torch\n'), ((1643, 1658), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1656, 1658), False, 'import torch\n'), ((3726, 3741), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3739, 3741), False, 'import torch\n'), ((2810, 2852), 'numpy.random.choice', 'np.random.choice', (['N', 'npoint'], {'replace': '(False)'}), '(N, npoint, replace=False)\n', (2826, 2852), True, 'import numpy as np\n'), ((3380, 3419), 'numpy.random.choice', 'np.random.choice', (['N', 'num'], {'replace': '(False)'}), '(N, num, replace=False)\n', (3396, 3419), True, 'import numpy as np\n'), ((3503, 3549), 'torch.cat', 'torch.cat', (['[duplicated_pc, one_pc[idx]]'], {'dim': '(0)'}), '([duplicated_pc, one_pc[idx]], dim=0)\n', (3512, 3549), False, 'import torch\n'), ((2558, 2585), 'torch.zeros', 'torch.zeros', (['(B, npoint, 3)'], {}), '((B, npoint, 3))\n', (2569, 2585), False, 'import torch\n'), ((3174, 3215), 'torch.cat', 'torch.cat', (['[duplicated_pc, one_pc]'], {'dim': '(0)'}), '([duplicated_pc, one_pc], dim=0)\n', (3183, 3215), False, 'import torch\n'), ((2875, 2896), 'torch.from_numpy', 'torch.from_numpy', (['idx'], {}), '(idx)\n', (2891, 2896), False, 'import torch\n'), ((3442, 3463), 'torch.from_numpy', 'torch.from_numpy', (['idx'], {}), '(idx)\n', (3458, 3463), False, 'import torch\n')]
|
# Faça um programa que jogue par ou ímpar com o computador. O jogo só será interrompido quando o jogador perder,
# mostrando o total de vitórias consecutivas que ele conquistou no final do jogo.
print(20 * '\033[1;33m=*', 'JOGO PAR & ÍMPAR', '=*' * 20)
print('\033[1;34m~~' * 20, 'MANUAL DE INSTRUÇÕES DO GAME:', '~~' * 200,
'\n1º) Impar e Par, aqui, são representados como: \033[1;4;31mI; \033[1;4;33mP \033[1;34m'
'\n2º) NO EMPATE TODOS GANHAM +0 pts'
'\n3º) TENHA UM BOM JOGO E DIVIRTA-SE\033[m')
print('\033[1;34m~~' * 200)
# Importações Necessárias
from random import choice, randint
from emoji import emojize
# Variávies de controle de 'while', contabilizadoras e aleatorização.
vitoria = False
vitoria_pc = False
usuario = pc = par_impar_pc = impar = par = 0
escolha_pc = ['P', 'I'] # PAR E IMPAR
# Enquanto a vitória do computador for falsa, rodaremos o loop.
while vitoria_pc == False:
# Definindo a função do usuário e suas escolhas
usuario = int(input('\033[1;31;40mESCOLHA UM NÚMERO INTEIRO: '))
par_impar_user = str(input('\033[1;34;40mAPOSTA EM QUAL?[PAR VS ÍMPAR]:\nR=')).upper()[0]
# Definindo a função e randomização do PC
pc = randint(1, 1000) # Possíveis escolhas do PC
par_impar_pc = choice(escolha_pc)
# Verificando vencedor VS perdedor
# 1ª hipótese: EMPATE. PC = USER.
if par_impar_user == par_impar_pc:
print(f'\033[1;4;31mO Usuário escolheu: {par_impar_user} e O PC escolheu: {par_impar_pc}')
# Se fora empate, os dois recebem 0 pt.
vitoria += 0
vitoria_pc += 0
# PLACAR ATUAL DE EMPATE
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Passando dessa verificação
# (2ª hipótese: JOGO NORMAL)
else:
# Somaremos o número escolhido pelo usuário pelo do pc, para sabermos se é: PAR ou ÍMPAR.
valor_final = usuario + pc
# Definindo Números Ímpares
if valor_final % 2 != 0: # Número Ímpar é definido por seu quosciente não ser exato na divisão por 2.
impar += 1 # Números ímpares sendo contabilizados(Qualifica se é ímpar, nessa contagem)
# Classificando quem igualou-se, no palpite, ao Ímpar(PC OU USER).
# Quando for o user, o programa parabeniza-o e continua nesse loop infinito.
if par_impar_user == 'I' and impar != 0 and par_impar_pc == 'P':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = ÍMPAR|')
print(emojize('PARABÉNS, VOCÊ VENCEU, ESCOLHEU ÍMPAR!!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!',
use_aliases=True))
vitoria += 1 # contabilizando as quantidades de vitórias do user
# PLACAR DA VITÓRIA DO USER
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Quando for o PC, o programa, como pedido, parará. (break)
elif par_impar_pc == 'I' and impar != 0 and par_impar_user == 'P':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = ÍMPAR|')
print(emojize('\nHAHAHAHAHAHA VENCI. ESCOLHI ÍMPAR!!:joy:', use_aliases=True))
vitoria_pc += 1 # contabilizando as quantidades de vitórias do pc
break
# Definindo Números Pares
if valor_final % 2 == 0:
par += 1 # Números Pares sendo contabilizados(Qualifica se é Par, nessa contagem)
# Classificando quem igualou-se, no palpite, ao valor Par(PC OU USER).
# Quando for o user, o programa parabeniza-o e continua nesse loop infinito.
            if par_impar_user == 'P' and par != 0 and par_impar_pc == 'I':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = PAR|')
print(emojize('PARABÉNS, VOCÊ GANHOU, ESCOLHEU PAR!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!',
use_aliases=True))
vitoria += 1 # contabilizando as quantidades de vitórias do usuário
# PLACAR DA VITÓRIA DO USER
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Quando for o PC, o programa, como pedido, parará. (break)
elif par_impar_pc == 'P' and par != 0 and par_impar_user == 'I':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = PAR|')
print(emojize('HAHAHA VENCI, ESCOLHI PAR!!:joy:', use_aliases=True))
vitoria_pc += 1 # contabilizando as quantidades de vitórias do pc
break
'''# PLACAR FINAL( USUÁRIO VS PC )
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR FINAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Outra solução própria
# Faça um programa para jogar par ou ímpar com o computador. O jogo só será interrompido quando o jogador perder,
# mostrando o total de vitórias consecutivas que ele conquistou no final do jogo.
# Adaptation: The game will stop when user request
from random import randint, choice
from emoji import emojize
victories_user = 0
victories_pc = 0
print('\033[1;30;41m=*'*20, 'BEM VINDOS AO' ' \033[1;30;46mPAIR OR OPP´s GAME\033[1;30;41m', '=*'*20)
while True:
proceed = str(input('Deseja prosseguir com o jogo? [S/N]')).upper()[0]
if proceed == 'N':
print('\033[1;33;40mJOGO FINALIZADO COM SUCESSO')
break
else:
# USER
user = int(input('\033[1;34;40mEscolha um valor de 0 a 10: '))
while user > 10:
print(emojize('\033[1;40;33mOps...Excedeu o valor proposto!!!\nTente Novamente:thumbs_up:'))
user = int(input('\033[1;34;40mEscolha um valor de 0 a 10: \033[m '))
user_choice = str(input('PAR OU IMPAR?[P/I]:')).upper()[0]
print(f'Você escolheu o número {user} e apostou no {user_choice}' if user_choice == 'PAR'
else f'Você escolheu o número {user} e apostou no {user_choice}MPAR \033[m')
# PC
random_pc_numbers = randint(0, 10)
pc_pair_opp = ['PAR', 'IMPAR']
random_pair_opp = choice(pc_pair_opp)
print(f'\033[1;33mO PC escolheu o número {random_pc_numbers} e apostou no {random_pair_opp} \033[m')
# Final Number's Winner
winner = random_pc_numbers + user
# Final Validation
#1) Winner
# 1º case : Sum pair
if winner % 2 == 0:
if user_choice == 'P' and random_pair_opp == 'IMPAR':
print(f'\033[1;30;42m PARABÉNS!! VOCÊ APOSTOU NO PAR E DEU {winner}')
victories_user += 1
elif user_choice == 'I' and random_pair_opp == 'PAR':
print(f'\033[1;30;42mOPS...VOCÊ APOSTOU IMPAR E PERDEU!!!\n O PC APOSTOU PAR E DEU JUSTAMENTE {winner}')
victories_pc += 1
else:
print(f'\033[1;30;42mCOINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}AR E O PC TAMBÉM {random_pair_opp} '
f'E DEU JUSTAMENTE {winner}'if user_choice == 'P'
else f'COINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU NO {user_choice}MPAR E O PC TAMBÉM APOSTOU NO {random_pair_opp}'
f'E DEU JUSTAMENTE {winner}')
victories_user += 1
victories_pc += 1
# 2 º case : Sum opp
if winner % 2 != 0:
if user_choice == 'I' and random_pair_opp == 'PAR':
print(f'\033[1;30;42mPARABÉNS!! VOCÊ APOSTOU NO IMPAR E DEU {winner}')
victories_user += 1
elif user_choice == 'P' and random_pair_opp == 'IMPAR':
print(f'\033[1;30;42mOPS...VOCÊ APOSTOU PAR E PERDEU!!\n O PC APOSTOU IMPAR E DEU {winner}')
victories_pc += 1
else:
print(f'\033[1;30;42mCOINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}AR E O PC TAMBÉM APOSTOU NO {random_pair_opp} '
f' E DEU JUSTAMENTE {winner}' if user_choice == 'P'
else f'COINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}MPAR E O PC TAMBÉM APOSTOU NO {random_pair_opp}'
f' E DEU JUSTAMENTE {winner}')
victories_user += 1
victories_pc += 1
# Final Score
print('=*'*15, f'PLACAR FINAL', '=*'*15)
print(f'\033[1;36;40m\n\nVOCÊ\033[m : {victories_user} \n\033[1;33;40m VS \n\033[1;35;40mPC\033[m : {victories_pc}')
print('\033[1;33;40m=*'*37)
# Score Validation
if victories_user > victories_pc:
print('FECHAMOS O PROGRAMA COM VOCÊ SENDO O VENCEDOR!!!\n Parabéns e volte sempre')
elif victories_pc == victories_user:
print('FECHAMOS O PROGRAMA COM EMPATE!!\nACEITAMOS REVANCHE!!')
else:
print('FECHAMOS O PROGRAMA COM A VITÓRIA DA MÁQUINA!!\n ACEITAMOS REVANCHE!!')
'''''
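# Both versions above come down to one arithmetic rule: the round is decided by the parity of
# user_number + pc_number -- an even sum means the 'P' (par/even) bet wins, an odd sum means
# 'I' (impar/odd) wins. A minimal restatement (illustrative helper, not used by the script):
def winning_side(user_number, pc_number):
    return 'P' if (user_number + pc_number) % 2 == 0 else 'I'

assert winning_side(3, 5) == 'P'  # 8 is even
assert winning_side(2, 5) == 'I'  # 7 is odd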
|
[
"random.choice",
"emoji.emojize",
"random.randint"
] |
[((1192, 1208), 'random.randint', 'randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (1199, 1208), False, 'from random import choice, randint\n'), ((1256, 1274), 'random.choice', 'choice', (['escolha_pc'], {}), '(escolha_pc)\n', (1262, 1274), False, 'from random import choice, randint\n'), ((2739, 2860), 'emoji.emojize', 'emojize', (['"""PARABÉNS, VOCÊ VENCEU, ESCOLHEU ÍMPAR!!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!"""'], {'use_aliases': '(True)'}), '(\n """PARABÉNS, VOCÊ VENCEU, ESCOLHEU ÍMPAR!!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!"""\n , use_aliases=True)\n', (2746, 2860), False, 'from emoji import emojize\n'), ((4331, 4449), 'emoji.emojize', 'emojize', (['"""PARABÉNS, VOCÊ GANHOU, ESCOLHEU PAR!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!"""'], {'use_aliases': '(True)'}), '(\n """PARABÉNS, VOCÊ GANHOU, ESCOLHEU PAR!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!"""\n , use_aliases=True)\n', (4338, 4449), False, 'from emoji import emojize\n'), ((3630, 3704), 'emoji.emojize', 'emojize', (['"""\nHAHAHAHAHAHA VENCI. ESCOLHI ÍMPAR!!:joy:"""'], {'use_aliases': '(True)'}), '("""\nHAHAHAHAHAHA VENCI. ESCOLHI ÍMPAR!!:joy:""", use_aliases=True)\n', (3637, 3704), False, 'from emoji import emojize\n'), ((5217, 5278), 'emoji.emojize', 'emojize', (['"""HAHAHA VENCI, ESCOLHI PAR!!:joy:"""'], {'use_aliases': '(True)'}), "('HAHAHA VENCI, ESCOLHI PAR!!:joy:', use_aliases=True)\n", (5224, 5278), False, 'from emoji import emojize\n')]
|
from soundcard import pulseaudio as sc
import json
l = []
for mic in sc.all_microphones(exclude_monitors=False):
l.append({'id': mic.id, 'name': mic.name})
s = json.dumps(l)
print(s)
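# Each entry carries exactly the two keys built above, so the output can be round-tripped
# (illustrative consumer-side snippet):
parsed = json.loads(s)
print([entry['name'] for entry in parsed])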
|
[
"soundcard.pulseaudio.all_microphones",
"json.dumps"
] |
[((70, 112), 'soundcard.pulseaudio.all_microphones', 'sc.all_microphones', ([], {'exclude_monitors': '(False)'}), '(exclude_monitors=False)\n', (88, 112), True, 'from soundcard import pulseaudio as sc\n'), ((165, 178), 'json.dumps', 'json.dumps', (['l'], {}), '(l)\n', (175, 178), False, 'import json\n')]
|
import os
import json
import logging
from services.security import Security
from services.agent import AgentFactory
from controllers.controller import routes, Controller
from rasa.core.channels.channel import UserMessage
logger = logging.getLogger(__name__)
class GoogleController(Controller):
def __init__(self, security: Security):
super().__init__()
self.security=security
@routes.get('/')
async def hello(self, request):
text = request.rel_url.query['text']
response=await AgentFactory.load().handle_text(text)
logger.info(json.dumps(response))
return self.json(response)
@routes.post("/google_action")
async def google_webhook(self, request):
req = await request.json()
print(json.dumps(req))
authorization= request.headers['Google-Assistant-Signature']
print(authorization)
print(request.headers)
#self.security.verify_token(authorization)
session_id = req['session'].get('id', None)
locale = req['user']['locale']
lang = locale[:2]
if req['intent']['name'] == 'actions.intent.MAIN':
response_text=os.environ['WELCOME_TEXT']
else:
text = req['intent']['query']
user_message=UserMessage(text=text, sender_id=session_id)
response=await AgentFactory.load().handle_message(user_message)
logger.info(json.dumps(response))
response_text=response[0]['text']
resp={
"session": {
"id": "example_session_id",
"params": {}
},
"prompt": {
"override": False,
"firstSimple": {
"speech": response_text,
"text": response_text
}
},
"scene": {
"name": "Main",
"slots": {},
"next": {
"name": "Main"
}
}
}
return self.json(resp)
#
#
# @routes.post('/google_action')
# def google_action(item: dict, authorization: str = Header(None)):
# print(item, file=sys.stderr)
# print(authorization, file=sys.stderr)
# context = ConversationContext()
# context.request = item
# context: ConversationContext = handler_manager.invoke(context)
# print(context.response, file=sys.stderr)
# return json.loads(context.response)
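# Illustrative request body for google_webhook() above. Field names come from the handler's
# dictionary accesses; the values are made up, and any intent name other than
# 'actions.intent.MAIN' is forwarded to the Rasa agent as a UserMessage.
SAMPLE_REQUEST_BODY = {
    "session": {"id": "example_session_id"},
    "user": {"locale": "en-US"},
    "intent": {"name": "my_custom_intent", "query": "what can you do?"},
}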
|
[
"rasa.core.channels.channel.UserMessage",
"json.dumps",
"controllers.controller.routes.get",
"controllers.controller.routes.post",
"services.agent.AgentFactory.load",
"logging.getLogger"
] |
[((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((404, 419), 'controllers.controller.routes.get', 'routes.get', (['"""/"""'], {}), "('/')\n", (414, 419), False, 'from controllers.controller import routes, Controller\n'), ((645, 674), 'controllers.controller.routes.post', 'routes.post', (['"""/google_action"""'], {}), "('/google_action')\n", (656, 674), False, 'from controllers.controller import routes, Controller\n'), ((582, 602), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (592, 602), False, 'import json\n'), ((769, 784), 'json.dumps', 'json.dumps', (['req'], {}), '(req)\n', (779, 784), False, 'import json\n'), ((1278, 1322), 'rasa.core.channels.channel.UserMessage', 'UserMessage', ([], {'text': 'text', 'sender_id': 'session_id'}), '(text=text, sender_id=session_id)\n', (1289, 1322), False, 'from rasa.core.channels.channel import UserMessage\n'), ((1423, 1443), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1433, 1443), False, 'import json\n'), ((524, 543), 'services.agent.AgentFactory.load', 'AgentFactory.load', ([], {}), '()\n', (541, 543), False, 'from services.agent import AgentFactory\n'), ((1350, 1369), 'services.agent.AgentFactory.load', 'AgentFactory.load', ([], {}), '()\n', (1367, 1369), False, 'from services.agent import AgentFactory\n')]
|
## @ingroup Methods-Weights-Correlations-FLOPS
# operating_items.py
#
# Created: May 2020, <NAME>
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units, Data
import numpy as np
## @ingroup Methods-Weights-Correlations-FLOPS
def operating_items_FLOPS(vehicle):
""" Calculate the weight of operating items, including:
- crew
- baggage
- unusable fuel
- engine oil
- passenger service
- ammunition and non-fixed weapons
- cargo containers
Assumptions:
If no tanks are specified, 5 fuel tanks are assumed (includes main and auxiliary tanks)
        If the number of coach seats is not defined, then it is assumed that 5% of
        the seats are first class and an additional 10% are business class.
If the number of coach seats is defined, then the additional seats are 1/4 first class
and 3/4 business class
Source:
The Flight Optimization System Weight Estimation Method
Inputs:
vehicle - data dictionary with vehicle properties [dimensionless]
-.networks: data dictionary containing all propulsion properties
-.number_of_engines: number of engines
-.sealevel_static_thrust: thrust at sea level [N]
-.reference_area: wing surface area [m^2]
-.mass_properties.max_takeoff: MTOW [kilograms]
-.passengers: number of passengers in aircraft
-.design_mach_number: design mach number for cruise flight
-.design_range: design range of aircraft [nmi]
-.mass_properties.cargo: weight of cargo carried [kilograms]
Outputs:
output - data dictionary with weights [kilograms]
- output.oper_items: unusable fuel, engine oil, passenger service weight and cargo containers
- output.flight_crew: flight crew weight
- output.flight_attendants: flight attendants weight
- output.total: total operating items weight
Properties Used:
N/A
"""
network_name = list(vehicle.networks.keys())[0]
networks = vehicle.networks[network_name]
NENG = networks.number_of_engines
THRUST = networks.sealevel_static_thrust * 1 / Units.lbf
SW = vehicle.reference_area / Units.ft ** 2
FMXTOT = vehicle.mass_properties.max_zero_fuel / Units.lbs
DESRNG = vehicle.design_range / Units.nmi
VMAX = vehicle.design_mach_number
NTANK = 0 # number of fuel tanks
for wing in vehicle.wings:
NTANK += len(wing.Fuel_Tanks)
for fuselage in vehicle.fuselages:
NTANK += len(fuselage.Fuel_Tanks)
if NTANK == 0:
NTANK = 5
WUF = 11.5 * NENG * THRUST ** 0.2 + 0.07 * SW + 1.6 * NTANK * FMXTOT ** 0.28 # unusable fuel weight
WOIL = 0.082 * NENG * THRUST ** 0.65 # engine oil weight
if hasattr(vehicle.fuselages['fuselage'], 'number_coach_seats'):
NPT = vehicle.fuselages['fuselage'].number_coach_seats # number of economy passengers
        NPF = (vehicle.passengers - NPT) / 4.  # number of first class passengers
        NPB = vehicle.passengers - NPF - NPT  # number of business passengers
else:
NPF = vehicle.passengers / 20.
NPB = vehicle.passengers / 10.
NPT = vehicle.passengers - NPF - NPB
vehicle.NPF = NPF
vehicle.NPB = NPB
vehicle.NPT = NPT
WSRV = (5.164 * NPF + 3.846 * NPB + 2.529 * NPT) * (DESRNG / VMAX) ** 0.255 # passenger service weight
WCON = 175 * np.ceil(vehicle.mass_properties.cargo / Units.lbs * 1. / 950) # cargo container weight
if vehicle.passengers >= 150:
NFLCR = 3 # number of flight crew
NGALC = 1 + np.floor(vehicle.passengers / 250.) # number of galley crew
else:
NFLCR = 2
NGALC = 0
if vehicle.passengers < 51:
NFLA = 1 # number of flight attendants, NSTU in FLOPS
else:
NFLA = 1 + np.floor(vehicle.passengers / 40.)
WFLAAB = NFLA * 155 + NGALC * 200 # flight attendant weight, WSTUAB in FLOPS
WFLCRB = NFLCR * 225 # flight crew and baggage weight
output = Data()
output.operating_items_less_crew = WUF * Units.lbs + WOIL * Units.lbs + WSRV * Units.lbs + WCON * Units.lbs
output.flight_crew = WFLCRB * Units.lbs
output.flight_attendants = WFLAAB * Units.lbs
output.total = output.operating_items_less_crew + output.flight_crew + \
output.flight_attendants
return output
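# Numeric sketch of the unusable-fuel term above, which works in English units (lbf, ft^2, lb):
#   WUF = 11.5*NENG*THRUST**0.2 + 0.07*SW + 1.6*NTANK*FMXTOT**0.28
# With made-up twin-jet inputs it comes out to roughly 490 lb:
if __name__ == '__main__':
    NENG, THRUST, SW, NTANK, FMXTOT = 2, 27000.0, 1340.0, 5, 138000.0
    WUF = 11.5 * NENG * THRUST ** 0.2 + 0.07 * SW + 1.6 * NTANK * FMXTOT ** 0.28
    print('unusable fuel weight ~ %.0f lb (%.0f kg)' % (WUF, WUF * 0.453592))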
|
[
"numpy.floor",
"numpy.ceil",
"SUAVE.Core.Data"
] |
[((4650, 4656), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (4654, 4656), False, 'from SUAVE.Core import Units, Data\n'), ((4016, 4078), 'numpy.ceil', 'np.ceil', (['(vehicle.mass_properties.cargo / Units.lbs * 1.0 / 950)'], {}), '(vehicle.mass_properties.cargo / Units.lbs * 1.0 / 950)\n', (4023, 4078), True, 'import numpy as np\n'), ((4202, 4238), 'numpy.floor', 'np.floor', (['(vehicle.passengers / 250.0)'], {}), '(vehicle.passengers / 250.0)\n', (4210, 4238), True, 'import numpy as np\n'), ((4433, 4468), 'numpy.floor', 'np.floor', (['(vehicle.passengers / 40.0)'], {}), '(vehicle.passengers / 40.0)\n', (4441, 4468), True, 'import numpy as np\n')]
|
import logging
import json
from flask import request
from data.database import Repository, User
from data import model
from decorators import task_resources
logger = logging.getLogger(__name__)
@task_resources
def process(resources):
response = []
changed = True
for resource in resources:
p_namespace = resource["namespace"]
p_region = resource["region"]
p_state = resource["state"]
        p_description = resource["description"]
        # the original referenced p_user without defining it; "user" is an assumed field name
        p_user = resource.get("user")
        user = model.user.get_user(p_user)
if user is None:
return {"failed": True, "msg": "User '%s' does not exist" % (p_user)}, 400
return {"failed": False, "changed": changed, "meta": response}, 200
|
[
"data.model.user.get_user",
"logging.getLogger"
] |
[((169, 196), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'import logging\n'), ((490, 517), 'data.model.user.get_user', 'model.user.get_user', (['p_user'], {}), '(p_user)\n', (509, 517), False, 'from data import model\n')]
|
from django.conf.urls import url
from . import views
app_name = 'project-admin'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^consent-records/?$',
views.consent_records, name='consent-records'),
url(r'^config-general-settings/?$',
views.config_general_settings, name='config-general-settings'),
url(r'^config-oh-settings/?$',
views.config_oh_settings, name='config-oh-settings'),
url(r'^config-file-settings/?$',
views.config_file_settings, name='config-file-settings'),
url(r'^config-homepage-text/?$',
views.config_homepage_text, name='config-homepage-text'),
url(r'^login/?$', views.admin_login, name='login'),
url(r'^add-file/?$', views.add_file, name='add-file'),
url(r'^delete-file/(?P<file_id>\w+)/?$', views.delete_file,
name='delete-file'),
]
|
[
"django.conf.urls.url"
] |
[((103, 137), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.home'], {'name': '"""home"""'}), "('^$', views.home, name='home')\n", (106, 137), False, 'from django.conf.urls import url\n'), ((144, 217), 'django.conf.urls.url', 'url', (['"""^consent-records/?$"""', 'views.consent_records'], {'name': '"""consent-records"""'}), "('^consent-records/?$', views.consent_records, name='consent-records')\n", (147, 217), False, 'from django.conf.urls import url\n'), ((232, 334), 'django.conf.urls.url', 'url', (['"""^config-general-settings/?$"""', 'views.config_general_settings'], {'name': '"""config-general-settings"""'}), "('^config-general-settings/?$', views.config_general_settings, name=\n 'config-general-settings')\n", (235, 334), False, 'from django.conf.urls import url\n'), ((344, 431), 'django.conf.urls.url', 'url', (['"""^config-oh-settings/?$"""', 'views.config_oh_settings'], {'name': '"""config-oh-settings"""'}), "('^config-oh-settings/?$', views.config_oh_settings, name=\n 'config-oh-settings')\n", (347, 431), False, 'from django.conf.urls import url\n'), ((441, 534), 'django.conf.urls.url', 'url', (['"""^config-file-settings/?$"""', 'views.config_file_settings'], {'name': '"""config-file-settings"""'}), "('^config-file-settings/?$', views.config_file_settings, name=\n 'config-file-settings')\n", (444, 534), False, 'from django.conf.urls import url\n'), ((544, 637), 'django.conf.urls.url', 'url', (['"""^config-homepage-text/?$"""', 'views.config_homepage_text'], {'name': '"""config-homepage-text"""'}), "('^config-homepage-text/?$', views.config_homepage_text, name=\n 'config-homepage-text')\n", (547, 637), False, 'from django.conf.urls import url\n'), ((647, 696), 'django.conf.urls.url', 'url', (['"""^login/?$"""', 'views.admin_login'], {'name': '"""login"""'}), "('^login/?$', views.admin_login, name='login')\n", (650, 696), False, 'from django.conf.urls import url\n'), ((703, 755), 'django.conf.urls.url', 'url', (['"""^add-file/?$"""', 'views.add_file'], {'name': '"""add-file"""'}), "('^add-file/?$', views.add_file, name='add-file')\n", (706, 755), False, 'from django.conf.urls import url\n'), ((762, 841), 'django.conf.urls.url', 'url', (['"""^delete-file/(?P<file_id>\\\\w+)/?$"""', 'views.delete_file'], {'name': '"""delete-file"""'}), "('^delete-file/(?P<file_id>\\\\w+)/?$', views.delete_file, name='delete-file')\n", (765, 841), False, 'from django.conf.urls import url\n')]
|
import sys
import source as src
import database as db
def main(argv):
config_filename = argv[1]
config = src.read_config(config_filename)
websocket = src.authentication(config['exchange'])
try:
src.run(websocket,config)
except Exception as e:
print("Bot error: {}".format(e))
finally:
src.terminate(websocket)
if __name__ == '__main__':
if len(sys.argv) > 1:
db.create_tables()
main(sys.argv)
else:
print("Try to use: python georgebot.py <configuration_file.yaml>")
|
[
"source.authentication",
"source.terminate",
"source.run",
"database.create_tables",
"source.read_config"
] |
[((108, 140), 'source.read_config', 'src.read_config', (['config_filename'], {}), '(config_filename)\n', (123, 140), True, 'import source as src\n'), ((154, 192), 'source.authentication', 'src.authentication', (["config['exchange']"], {}), "(config['exchange'])\n", (172, 192), True, 'import source as src\n'), ((202, 228), 'source.run', 'src.run', (['websocket', 'config'], {}), '(websocket, config)\n', (209, 228), True, 'import source as src\n'), ((299, 323), 'source.terminate', 'src.terminate', (['websocket'], {}), '(websocket)\n', (312, 323), True, 'import source as src\n'), ((377, 395), 'database.create_tables', 'db.create_tables', ([], {}), '()\n', (393, 395), True, 'import database as db\n')]
|
#######################
##! 1. Importing modules
#######################
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCheckOperator, BigQueryExecuteQueryOperator
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.operators.python import PythonOperator
import requests
import json
#######################
##! 2. Default arguments
#######################
default_args = {
'owner': 'jdpinedaj',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=2),
}
# It is possible to store all those variables as "Variables" within airflow
URL_AIRLINES = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/airlines.csv"
URL_AIRPORTS = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/airports.csv"
URL_FLIGHTS = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/flights.csv"
GCP_CONNECTION_ID = 'google_cloud_default'
PROJECT_ID = 'aa-study'
GCP_LOCATION = 'us-central1'
MY_DATASET = 'sandbox_jpineda'
GS_BUCKET = 'jpineda'
SCHEDULE_INTERVAL = '@once'
URL_CF1 = 'https://us-central1-aa-study.cloudfunctions.net/get-data-upload-to-gcs'
URL_CF2 = 'https://us-central1-aa-study.cloudfunctions.net/raw-schema-processed'
# Additional variables
date = datetime.now().strftime("%Y_%m_%d")
# Functions
def get_data_upload_to_gcs(bucket_name, source_url, destiny_path):
url = URL_CF1
values = {
'bucket_name': bucket_name,
'source_url': source_url,
'destiny_path': destiny_path
}
response = requests.post(url, json=values)
if response.status_code != 200:
raise Exception(
'Bad response from application: {!r} / {!r} / {!r}'.format(
response.status_code, response.headers, response.text))
else:
return response.text
def raw_schema_processed(bucket_name, origin_path, destiny_path, schema_name):
url = URL_CF2,
values = {
'bucket_name': bucket_name,
'origin_path': origin_path,
'destiny_path': destiny_path,
'schema_name': schema_name
}
response = requests.post(url[0], json=values)
if response.status_code != 200:
raise Exception(
'Bad response from application: {!r} / {!r} / {!r}'.format(
response.status_code, response.headers, response.text))
else:
return response.text
#######################
##! 3. Instantiate a DAG
#######################
dag = DAG(dag_id='PoC_Juan_Pineda_DAG_vf',
description='PoC de Juan Pineda',
start_date=datetime.now(),
schedule_interval=SCHEDULE_INTERVAL,
concurrency=5,
max_active_runs=1,
default_args=default_args)
#######################
##! 4. Tasks
#######################
#? 4.1. Starting pipeline
start_pipeline = DummyOperator(task_id='start_pipeline', dag=dag)
#? 4.2. Download data from kaggle in parquet, and upload it into gcs using CLOUD FUNCTIONS
download_airlines_data = PythonOperator(task_id='download_airlines_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_AIRLINES,
"destiny_path":
f"raw/{date}_airlines.parquet"
},
dag=dag)
download_airports_data = PythonOperator(task_id='download_airports_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_AIRPORTS,
"destiny_path":
f"raw/{date}_airports.parquet"
},
dag=dag)
download_flights_data = PythonOperator(task_id='download_flights_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_FLIGHTS,
"destiny_path":
f"raw/{date}_flights.parquet"
},
dag=dag)
#? 4.3. Change schema to raw_data and load it again in processed_data
processing_airlines_data = PythonOperator(
task_id='processing_airlines_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_airlines.parquet",
"destiny_path": f"processed/{date}_airlines.parquet",
"schema_name": "airlines_schema.json"
},
dag=dag)
processing_airports_data = PythonOperator(
task_id='processing_airports_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_airports.parquet",
"destiny_path": f"processed/{date}_airports.parquet",
"schema_name": "airports_schema.json"
},
dag=dag)
processing_flights_data = PythonOperator(
task_id='processing_flights_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_flights.parquet",
"destiny_path": f"processed/{date}_flights.parquet",
"schema_name": "flights_schema.json"
},
dag=dag)
#? 4.4. Load data from gcs to bigquery
load_airlines_data = GCSToBigQueryOperator(
task_id='load_airlines_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_airlines.parquet"],
destination_project_dataset_table=
f'{PROJECT_ID}:{MY_DATASET}.airlines_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
load_airports_data = GCSToBigQueryOperator(
task_id='load_airports_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_airports.parquet"],
destination_project_dataset_table=
f'{PROJECT_ID}:{MY_DATASET}.airports_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
load_flights_data = GCSToBigQueryOperator(
task_id='load_flights_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_flights.parquet"],
destination_project_dataset_table=f'{PROJECT_ID}:{MY_DATASET}.flights_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
#? 4.5. Data check
check_airlines = BigQueryCheckOperator(task_id='check_airlines',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_airlines
FROM `{{ params.project_id }}.{{ params.my_dataset }}.airlines_data`
''',
dag=dag)
check_airports = BigQueryCheckOperator(task_id='check_airports',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_airports
FROM `{{ params.project_id }}.{{ params.my_dataset }}.airports_data`
''',
dag=dag)
check_flights = BigQueryCheckOperator(task_id='check_flights',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_flights
FROM `{{ params.project_id }}.{{ params.my_dataset }}.flights_data`
''',
dag=dag)
loaded_data_to_bigquery = DummyOperator(task_id='loaded_data', dag=dag)
#? 4.6. Generating a view
check_unified_view = BigQueryExecuteQueryOperator(
task_id='check_unified_view',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
destination_dataset_table='{0}.{1}.unified_table'.format(
PROJECT_ID, MY_DATASET),
write_disposition="WRITE_TRUNCATE",
allow_large_results=True,
sql='''
#standardSQL
WITH flights_airlines AS (
SELECT
flights.year,
flights.month,
flights.day,
flights.flight_number,
flights.origin_airport,
flights.airline as airline_iata_code,
airlines.airline
FROM `{0}.{1}.flights_data` flights
LEFT JOIN `{0}.{1}.airlines_data` airlines
ON flights.airline = airlines.iata_code
)
SELECT
year,
month,
day,
airline_iata_code,
airline,
flight_number,
origin_airport,
airports.airport AS name_airport,
airports.city,
airports.state,
airports.latitude,
airports.longitude
FROM flights_airlines
INNER JOIN `{0}.{1}.airports_data` airports
ON flights_airlines.origin_airport = airports.iata_code
'''.format(PROJECT_ID, MY_DATASET),
dag=dag)
#? 4.7. Finishing pipeline
finish_pipeline = DummyOperator(task_id='finish_pipeline', dag=dag)
#######################
##! 5. Setting up dependencies
#######################
start_pipeline >> [
download_airlines_data, download_airports_data, download_flights_data
]
download_airlines_data >> processing_airlines_data >> load_airlines_data >> check_airlines
download_airports_data >> processing_airports_data >> load_airports_data >> check_airports
download_flights_data >> processing_flights_data >> load_flights_data >> check_flights
[check_airlines, check_airports, check_flights
] >> loaded_data_to_bigquery >> check_unified_view >> finish_pipeline
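# Quick structural check of the wiring above (illustrative; uses only standard Airflow task
# attributes): the three BigQuery row-count checks all fan in to the 'loaded_data' dummy task
# before the unified view is built.
if __name__ == '__main__':
    for task in (check_airlines, check_airports, check_flights):
        assert loaded_data_to_bigquery in task.downstream_list
    print(sorted(t.task_id for t in loaded_data_to_bigquery.upstream_list))
    # expected: ['check_airlines', 'check_airports', 'check_flights']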
|
[
"airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator",
"airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator",
"airflow.operators.python.PythonOperator",
"datetime.timedelta",
"requests.post",
"datetime.datetime.now",
"airflow.operators.dummy_operator.DummyOperator"
] |
[((3117, 3165), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""start_pipeline"""', 'dag': 'dag'}), "(task_id='start_pipeline', dag=dag)\n", (3130, 3165), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((3284, 3508), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""download_airlines_data"""', 'python_callable': 'get_data_upload_to_gcs', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'source_url': URL_AIRLINES, 'destiny_path':\n f'raw/{date}_airlines.parquet'}", 'dag': 'dag'}), "(task_id='download_airlines_data', python_callable=\n get_data_upload_to_gcs, op_kwargs={'bucket_name': GS_BUCKET,\n 'source_url': URL_AIRLINES, 'destiny_path':\n f'raw/{date}_airlines.parquet'}, dag=dag)\n", (3298, 3508), False, 'from airflow.operators.python import PythonOperator\n'), ((3948, 4172), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""download_airports_data"""', 'python_callable': 'get_data_upload_to_gcs', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'source_url': URL_AIRPORTS, 'destiny_path':\n f'raw/{date}_airports.parquet'}", 'dag': 'dag'}), "(task_id='download_airports_data', python_callable=\n get_data_upload_to_gcs, op_kwargs={'bucket_name': GS_BUCKET,\n 'source_url': URL_AIRPORTS, 'destiny_path':\n f'raw/{date}_airports.parquet'}, dag=dag)\n", (3962, 4172), False, 'from airflow.operators.python import PythonOperator\n'), ((4611, 4832), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""download_flights_data"""', 'python_callable': 'get_data_upload_to_gcs', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'source_url': URL_FLIGHTS, 'destiny_path':\n f'raw/{date}_flights.parquet'}", 'dag': 'dag'}), "(task_id='download_flights_data', python_callable=\n get_data_upload_to_gcs, op_kwargs={'bucket_name': GS_BUCKET,\n 'source_url': URL_FLIGHTS, 'destiny_path':\n f'raw/{date}_flights.parquet'}, dag=dag)\n", (4625, 4832), False, 'from airflow.operators.python import PythonOperator\n'), ((5335, 5627), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""processing_airlines_data"""', 'python_callable': 'raw_schema_processed', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'origin_path': f'raw/{date}_airlines.parquet',\n 'destiny_path': f'processed/{date}_airlines.parquet', 'schema_name':\n 'airlines_schema.json'}", 'dag': 'dag'}), "(task_id='processing_airlines_data', python_callable=\n raw_schema_processed, op_kwargs={'bucket_name': GS_BUCKET,\n 'origin_path': f'raw/{date}_airlines.parquet', 'destiny_path':\n f'processed/{date}_airlines.parquet', 'schema_name':\n 'airlines_schema.json'}, dag=dag)\n", (5349, 5627), False, 'from airflow.operators.python import PythonOperator\n'), ((5694, 5986), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""processing_airports_data"""', 'python_callable': 'raw_schema_processed', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'origin_path': f'raw/{date}_airports.parquet',\n 'destiny_path': f'processed/{date}_airports.parquet', 'schema_name':\n 'airports_schema.json'}", 'dag': 'dag'}), "(task_id='processing_airports_data', python_callable=\n raw_schema_processed, op_kwargs={'bucket_name': GS_BUCKET,\n 'origin_path': f'raw/{date}_airports.parquet', 'destiny_path':\n f'processed/{date}_airports.parquet', 'schema_name':\n 'airports_schema.json'}, dag=dag)\n", (5708, 5986), False, 'from airflow.operators.python import PythonOperator\n'), ((6052, 6340), 
'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""processing_flights_data"""', 'python_callable': 'raw_schema_processed', 'op_kwargs': "{'bucket_name': GS_BUCKET, 'origin_path': f'raw/{date}_flights.parquet',\n 'destiny_path': f'processed/{date}_flights.parquet', 'schema_name':\n 'flights_schema.json'}", 'dag': 'dag'}), "(task_id='processing_flights_data', python_callable=\n raw_schema_processed, op_kwargs={'bucket_name': GS_BUCKET,\n 'origin_path': f'raw/{date}_flights.parquet', 'destiny_path':\n f'processed/{date}_flights.parquet', 'schema_name':\n 'flights_schema.json'}, dag=dag)\n", (6066, 6340), False, 'from airflow.operators.python import PythonOperator\n'), ((6441, 6796), 'airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator', 'GCSToBigQueryOperator', ([], {'task_id': '"""load_airlines_data"""', 'bucket': 'GS_BUCKET', 'source_objects': "[f'processed/{date}_airlines.parquet']", 'destination_project_dataset_table': 'f"""{PROJECT_ID}:{MY_DATASET}.airlines_data"""', 'source_format': '"""parquet"""', 'write_disposition': '"""WRITE_TRUNCATE"""', 'skip_leading_rows': '(1)', 'autodetect': '(True)', 'location': 'GCP_LOCATION', 'dag': 'dag'}), "(task_id='load_airlines_data', bucket=GS_BUCKET,\n source_objects=[f'processed/{date}_airlines.parquet'],\n destination_project_dataset_table=\n f'{PROJECT_ID}:{MY_DATASET}.airlines_data', source_format='parquet',\n write_disposition='WRITE_TRUNCATE', skip_leading_rows=1, autodetect=\n True, location=GCP_LOCATION, dag=dag)\n", (6462, 6796), False, 'from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator\n'), ((6843, 7198), 'airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator', 'GCSToBigQueryOperator', ([], {'task_id': '"""load_airports_data"""', 'bucket': 'GS_BUCKET', 'source_objects': "[f'processed/{date}_airports.parquet']", 'destination_project_dataset_table': 'f"""{PROJECT_ID}:{MY_DATASET}.airports_data"""', 'source_format': '"""parquet"""', 'write_disposition': '"""WRITE_TRUNCATE"""', 'skip_leading_rows': '(1)', 'autodetect': '(True)', 'location': 'GCP_LOCATION', 'dag': 'dag'}), "(task_id='load_airports_data', bucket=GS_BUCKET,\n source_objects=[f'processed/{date}_airports.parquet'],\n destination_project_dataset_table=\n f'{PROJECT_ID}:{MY_DATASET}.airports_data', source_format='parquet',\n write_disposition='WRITE_TRUNCATE', skip_leading_rows=1, autodetect=\n True, location=GCP_LOCATION, dag=dag)\n", (6864, 7198), False, 'from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator\n'), ((7244, 7596), 'airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator', 'GCSToBigQueryOperator', ([], {'task_id': '"""load_flights_data"""', 'bucket': 'GS_BUCKET', 'source_objects': "[f'processed/{date}_flights.parquet']", 'destination_project_dataset_table': 'f"""{PROJECT_ID}:{MY_DATASET}.flights_data"""', 'source_format': '"""parquet"""', 'write_disposition': '"""WRITE_TRUNCATE"""', 'skip_leading_rows': '(1)', 'autodetect': '(True)', 'location': 'GCP_LOCATION', 'dag': 'dag'}), "(task_id='load_flights_data', bucket=GS_BUCKET,\n source_objects=[f'processed/{date}_flights.parquet'],\n destination_project_dataset_table=\n f'{PROJECT_ID}:{MY_DATASET}.flights_data', source_format='parquet',\n write_disposition='WRITE_TRUNCATE', skip_leading_rows=1, autodetect=\n True, location=GCP_LOCATION, dag=dag)\n", (7265, 7596), False, 'from airflow.providers.google.cloud.transfers.gcs_to_bigquery import 
GCSToBigQueryOperator\n'), ((7654, 8015), 'airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator', 'BigQueryCheckOperator', ([], {'task_id': '"""check_airlines"""', 'use_legacy_sql': '(False)', 'location': 'GCP_LOCATION', 'bigquery_conn_id': 'GCP_CONNECTION_ID', 'params': "{'project_id': PROJECT_ID, 'my_dataset': MY_DATASET}", 'sql': '"""\n #standardSQL\n SELECT count(*) AS num_airlines \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.airlines_data`\n """', 'dag': 'dag'}), '(task_id=\'check_airlines\', use_legacy_sql=False,\n location=GCP_LOCATION, bigquery_conn_id=GCP_CONNECTION_ID, params={\n \'project_id\': PROJECT_ID, \'my_dataset\': MY_DATASET}, sql=\n """\n #standardSQL\n SELECT count(*) AS num_airlines \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.airlines_data`\n """\n , dag=dag)\n', (7675, 8015), False, 'from airflow.providers.google.cloud.operators.bigquery import BigQueryCheckOperator, BigQueryExecuteQueryOperator\n'), ((8376, 8737), 'airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator', 'BigQueryCheckOperator', ([], {'task_id': '"""check_airports"""', 'use_legacy_sql': '(False)', 'location': 'GCP_LOCATION', 'bigquery_conn_id': 'GCP_CONNECTION_ID', 'params': "{'project_id': PROJECT_ID, 'my_dataset': MY_DATASET}", 'sql': '"""\n #standardSQL\n SELECT count(*) AS num_airports \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.airports_data`\n """', 'dag': 'dag'}), '(task_id=\'check_airports\', use_legacy_sql=False,\n location=GCP_LOCATION, bigquery_conn_id=GCP_CONNECTION_ID, params={\n \'project_id\': PROJECT_ID, \'my_dataset\': MY_DATASET}, sql=\n """\n #standardSQL\n SELECT count(*) AS num_airports \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.airports_data`\n """\n , dag=dag)\n', (8397, 8737), False, 'from airflow.providers.google.cloud.operators.bigquery import BigQueryCheckOperator, BigQueryExecuteQueryOperator\n'), ((9097, 9455), 'airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator', 'BigQueryCheckOperator', ([], {'task_id': '"""check_flights"""', 'use_legacy_sql': '(False)', 'location': 'GCP_LOCATION', 'bigquery_conn_id': 'GCP_CONNECTION_ID', 'params': "{'project_id': PROJECT_ID, 'my_dataset': MY_DATASET}", 'sql': '"""\n #standardSQL\n SELECT count(*) AS num_flights \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.flights_data`\n """', 'dag': 'dag'}), '(task_id=\'check_flights\', use_legacy_sql=False,\n location=GCP_LOCATION, bigquery_conn_id=GCP_CONNECTION_ID, params={\n \'project_id\': PROJECT_ID, \'my_dataset\': MY_DATASET}, sql=\n """\n #standardSQL\n SELECT count(*) AS num_flights \n FROM `{{ params.project_id }}.{{ params.my_dataset }}.flights_data`\n """\n , dag=dag)\n', (9118, 9455), False, 'from airflow.providers.google.cloud.operators.bigquery import BigQueryCheckOperator, BigQueryExecuteQueryOperator\n'), ((9816, 9861), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""loaded_data"""', 'dag': 'dag'}), "(task_id='loaded_data', dag=dag)\n", (9829, 9861), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((11395, 11444), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""finish_pipeline"""', 'dag': 'dag'}), "(task_id='finish_pipeline', dag=dag)\n", (11408, 11444), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((753, 773), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (762, 773), False, 'from datetime import 
datetime, timedelta\n'), ((1836, 1867), 'requests.post', 'requests.post', (['url'], {'json': 'values'}), '(url, json=values)\n', (1849, 1867), False, 'import requests\n'), ((2393, 2427), 'requests.post', 'requests.post', (['url[0]'], {'json': 'values'}), '(url[0], json=values)\n', (2406, 2427), False, 'import requests\n'), ((1557, 1571), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1569, 1571), False, 'from datetime import datetime, timedelta\n'), ((2856, 2870), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2868, 2870), False, 'from datetime import datetime, timedelta\n')]
|
import logging
import time
from concurrent.futures import ThreadPoolExecutor
import pytest
import stomp
from stomp.listener import TestListener
from .testutils import *
executor = ThreadPoolExecutor()
def create_thread(fc):
f = executor.submit(fc)
print("Created future %s on executor %s" % (f, executor))
return f
class ReconnectListener(TestListener):
def __init__(self, conn):
TestListener.__init__(self, "123", True)
self.conn = conn
def on_receiver_loop_ended(self, *args):
if self.conn:
c = self.conn
self.conn = None
c.connect(get_default_user(), get_default_password(), wait=True)
c.disconnect()
@pytest.fixture
def conn():
conn = stomp.Connection(get_default_host())
    # override stomp's internal thread creation so the receiver loop runs as a future on the shared executor
conn.transport.override_threading(create_thread)
listener = ReconnectListener(conn)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
class TestThreadingOverride(object):
def test_threading(self, conn):
listener = conn.get_listener("testlistener")
queuename = "/queue/test1-%s" % listener.timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
conn.send(body="this is a test", destination=queuename, receipt="123")
validate_send(conn, 1, 1, 0)
logging.info("first disconnect")
conn.disconnect(receipt="112233")
logging.info("reconnecting")
conn.connect(get_default_user(), get_default_password(), wait=True)
logging.info("second disconnect")
conn.disconnect()
|
[
"logging.info",
"concurrent.futures.ThreadPoolExecutor",
"stomp.listener.TestListener.__init__"
] |
[((184, 204), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (202, 204), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((412, 452), 'stomp.listener.TestListener.__init__', 'TestListener.__init__', (['self', '"""123"""', '(True)'], {}), "(self, '123', True)\n", (433, 452), False, 'from stomp.listener import TestListener\n'), ((1422, 1454), 'logging.info', 'logging.info', (['"""first disconnect"""'], {}), "('first disconnect')\n", (1434, 1454), False, 'import logging\n'), ((1506, 1534), 'logging.info', 'logging.info', (['"""reconnecting"""'], {}), "('reconnecting')\n", (1518, 1534), False, 'import logging\n'), ((1620, 1653), 'logging.info', 'logging.info', (['"""second disconnect"""'], {}), "('second disconnect')\n", (1632, 1653), False, 'import logging\n')]
|
from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton
from PyQt5 import QtGui
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from niapy.util.factory import _algorithm_options
from niaaml_gui.widgets.list_widget_custom import ListWidgetCustom
from niaaml_gui.widgets.base_main_widget import BaseMainWidget
from niaaml_gui.windows import ProcessWindow
from niaaml_gui.process_window_data import ProcessWindowData
from niaaml.classifiers import ClassifierFactory
from niaaml.preprocessing.feature_selection import FeatureSelectionAlgorithmFactory
from niaaml.preprocessing.feature_transform import FeatureTransformAlgorithmFactory
from niaaml.fitness import FitnessFactory
from niaaml.preprocessing.encoding import EncoderFactory
from niaaml.preprocessing.imputation import ImputerFactory
import qtawesome as qta
class OptimizationWidget(BaseMainWidget):
__niaamlFeatureSelectionAlgorithms = FeatureSelectionAlgorithmFactory().get_name_to_classname_mapping()
__niaamlFeatureTransformAlgorithms = FeatureTransformAlgorithmFactory().get_name_to_classname_mapping()
__niaamlClassifiers = ClassifierFactory().get_name_to_classname_mapping()
__niaamlFitnessFunctions = FitnessFactory().get_name_to_classname_mapping()
__niaamlEncoders = EncoderFactory().get_name_to_classname_mapping()
__niaamlImputers = ImputerFactory().get_name_to_classname_mapping()
__niapyAlgorithmsList = list(_algorithm_options().keys())
__niaamlFeatureSelectionAlgorithmsList = list(__niaamlFeatureSelectionAlgorithms.keys())
__niaamlFeatureTransformAlgorithmsList = list(__niaamlFeatureTransformAlgorithms.keys())
__niaamlClassifiersList = list(__niaamlClassifiers.keys())
__niaamlFitnessFunctionsList = list(__niaamlFitnessFunctions.keys())
__niaamlEncodersList = list(__niaamlEncoders.keys())
__niaamlImputersList = list(__niaamlImputers.keys())
def __init__(self, parent, is_v1 = False, *args, **kwargs):
self.__niapyAlgorithmsList.sort()
self.__niaamlFeatureSelectionAlgorithmsList.sort()
self.__niaamlFeatureTransformAlgorithmsList.sort()
self.__niaamlClassifiersList.sort()
self.__is_v1 = is_v1
super().__init__(parent, *args, **kwargs)
fileLayout = QHBoxLayout(self._parent)
selectFileBar = QHBoxLayout(self._parent)
selectFileBar.setSpacing(0)
selectFileBar.setContentsMargins(0, 5, 5, 5)
fNameLine = QLineEdit(self._parent)
fNameLine.setObjectName('csvFile')
fNameLine.setPlaceholderText('Select a CSV dataset file...')
fNameLine.setReadOnly(True)
font = fNameLine.font()
font.setPointSize(12)
fNameLine.setFont(font)
selectFileBar.addWidget(fNameLine)
editBtn = self._createButton(None, self._editCSVFile, 'editCSVButton', qta.icon('fa5.edit'))
editBtn.setEnabled(False)
selectFileBar.addWidget(editBtn)
selectFileBar.addWidget(self._createButton('Select file', self._openCSVFile))
checkBox = QCheckBox('CSV has header')
checkBox.setObjectName('csv')
checkBox.setFont(font)
fileLayout.addItem(selectFileBar)
fileLayout.addWidget(checkBox)
encoders = self.__createComboBox('Categorical features\' encoder:', self.__niaamlEncodersList, 'encoders')
imputers = self.__createComboBox('Missing features\' imputer:', self.__niaamlImputersList, 'imputers')
hBoxLayout = QHBoxLayout(self._parent)
hBoxLayout.setContentsMargins(0, 0, 0, 0)
vBoxLayout = QVBoxLayout(self._parent)
vBoxLayout.setContentsMargins(0, 34, 0, 0)
h1BoxLayout = QHBoxLayout(self._parent)
h1BoxLayout.setContentsMargins(0, 0, 0, 0)
fsasBox = self.__createGridLayoutBox((0, 5, 3, 5), True)
fsasList = self.__createListWidget([], None, 'fsasList')
fsasBox.addWidget(fsasList)
h1BoxLayout.addItem(fsasBox)
ftasBox = self.__createGridLayoutBox((3, 5, 3, 5), True)
ftasList = self.__createListWidget([], None, 'ftasList')
ftasBox.addWidget(ftasList)
h1BoxLayout.addItem(ftasBox)
classifiers = self.__createGridLayoutBox((3, 5, 5, 5), True)
classifiersList = self.__createListWidget([], None, 'classifiersList')
classifiers.addWidget(classifiersList)
h1BoxLayout.addItem(classifiers)
settingsBox = self.__createGridLayoutBox((0, 0, 5, 5), False, 'transparent')
settingsBox.setVerticalSpacing(10)
optAlgosLabel = 'Optimization Algorithm (components selection):' if not self.__is_v1 else 'Optimization Algorithm:'
optAlgos = self.__createComboBox(optAlgosLabel, self.__niapyAlgorithmsList, 'optAlgos')
optAlgosInner = self.__createComboBox('Optimization Algorithm (parameter tuning) - same as first if not selected:', [*['None'], *self.__niapyAlgorithmsList], 'optAlgosInner')
validator = QtGui.QRegExpValidator(QtCore.QRegExp('[1-9][0-9]*'))
popSizeLabel = 'Population size (components selection):' if not self.__is_v1 else 'Population size:'
popSize = self.__createTextInput(popSizeLabel, 'popSize', validator)
popSizeInner = self.__createTextInput('Population size (parameter tuning):', 'popSizeInner', validator)
        numEvalsLabel = 'Number of evaluations (components selection):' if not self.__is_v1 else 'Number of evaluations:'
numEvals = self.__createTextInput(numEvalsLabel, 'numEvals', validator)
numEvalsInner = self.__createTextInput('Number of evaluations (parameter tuning):', 'numEvalsInner', validator)
fitFuncs = self.__createComboBox('Fitness Function:', self.__niaamlFitnessFunctionsList, 'fitFuncs')
selectOutputFolderBar = QHBoxLayout(self._parent)
selectOutputFolderBar.setSpacing(0)
foNameLine = QLineEdit(self._parent)
foNameLine.setObjectName('outputFolder')
foNameLine.setPlaceholderText('Select pipeline output folder...')
foNameLine.setReadOnly(True)
font = foNameLine.font()
font.setPointSize(12)
foNameLine.setFont(font)
selectOutputFolderBar.addWidget(foNameLine)
selectOutputFolderBar.addWidget(self._createButton('Select folder', self.__selectDirectory))
settingsBox.addItem(optAlgos)
if not self.__is_v1:
settingsBox.addItem(optAlgosInner)
settingsBox.addItem(popSize)
if not self.__is_v1:
settingsBox.addItem(popSizeInner)
settingsBox.addItem(numEvals)
if not self.__is_v1:
settingsBox.addItem(numEvalsInner)
settingsBox.addItem(fitFuncs)
settingsBox.addItem(selectOutputFolderBar)
confirmBar = QHBoxLayout(self._parent)
confirmBar.setContentsMargins(5, 5, 5, 5)
confirmBar.addStretch()
confirmBar.addWidget(self._createButton('Start optimization', self.__runOptimize))
vBoxLayout.addItem(fileLayout)
vBoxLayout.addItem(encoders)
vBoxLayout.addItem(imputers)
vBoxLayout.addItem(h1BoxLayout)
vBoxLayout.addItem(settingsBox)
vBoxLayout.addItem(confirmBar)
exploreBox = self.__createGridLayoutBox((0, 0, 0, 0), False)
exploreBox.addWidget(self.__createTabs(fsasList, ftasList, classifiersList))
hBoxLayout.addItem(exploreBox)
hBoxLayout.addItem(vBoxLayout)
hBoxLayout.setStretchFactor(exploreBox, 1)
hBoxLayout.setStretchFactor(vBoxLayout, 2)
self.setLayout(hBoxLayout)
def __createComboBox(self, label, items, name):
comboBox = QVBoxLayout()
comboBox.setSpacing(5)
label = QLabel(label, self._parent)
font = label.font()
font.setPointSize(12)
label.setFont(font)
cb = QComboBox()
cb.setObjectName(name)
cb.setFont(font)
for k in items:
cb.addItem(k)
comboBox.addWidget(label)
comboBox.addWidget(cb)
return comboBox
def __createTextInput(self, label, name, validator=None):
textBox = QVBoxLayout()
textBox.setSpacing(5)
label = QLabel(label, self._parent)
font = label.font()
font.setPointSize(12)
label.setFont(font)
tb = QLineEdit(self._parent)
tb.setObjectName(name)
tb.setFont(font)
textBox.addWidget(label)
textBox.addWidget(tb)
if validator is not None:
tb.setValidator(validator)
return textBox
def __createGridLayoutBox(self, tupleMargins, visibleBorder, background_color = '#fff'):
l = QGridLayout()
l.setContentsMargins(*tupleMargins)
return l
def __createListWidget(self, items, targetBox = None, name = None):
listWidget = ListWidgetCustom(items, targetBox, name)
font = listWidget.font()
font.setPointSize(12)
listWidget.setFont(font)
return listWidget
def __createTabs(self, fsasList, ftasList, classifiersList):
tabs = QTabWidget(self._parent)
fsas = self.__createListWidget(self.__niaamlFeatureSelectionAlgorithmsList, fsasList)
fsasList.setTarget(fsas)
tabs.addTab(fsas, 'Feature Selection Algorithms')
ftas = self.__createListWidget(self.__niaamlFeatureTransformAlgorithmsList, ftasList)
ftasList.setTarget(ftas)
        tabs.addTab(ftas, 'Feature Transform Algorithms')
clas = self.__createListWidget(self.__niaamlClassifiersList, classifiersList)
classifiersList.setTarget(clas)
tabs.addTab(clas, 'Classifiers')
font = tabs.font()
font.setPointSize(10)
tabs.setFont(font)
tabs.setStyleSheet("QTabBar::tab { height: 40px; }")
return tabs
def __selectDirectory(self):
fname = str(QFileDialog.getExistingDirectory(parent=self._parent, caption='Select Directory'))
self.findChild(QLineEdit, 'outputFolder').setText(fname)
def __runOptimize(self):
err = ''
csvSrc = self.findChild(QLineEdit, 'csvFile').text()
if self._isNoneOrWhiteSpace(csvSrc):
err += 'Select CSV dataset file.\n'
encoderName = self.__niaamlEncoders[str(self.findChild(QComboBox, 'encoders').currentText())]
imputerName = self.__niaamlImputers[str(self.findChild(QComboBox, 'imputers').currentText())]
optAlgName = str(self.findChild(QComboBox, 'optAlgos').currentText())
if not self.__is_v1:
optAlgInnerName = str(self.findChild(QComboBox, 'optAlgosInner').currentText())
if optAlgInnerName == 'None':
optAlgInnerName = optAlgName
popSize = self.findChild(QLineEdit, 'popSize').text()
if self._isNoneOrWhiteSpace(popSize):
err += 'Select population size.\n'
else:
try:
popSize = int(popSize)
except:
err += 'Invalid population size value.\n'
if not self.__is_v1:
popSizeInner = self.findChild(QLineEdit, 'popSizeInner').text()
if self._isNoneOrWhiteSpace(popSizeInner):
err += 'Select inner population size.\n'
else:
try:
popSizeInner = int(popSizeInner)
except:
err += 'Invalid inner population size value.\n'
numEvals = self.findChild(QLineEdit, 'numEvals').text()
if self._isNoneOrWhiteSpace(numEvals):
err += 'Select number of evaluations.\n'
else:
try:
numEvals = int(numEvals)
except:
err += 'Invalid number of evaluations.\n'
if not self.__is_v1:
numEvalsInner = self.findChild(QLineEdit, 'numEvalsInner').text()
if self._isNoneOrWhiteSpace(numEvalsInner):
err += 'Select number of inner evaluations.\n'
else:
try:
numEvalsInner = int(numEvalsInner)
except:
err += 'Invalid number of inner evaluations.\n'
fsasList = self.findChild(ListWidgetCustom, 'fsasList')
fsas = [self.__niaamlFeatureSelectionAlgorithms[fsasList.item(i).text()] for i in range(fsasList.count())]
ftasList = self.findChild(ListWidgetCustom, 'ftasList')
ftas = [self.__niaamlFeatureTransformAlgorithms[ftasList.item(i).text()] for i in range(ftasList.count())]
clsList = self.findChild(ListWidgetCustom, 'classifiersList')
classifiers = [self.__niaamlClassifiers[clsList.item(i).text()] for i in range(clsList.count())]
if len(classifiers) == 0:
err += 'Select at least one classifier.\n'
fitnessFunctionName = self.__niaamlFitnessFunctions[str(self.findChild(QComboBox, 'fitFuncs').currentText())]
outputFolder = self.findChild(QLineEdit, 'outputFolder').text()
if self._isNoneOrWhiteSpace(outputFolder):
err += 'Select an output directory.\n'
if not self._isNoneOrWhiteSpace(err):
self._parent.errorMessage.setText(err)
self._parent.errorMessage.show()
return
if not self.__is_v1:
self._processWindow = ProcessWindow(
self._parent,
ProcessWindowData(
True,
csvSrc,
self.findChild(QCheckBox, 'csv').isChecked(),
encoderName,
imputerName,
optAlgName,
optAlgInnerName,
popSize,
popSizeInner,
numEvals,
numEvalsInner,
fsas,
ftas,
classifiers,
fitnessFunctionName,
outputFolder
)
)
else:
self._processWindow = ProcessWindow(
self._parent,
ProcessWindowData(
'v1',
csvSrc,
self.findChild(QCheckBox, 'csv').isChecked(),
encoderName,
imputerName,
optAlgName=optAlgName,
popSize=popSize,
numEvals=numEvals,
fsas=fsas,
ftas=ftas,
classifiers=classifiers,
fitnessFunctionName=fitnessFunctionName,
outputFolder=outputFolder
)
)
self._processWindow.show()
|
[
"PyQt5.QtCore.QRegExp",
"PyQt5.QtWidgets.QGridLayout",
"niaaml.preprocessing.imputation.ImputerFactory",
"niaaml.preprocessing.feature_selection.FeatureSelectionAlgorithmFactory",
"PyQt5.QtWidgets.QVBoxLayout",
"niaaml.fitness.FitnessFactory",
"PyQt5.QtWidgets.QTabWidget",
"PyQt5.QtWidgets.QLabel",
"niapy.util.factory._algorithm_options",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"PyQt5.QtWidgets.QHBoxLayout",
"niaaml.preprocessing.feature_transform.FeatureTransformAlgorithmFactory",
"niaaml.preprocessing.encoding.EncoderFactory",
"niaaml_gui.widgets.list_widget_custom.ListWidgetCustom",
"PyQt5.QtWidgets.QLineEdit",
"niaaml.classifiers.ClassifierFactory",
"qtawesome.icon"
] |
[((2338, 2363), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (2349, 2363), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((2389, 2414), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (2400, 2414), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((2524, 2547), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self._parent'], {}), '(self._parent)\n', (2533, 2547), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((3115, 3142), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""CSV has header"""'], {}), "('CSV has header')\n", (3124, 3142), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((3543, 3568), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (3554, 3568), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((3641, 3666), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self._parent'], {}), '(self._parent)\n', (3652, 3666), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((3741, 3766), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (3752, 3766), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((5846, 5871), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (5857, 5871), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((5937, 5960), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self._parent'], {}), '(self._parent)\n', (5946, 5960), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((6822, 6847), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self._parent'], {}), '(self._parent)\n', (6833, 6847), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((7702, 7715), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (7713, 7715), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((7763, 7790), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['label', 'self._parent'], {}), '(label, self._parent)\n', (7769, 7790), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((7890, 7901), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (7899, 7901), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), 
((8182, 8195), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (8193, 8195), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((8242, 8269), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['label', 'self._parent'], {}), '(label, self._parent)\n', (8248, 8269), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((8369, 8392), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self._parent'], {}), '(self._parent)\n', (8378, 8392), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((8716, 8729), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (8727, 8729), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((8889, 8929), 'niaaml_gui.widgets.list_widget_custom.ListWidgetCustom', 'ListWidgetCustom', (['items', 'targetBox', 'name'], {}), '(items, targetBox, name)\n', (8905, 8929), False, 'from niaaml_gui.widgets.list_widget_custom import ListWidgetCustom\n'), ((9137, 9161), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', (['self._parent'], {}), '(self._parent)\n', (9147, 9161), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((993, 1027), 'niaaml.preprocessing.feature_selection.FeatureSelectionAlgorithmFactory', 'FeatureSelectionAlgorithmFactory', ([], {}), '()\n', (1025, 1027), False, 'from niaaml.preprocessing.feature_selection import FeatureSelectionAlgorithmFactory\n'), ((1101, 1135), 'niaaml.preprocessing.feature_transform.FeatureTransformAlgorithmFactory', 'FeatureTransformAlgorithmFactory', ([], {}), '()\n', (1133, 1135), False, 'from niaaml.preprocessing.feature_transform import FeatureTransformAlgorithmFactory\n'), ((1194, 1213), 'niaaml.classifiers.ClassifierFactory', 'ClassifierFactory', ([], {}), '()\n', (1211, 1213), False, 'from niaaml.classifiers import ClassifierFactory\n'), ((1277, 1293), 'niaaml.fitness.FitnessFactory', 'FitnessFactory', ([], {}), '()\n', (1291, 1293), False, 'from niaaml.fitness import FitnessFactory\n'), ((1349, 1365), 'niaaml.preprocessing.encoding.EncoderFactory', 'EncoderFactory', ([], {}), '()\n', (1363, 1365), False, 'from niaaml.preprocessing.encoding import EncoderFactory\n'), ((1421, 1437), 'niaaml.preprocessing.imputation.ImputerFactory', 'ImputerFactory', ([], {}), '()\n', (1435, 1437), False, 'from niaaml.preprocessing.imputation import ImputerFactory\n'), ((2912, 2932), 'qtawesome.icon', 'qta.icon', (['"""fa5.edit"""'], {}), "('fa5.edit')\n", (2920, 2932), True, 'import qtawesome as qta\n'), ((5049, 5078), 'PyQt5.QtCore.QRegExp', 'QtCore.QRegExp', (['"""[1-9][0-9]*"""'], {}), "('[1-9][0-9]*')\n", (5063, 5078), False, 'from PyQt5 import QtCore\n'), ((9926, 10012), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', ([], {'parent': 'self._parent', 'caption': '"""Select Directory"""'}), "(parent=self._parent, caption=\n 'Select Directory')\n", (9958, 10012), False, 'from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton\n'), ((1503, 1523), 'niapy.util.factory._algorithm_options', 
'_algorithm_options', ([], {}), '()\n', (1521, 1523), False, 'from niapy.util.factory import _algorithm_options\n')]
|
import os
import numpy as np
import tensorflow as tf
def save_weights_resnet152_10channel():
# Initialize configuration
required_input_shape = (7, 7, 10, 64)
output_file_prefix = "resnet152_10channel"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
new_weights[:, :, 3:6, :] = input_layer_weights
new_weights[:, :, 6:9, :] = input_layer_weights
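    # The 10th channel keeps its random initialization (presumably the extra
    # mask channel, judging from the "_mask_weights" output file name).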
weights[0] = new_weights
# Save the new weights
np.save(os.path.join(os.getcwd(), 'data', output_file_prefix + "_mask_weights.npy"), weights)
def save_weights_resnet152_6channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 6, 64)
output_file_prefix = "resnet152_6channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_15channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 15, 64)
output_file_prefix = "resnet152_15channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_13channel(allpretrained=False):
"""
image_1 (3) + image_2 (3) + image_3 (3) + opticalflow_average (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 13, 64)
output_file_prefix = "resnet152_13channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
new_weights[:, :, 3:6, :] = input_layer_weights # Second image.
new_weights[:, :, 6:9, :] = input_layer_weights # Third image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-average.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_16channel(allpretrained=False):
"""
image_1 (3) + opticalflow_1 (3) + image_2 (3) + opticalflow_2 (3) + image_3 (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 16, 64)
output_file_prefix = "resnet152_16channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_7channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 7, 64)
output_file_prefix = "resnet152_7channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
# 7th Channel for Mask uses the randomly initialized weights. Therefore, leave it as it is.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_maskopticalflow_weights.npy"), weights)
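# Hedged usage sketch (not part of the original script): one way to consume the
# saved arrays is to rebuild a ResNet152 whose input layer has the widened channel
# count and assign the saved weights to it. The 10-channel input shape and the
# weights path below are illustrative assumptions.
def load_resnet152_10channel(weights_path=os.path.join("data", "resnet152_10channel_mask_weights.npy")):
    model = tf.keras.applications.ResNet152(weights=None, include_top=False,
                                            input_shape=(224, 224, 10))
    # np.save stored a Python list of arrays as an object array, so unpickling is needed.
    saved_weights = list(np.load(weights_path, allow_pickle=True))
    model.set_weights(saved_weights)
    return model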
|
[
"tensorflow.keras.applications.ResNet152",
"os.getcwd",
"os.path.join",
"numpy.random.normal"
] |
[((280, 350), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (311, 350), True, 'import tensorflow as tf\n'), ((645, 693), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (661, 693), True, 'import numpy as np\n'), ((1335, 1405), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (1366, 1405), True, 'import tensorflow as tf\n'), ((1700, 1748), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (1716, 1748), True, 'import numpy as np\n'), ((2368, 2438), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2399, 2438), True, 'import tensorflow as tf\n'), ((2733, 2781), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (2749, 2781), True, 'import numpy as np\n'), ((3836, 3906), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3867, 3906), True, 'import tensorflow as tf\n'), ((4201, 4249), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (4217, 4249), True, 'import numpy as np\n'), ((5319, 5389), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (5350, 5389), True, 'import tensorflow as tf\n'), ((5684, 5732), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (5700, 5732), True, 'import numpy as np\n'), ((6782, 6852), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (6813, 6852), True, 'import tensorflow as tf\n'), ((7147, 7195), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (7163, 7195), True, 'import numpy as np\n'), ((1948, 2023), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_opticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_opticalflow_weights.npy')\n", (1960, 2023), False, 'import os\n'), ((3313, 3388), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_opticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_opticalflow_weights.npy')\n", (3325, 3388), False, 'import os\n'), ((4787, 4857), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_hybrid_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_hybrid_weights.npy')\n", (4799, 4857), False, 'import os\n'), ((6370, 6440), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_hybrid_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_hybrid_weights.npy')\n", (6382, 
6440), False, 'import os\n'), ((7492, 7571), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_maskopticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_maskopticalflow_weights.npy')\n", (7504, 7571), False, 'import os\n'), ((931, 942), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (940, 942), False, 'import os\n')]
|
from .AOpticalComponent import AOpticalComponent
from ..IRadiant import IRadiant
import astropy.units as u
from astropy.modeling.models import BlackBody
from ..Entry import Entry
from typing import Union
class CosmicBackground(AOpticalComponent):
"""
This class models the spectral radiance of the cosmic background as black body radiator
"""
@u.quantity_input(temp=[u.Kelvin, u.Celsius])
def __init__(self, parent: IRadiant, temp: u.Quantity = 2.725 * u.K, emissivity: float = 1):
"""
        Initialize a new cosmic background source modelled as a black body radiator
Parameters
----------
parent : IRadiant
The parent element of the optical component from which the electromagnetic radiation is received
temp : Quantity in Kelvin / Celsius
Temperature of the black body
emissivity : float
The spectral emissivity coefficient for the optical surface.
Returns
-------
"""
# Create black body model with given temperature
bb = BlackBody(temperature=temp, scale=1 * u.W / (u.m ** 2 * u.nm * u.sr))
# Initialize super class
super().__init__(parent, 1.0, lambda wl: bb(wl) * emissivity)
@staticmethod
def check_config(conf: Entry) -> Union[None, str]:
"""
Check the configuration for this class
Parameters
----------
conf : Entry
The configuration entry to be checked.
Returns
-------
mes : Union[None, str]
The error message of the check. This will be None if the check was successful.
"""
mes = conf.check_quantity("temp", u.K)
if mes is not None:
return mes
if hasattr(conf, "emissivity"):
mes = conf.check_float("emissivity")
if mes is not None:
return mes
|
[
"astropy.modeling.models.BlackBody",
"astropy.units.quantity_input"
] |
[((363, 407), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'temp': '[u.Kelvin, u.Celsius]'}), '(temp=[u.Kelvin, u.Celsius])\n', (379, 407), True, 'import astropy.units as u\n'), ((1041, 1110), 'astropy.modeling.models.BlackBody', 'BlackBody', ([], {'temperature': 'temp', 'scale': '(1 * u.W / (u.m ** 2 * u.nm * u.sr))'}), '(temperature=temp, scale=1 * u.W / (u.m ** 2 * u.nm * u.sr))\n', (1050, 1110), False, 'from astropy.modeling.models import BlackBody\n')]
|
'''
=========================================
Inference for Non-Linear Gaussian Systems
=========================================
This module contains the Unscented Kalman Filter (Wan, <NAME> 2000)
for state estimation in systems with non-Gaussian noise and non-linear dynamics
'''
from collections import namedtuple
import numpy as np
from numpy import ma
from scipy import linalg
from .utils import array1d, array2d, check_random_state, get_params, preprocess_arguments
from .standard import _last_dims, _determine_dimensionality, _arg_or_default
# represents a collection of sigma points and their associated weights. one
# point per row
SigmaPoints = namedtuple(
'SigmaPoints',
['points', 'weights_mean', 'weights_covariance']
)
# represents mean and covariance of a multivariate normal distribution
Moments = namedtuple('Moments', ['mean', 'covariance'])
def points2moments(points, sigma_noise=None):
'''Calculate estimated mean and covariance of sigma points
Parameters
----------
points : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
SigmaPoints object containing points and weights
sigma_noise : [n_dim_state, n_dim_state] array
additive noise covariance matrix, if any
Returns
-------
moments : Moments object of size [n_dim_state]
Mean and covariance estimated using points
'''
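    # mean and covariance of the weighted points:
    #   mu = sum_i W_i^m X_i,   sigma = sum_i W_i^c (X_i - mu)(X_i - mu)^T (+ sigma_noise)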
(points, weights_mu, weights_sigma) = points
mu = points.T.dot(weights_mu)
points_diff = points.T - mu[:, np.newaxis]
sigma = points_diff.dot(np.diag(weights_sigma)).dot(points_diff.T)
if sigma_noise is not None:
sigma = sigma + sigma_noise
return Moments(mu.ravel(), sigma)
def moments2points(moments, alpha=None, beta=None, kappa=None):
'''Calculate "sigma points" used in Unscented Kalman Filter
Parameters
----------
moments : [n_dim] Moments object
mean and covariance of a multivariate normal
alpha : float
Spread of the sigma points. Typically 1e-3.
beta : float
Used to "incorporate prior knowledge of the distribution of the state".
        2 is optimal if the state is normally distributed.
kappa : float
        secondary scaling parameter; this implementation defaults to 3 - n_dim when not given
Returns
-------
points : [2*n_dim+1, n_dim] SigmaPoints
sigma points and associated weights
'''
(mu, sigma) = moments
n_dim = len(mu)
mu = array2d(mu, dtype=float)
if alpha is None:
alpha = 1.0
if beta is None:
beta = 0.0
if kappa is None:
kappa = 3.0 - n_dim
# compute sqrt(sigma)
sigma2 = linalg.cholesky(sigma).T
# Calculate scaling factor for all off-center points
lamda = (alpha * alpha) * (n_dim + kappa) - n_dim
c = n_dim + lamda
# calculate the sigma points; that is,
# mu
# mu + each column of sigma2 * sqrt(c)
# mu - each column of sigma2 * sqrt(c)
# Each column of points is one of these.
points = np.tile(mu.T, (1, 2 * n_dim + 1))
points[:, 1:(n_dim + 1)] += sigma2 * np.sqrt(c)
points[:, (n_dim + 1):] -= sigma2 * np.sqrt(c)
# Calculate weights
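    #   w_0^m = lamda / c and w_i^m = 1 / (2 c) for i > 0, with c = n_dim + lamda;
    #   w_0^c = w_0^m + (1 - alpha^2 + beta) and w_i^c = w_i^m for i > 0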
weights_mean = np.ones(2 * n_dim + 1)
weights_mean[0] = lamda / c
weights_mean[1:] = 0.5 / c
weights_cov = np.copy(weights_mean)
weights_cov[0] = lamda / c + (1 - alpha * alpha + beta)
return SigmaPoints(points.T, weights_mean, weights_cov)
def unscented_transform(points, f=None, points_noise=None, sigma_noise=None):
'''Apply the Unscented Transform to a set of points
Apply f to points (with secondary argument points_noise, if available),
then approximate the resulting mean and covariance. If sigma_noise is
available, treat it as additional variance due to additive noise.
Parameters
----------
points : [n_points, n_dim_state] SigmaPoints
points to pass into f's first argument and associated weights if f is
defined. If f is unavailable, then f is assumed to be the identity
function.
f : [n_dim_state, n_dim_state_noise] -> [n_dim_state] function
transition function from time t to time t+1, if available.
points_noise : [n_points, n_dim_state_noise] array
points to pass into f's second argument, if any
sigma_noise : [n_dim_state, n_dim_state] array
covariance matrix for additive noise, if any
Returns
-------
points_pred : [n_points, n_dim_state] SigmaPoints
points transformed by f with same weights
moments_pred : [n_dim_state] Moments
moments associated with points_pred
'''
n_points, n_dim_state = points.points.shape
(points, weights_mean, weights_covariance) = points
# propagate points through f
if f is not None:
if points_noise is None:
points_pred = [f(points[i]) for i in range(n_points)]
else:
points_noise = points_noise.points
points_pred = [f(points[i], points_noise[i]) for i in range(n_points)]
else:
points_pred = points
# make each row a predicted point
points_pred = np.vstack(points_pred)
points_pred = SigmaPoints(points_pred, weights_mean, weights_covariance)
# calculate approximate mean, covariance
moments_pred = points2moments(points_pred, sigma_noise)
return (points_pred, moments_pred)
def unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
'''Correct predicted state estimates with an observation
Parameters
----------
cross_sigma : [n_dim_state, n_dim_obs] array
cross-covariance between the state at time t given all observations
from timesteps [0, t-1] and the observation at time t
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t given observations from
timesteps [0, t-1]
obs_moments_pred : [n_dim_obs] Moments
mean and covariance of observation at time t given observations from
times [0, t-1]
z : [n_dim_obs] array
observation at time t
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t given observations from time
steps [0, t]
'''
mu_pred, sigma_pred = moments_pred
obs_mu_pred, obs_sigma_pred = obs_moments_pred
n_dim_state = len(mu_pred)
n_dim_obs = len(obs_mu_pred)
if not np.any(ma.getmask(z)):
# calculate Kalman gain
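        # K = Cov(x_t, z_t | z_{0:t-1}) Var(z_t | z_{0:t-1})^{-1}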
K = cross_sigma.dot(linalg.pinv(obs_sigma_pred))
# correct mu, sigma
mu_filt = mu_pred + K.dot(z - obs_mu_pred)
sigma_filt = sigma_pred - K.dot(cross_sigma.T)
else:
# no corrections to be made
mu_filt = mu_pred
sigma_filt = sigma_pred
return Moments(mu_filt, sigma_filt)
def augmented_points(momentses):
'''Calculate sigma points for augmented UKF
Parameters
----------
momentses : list of Moments
means and covariances for multiple multivariate normals
Returns
-------
pointses : list of Points
sigma points for each element of momentses
'''
# stack everything together
means, covariances = zip(*momentses)
mu_aug = np.concatenate(means)
sigma_aug = linalg.block_diag(*covariances)
moments_aug = Moments(mu_aug, sigma_aug)
# turn augmented representation into sigma points
points_aug = moments2points(moments_aug)
# unstack everything
dims = [len(m) for m in means]
result = []
start = 0
for i in range(len(dims)):
end = start + dims[i]
part = SigmaPoints(
points_aug.points[:, start:end],
points_aug.weights_mean,
points_aug.weights_covariance
)
result.append(part)
start = end
# return
return result
def augmented_unscented_filter_points(mean_state, covariance_state,
covariance_transition,
covariance_observation):
"""Extract sigma points using augmented state representation
Primarily used as a pre-processing step before predicting and updating in
the Augmented UKF.
Parameters
----------
mean_state : [n_dim_state] array
mean of state at time t given observations from time steps 0...t
covariance_state : [n_dim_state, n_dim_state] array
covariance of state at time t given observations from time steps 0...t
covariance_transition : [n_dim_state, n_dim_state] array
covariance of zero-mean noise resulting from transitioning from time
step t to t+1
covariance_observation : [n_dim_obs, n_dim_obs] array
covariance of zero-mean noise resulting from observation state at time
t+1
Returns
-------
points_state : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for state at time t
points_transition : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for transition noise between time t and t+1
points_observation : [2 * n_dim_state + 1, n_dim_obs] SigmaPoints
sigma points for observation noise at time step t+1
"""
# get sizes of dimensions
n_dim_state = covariance_state.shape[0]
n_dim_obs = covariance_observation.shape[0]
# extract sigma points using augmented representation
state_moments = Moments(mean_state, covariance_state)
transition_noise_moments = (
Moments(np.zeros(n_dim_state), covariance_transition)
)
observation_noise_moments = (
Moments(np.zeros(n_dim_obs), covariance_observation)
)
(points_state, points_transition, points_observation) = (
augmented_points([
state_moments,
transition_noise_moments,
observation_noise_moments
])
)
return (points_state, points_transition, points_observation)
def unscented_filter_predict(transition_function, points_state,
points_transition=None,
sigma_transition=None):
"""Predict next state distribution
Using the sigma points representing the state at time t given observations
from time steps 0...t, calculate the predicted mean, covariance, and sigma
points for the state at time t+1.
Parameters
----------
transition_function : function
function describing how the state changes between times t and t+1
points_state : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the state at time step t given
observations from time steps 0...t
points_transition : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the noise in transitioning from time step
t to t+1, if available. If not, assumes that noise is additive
sigma_transition : [n_dim_state, n_dim_state] array
covariance corresponding to additive noise in transitioning from time
step t to t+1, if available. If not, assumes noise is not additive.
Returns
-------
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to state at time step t+1 given observations
from time steps 0...t. These points have not been "standardized" by the
unscented transform yet.
moments_pred : [n_dim_state] Moments
mean and covariance corresponding to time step t+1 given observations
from time steps 0...t
"""
assert points_transition is not None or sigma_transition is not None, \
"Your system is noiseless? really?"
(points_pred, moments_pred) = (
unscented_transform(
points_state, transition_function,
points_noise=points_transition, sigma_noise=sigma_transition
)
)
return (points_pred, moments_pred)
def unscented_filter_correct(observation_function, moments_pred,
points_pred, observation,
points_observation=None,
sigma_observation=None):
"""Integrate new observation to correct state estimates
Parameters
----------
observation_function : function
function characterizing how the observation at time t+1 is generated
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to moments_pred
observation : [n_dim_state] array
observation at time t+1. If masked, treated as missing.
points_observation : [2*n_dim_state, n_dim_obs] SigmaPoints
sigma points corresponding to predicted observation at time t+1 given
observations from times 0...t, if available. If not, noise is assumed
to be additive.
sigma_observation : [n_dim_obs, n_dim_obs] array
covariance matrix corresponding to additive noise in observation at
        time t+1, if available. If missing, noise is assumed to be non-additive.
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t+1
"""
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
(obs_points_pred, obs_moments_pred) = (
unscented_transform(
points_pred, observation_function,
points_noise=points_observation, sigma_noise=sigma_observation
)
)
# Calculate Cov(x_t, z_t | z_{0:t-1})
sigma_pair = (
((points_pred.points - moments_pred.mean).T)
.dot(np.diag(points_pred.weights_mean))
.dot(obs_points_pred.points - obs_moments_pred.mean)
)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
moments_filt = unscented_correct(sigma_pair, moments_pred, obs_moments_pred, observation)
return moments_filt
def augmented_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with arbitrary noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
process noise and outputs the next state.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        observations corresponding to times [0, T-1]
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
# TODO use _augumented_unscented_filter_update here
for t in range(T):
# Calculate sigma points for augmented state:
# [actual state, transition noise, observation noise]
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
# extract sigma points using augmented representation
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(mu, sigma, Q, R)
)
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1}) and sigma points
# for P(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
points_transition=points_transition
)
)
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], points_observation=points_observation
)
)
return (mu_filt, sigma_filt)
def augmented_unscented_smoother(mu_filt, sigma_filt, f, Q):
'''Apply the Unscented Kalman Smoother with arbitrary noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
process noise and outputs the next state.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for [state, transition noise]
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
moments_transition_noise = Moments(np.zeros(n_dim_state), Q)
(points_state, points_transition) = (
augmented_points([moments_state, moments_transition_noise])
)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = unscented_transform(
points_state, f_t, points_noise=points_transition
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
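        # RTS smoother gain: G_t = Cov(x_t, x_{t+1}) Var(x_{t+1})^{-1}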
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
def additive_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with additive noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
the next.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        observations corresponding to times [0, T-1]
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
for t in range(T):
# Calculate sigma points for P(x_{t-1} | z_{0:t-1})
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
points_state = moments2points(Moments(mu, sigma))
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, sigma_transition=Q
)
)
points_pred = moments2points(moments_pred)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], sigma_observation=R
)
)
return (mu_filt, sigma_filt)
def additive_unscented_smoother(mu_filt, sigma_filt, f, Q):
    '''Apply the Unscented Kalman Smoother assuming additive noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
the next.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for state
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
points_state = moments2points(moments_state)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_transform(points_state, f_t, sigma_noise=Q)
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
class UnscentedMixin(object):
"""Methods shared by all Unscented Kalman Filter implementations."""
def __init__(self, transition_functions=None, observation_functions=None,
transition_covariance=None, observation_covariance=None,
initial_state_mean=None, initial_state_covariance=None,
n_dim_state=None, n_dim_obs=None, random_state=None):
# determine size of state and observation space
n_dim_state = _determine_dimensionality(
[(transition_covariance, array2d, -2),
(initial_state_covariance, array2d, -2),
(initial_state_mean, array1d, -1)],
n_dim_state
)
n_dim_obs = _determine_dimensionality(
[(observation_covariance, array2d, -2)],
n_dim_obs
)
# set parameters
self.transition_functions = transition_functions
self.observation_functions = observation_functions
self.transition_covariance = transition_covariance
self.observation_covariance = observation_covariance
self.initial_state_mean = initial_state_mean
self.initial_state_covariance = initial_state_covariance
self.n_dim_state = n_dim_state
self.n_dim_obs = n_dim_obs
self.random_state = random_state
def _initialize_parameters(self):
"""Retrieve parameters if they exist, else replace with defaults"""
arguments = get_params(self)
defaults = self._default_parameters()
converters = self._converters()
processed = preprocess_arguments([arguments, defaults], converters)
return (
processed['transition_functions'],
processed['observation_functions'],
processed['transition_covariance'],
processed['observation_covariance'],
processed['initial_state_mean'],
processed['initial_state_covariance']
)
def _parse_observations(self, obs):
"""Safely convert observations to their expected format"""
obs = ma.atleast_2d(obs)
if obs.shape[0] == 1 and obs.shape[1] > 1:
obs = obs.T
return obs
def _converters(self):
return {
'transition_functions': array1d,
'observation_functions': array1d,
'transition_covariance': array2d,
'observation_covariance': array2d,
'initial_state_mean': array1d,
'initial_state_covariance': array2d,
'n_dim_state': int,
'n_dim_obs': int,
'random_state': check_random_state,
}
class UnscentedKalmanFilter(UnscentedMixin):
r'''Implements the General (aka Augmented) Unscented Kalman Filter governed
by the following equations,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t, \text{Normal}(0, Q)) \\
z_{t} &= g_t(x_t, \text{Normal}(0, R))
Notice that although the input noise to the state transition equation and
the observation equation are both normally distributed, any non-linear
transformation may be applied afterwards. This allows for greater
generality, but at the expense of computational complexity. The complexity
of :class:`UnscentedKalmanFilter.filter()` is :math:`O(T(2n+m)^3)`
where :math:`T` is the number of time steps, :math:`n` is the size of the
state space, and :math:`m` is the size of the observation space.
If your noise is simply additive, consider using the
:class:`AdditiveUnscentedKalmanFilter`
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state and the transition
noise at time t and produces the state at time t+1. Also known as
:math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state and the observation
noise at time t and produces the observation at time t. Also known as
:math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
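    # Illustrative usage sketch (assumed example, not part of the original class): a
    # one-dimensional model in which the noise is passed explicitly into the functions,
    #
    #   ukf = UnscentedKalmanFilter(
    #       transition_functions=lambda x, w: x + np.sin(x) + w,
    #       observation_functions=lambda x, v: x + v,
    #       transition_covariance=0.05 * np.eye(1),
    #       observation_covariance=0.5 * np.eye(1),
    #       initial_state_mean=np.zeros(1),
    #       initial_state_covariance=np.eye(1),
    #   )
    #   filtered_means, filtered_covs = ukf.filter(observations)
    #   smoothed_means, smoothed_covs = ukf.smooth(observations)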
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
        random_state : optional, int or RandomState
random number generator
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = rng.multivariate_normal(
initial_state_mean, initial_state_covariance
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1], transition_noise)
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t], observation_noise)
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
augmented_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(
filtered_state_mean, filtered_state_covariance,
transition_covariance, observation_covariance
)
)
# predict
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, points_transition
)
)
# correct
next_filtered_state_mean, next_filtered_state_covariance = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, points_observation=points_observation
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
augmented_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state, noise: state + noise,
'observation_functions': lambda state, noise: state + noise,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
class AdditiveUnscentedKalmanFilter(UnscentedMixin):
r'''Implements the Unscented Kalman Filter with additive noise.
Observations are assumed to be generated from the following process,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t) + \text{Normal}(0, Q) \\
z_{t} &= g_t(x_t) + \text{Normal}(0, R)
    While less general than the general-noise Unscented Kalman Filter, the Additive
version is more computationally efficient with complexity :math:`O(Tn^3)`
where :math:`T` is the number of time steps and :math:`n` is the size of
the state space.
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state at time t and
produces the state at time t+1. Also known as :math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state at time t and
produces the observation at time t. Also known as :math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`.
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`.
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
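    # Illustrative usage sketch (assumed example): the additive variant takes
    # noise-free functions of the state alone,
    #
    #   akf = AdditiveUnscentedKalmanFilter(
    #       transition_functions=lambda x: x + np.sin(x),
    #       observation_functions=lambda x: x,
    #       transition_covariance=0.05 * np.eye(1),
    #       observation_covariance=0.5 * np.eye(1),
    #       initial_state_mean=np.zeros(1),
    #       initial_state_covariance=np.eye(1),
    #   )
    #   smoothed_means, smoothed_covs = akf.smooth(observations)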
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = (
rng.multivariate_normal(
initial_state_mean,
initial_state_covariance
)
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1]) + transition_noise
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t]) + observation_noise
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
moments_state = Moments(filtered_state_mean, filtered_state_covariance)
points_state = moments2points(moments_state)
# predict
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
sigma_transition=transition_covariance
)
)
points_pred = moments2points(moments_pred)
# correct
(next_filtered_state_mean, next_filtered_state_covariance) = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, sigma_observation=observation_covariance
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = ma.asarray(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
additive_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state: state,
'observation_functions': lambda state: state,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
|
[
"numpy.diag",
"numpy.concatenate",
"numpy.copy",
"numpy.eye",
"scipy.linalg.block_diag",
"scipy.linalg.cholesky",
"numpy.zeros",
"numpy.ones",
"numpy.ma.atleast_2d",
"numpy.tile",
"collections.namedtuple",
"numpy.ma.getmask",
"numpy.ma.asarray",
"scipy.linalg.pinv",
"numpy.vstack",
"numpy.sqrt"
] |
[((681, 756), 'collections.namedtuple', 'namedtuple', (['"""SigmaPoints"""', "['points', 'weights_mean', 'weights_covariance']"], {}), "('SigmaPoints', ['points', 'weights_mean', 'weights_covariance'])\n", (691, 756), False, 'from collections import namedtuple\n'), ((850, 895), 'collections.namedtuple', 'namedtuple', (['"""Moments"""', "['mean', 'covariance']"], {}), "('Moments', ['mean', 'covariance'])\n", (860, 895), False, 'from collections import namedtuple\n'), ((2959, 2992), 'numpy.tile', 'np.tile', (['mu.T', '(1, 2 * n_dim + 1)'], {}), '(mu.T, (1, 2 * n_dim + 1))\n', (2966, 2992), True, 'import numpy as np\n'), ((3140, 3162), 'numpy.ones', 'np.ones', (['(2 * n_dim + 1)'], {}), '(2 * n_dim + 1)\n', (3147, 3162), True, 'import numpy as np\n'), ((3244, 3265), 'numpy.copy', 'np.copy', (['weights_mean'], {}), '(weights_mean)\n', (3251, 3265), True, 'import numpy as np\n'), ((5065, 5087), 'numpy.vstack', 'np.vstack', (['points_pred'], {}), '(points_pred)\n', (5074, 5087), True, 'import numpy as np\n'), ((7140, 7161), 'numpy.concatenate', 'np.concatenate', (['means'], {}), '(means)\n', (7154, 7161), True, 'import numpy as np\n'), ((7178, 7209), 'scipy.linalg.block_diag', 'linalg.block_diag', (['*covariances'], {}), '(*covariances)\n', (7195, 7209), False, 'from scipy import linalg\n'), ((15128, 15154), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state)'], {}), '((T, n_dim_state))\n', (15136, 15154), True, 'import numpy as np\n'), ((15172, 15211), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state, n_dim_state)'], {}), '((T, n_dim_state, n_dim_state))\n', (15180, 15211), True, 'import numpy as np\n'), ((17803, 17826), 'numpy.zeros', 'np.zeros', (['mu_filt.shape'], {}), '(mu_filt.shape)\n', (17811, 17826), True, 'import numpy as np\n'), ((17846, 17872), 'numpy.zeros', 'np.zeros', (['sigma_filt.shape'], {}), '(sigma_filt.shape)\n', (17854, 17872), True, 'import numpy as np\n'), ((20574, 20600), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state)'], {}), '((T, n_dim_state))\n', (20582, 20600), True, 'import numpy as np\n'), ((20618, 20657), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state, n_dim_state)'], {}), '((T, n_dim_state, n_dim_state))\n', (20626, 20657), True, 'import numpy as np\n'), ((22901, 22924), 'numpy.zeros', 'np.zeros', (['mu_filt.shape'], {}), '(mu_filt.shape)\n', (22909, 22924), True, 'import numpy as np\n'), ((22944, 22970), 'numpy.zeros', 'np.zeros', (['sigma_filt.shape'], {}), '(sigma_filt.shape)\n', (22952, 22970), True, 'import numpy as np\n'), ((2597, 2619), 'scipy.linalg.cholesky', 'linalg.cholesky', (['sigma'], {}), '(sigma)\n', (2612, 2619), False, 'from scipy import linalg\n'), ((3034, 3044), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3041, 3044), True, 'import numpy as np\n'), ((3085, 3095), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3092, 3095), True, 'import numpy as np\n'), ((9389, 9410), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (9397, 9410), True, 'import numpy as np\n'), ((9491, 9510), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (9499, 9510), True, 'import numpy as np\n'), ((26300, 26318), 'numpy.ma.atleast_2d', 'ma.atleast_2d', (['obs'], {}), '(obs)\n', (26313, 26318), False, 'from numpy import ma\n'), ((30672, 30708), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_state)'], {}), '((n_timesteps, n_dim_state))\n', (30680, 30708), True, 'import numpy as np\n'), ((30721, 30755), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_obs)'], {}), '((n_timesteps, n_dim_obs))\n', (30729, 30755), True, 'import numpy as np\n'), 
((42987, 43023), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_state)'], {}), '((n_timesteps, n_dim_state))\n', (42995, 43023), True, 'import numpy as np\n'), ((43036, 43070), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_obs)'], {}), '((n_timesteps, n_dim_obs))\n', (43044, 43070), True, 'import numpy as np\n'), ((50716, 50729), 'numpy.ma.asarray', 'ma.asarray', (['Z'], {}), '(Z)\n', (50726, 50729), False, 'from numpy import ma\n'), ((6346, 6359), 'numpy.ma.getmask', 'ma.getmask', (['z'], {}), '(z)\n', (6356, 6359), False, 'from numpy import ma\n'), ((6422, 6449), 'scipy.linalg.pinv', 'linalg.pinv', (['obs_sigma_pred'], {}), '(obs_sigma_pred)\n', (6433, 6449), False, 'from scipy import linalg\n'), ((18175, 18196), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (18183, 18196), True, 'import numpy as np\n'), ((18909, 18945), 'scipy.linalg.pinv', 'linalg.pinv', (['moments_pred.covariance'], {}), '(moments_pred.covariance)\n', (18920, 18945), False, 'from scipy import linalg\n'), ((23828, 23864), 'scipy.linalg.pinv', 'linalg.pinv', (['moments_pred.covariance'], {}), '(moments_pred.covariance)\n', (23839, 23864), False, 'from scipy import linalg\n'), ((31716, 31729), 'numpy.ma.asarray', 'ma.asarray', (['z'], {}), '(z)\n', (31726, 31729), False, 'from numpy import ma\n'), ((36765, 36791), 'numpy.ma.asarray', 'np.ma.asarray', (['observation'], {}), '(observation)\n', (36778, 36791), True, 'import numpy as np\n'), ((39331, 39355), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39337, 39355), True, 'import numpy as np\n'), ((39395, 39417), 'numpy.eye', 'np.eye', (['self.n_dim_obs'], {}), '(self.n_dim_obs)\n', (39401, 39417), True, 'import numpy as np\n'), ((39453, 39479), 'numpy.zeros', 'np.zeros', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39461, 39479), True, 'import numpy as np\n'), ((39521, 39545), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39527, 39545), True, 'import numpy as np\n'), ((44033, 44046), 'numpy.ma.asarray', 'ma.asarray', (['z'], {}), '(z)\n', (44043, 44046), False, 'from numpy import ma\n'), ((49081, 49107), 'numpy.ma.asarray', 'np.ma.asarray', (['observation'], {}), '(observation)\n', (49094, 49107), True, 'import numpy as np\n'), ((51555, 51579), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51561, 51579), True, 'import numpy as np\n'), ((51619, 51641), 'numpy.eye', 'np.eye', (['self.n_dim_obs'], {}), '(self.n_dim_obs)\n', (51625, 51641), True, 'import numpy as np\n'), ((51677, 51703), 'numpy.zeros', 'np.zeros', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51685, 51703), True, 'import numpy as np\n'), ((51745, 51769), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51751, 51769), True, 'import numpy as np\n'), ((1548, 1570), 'numpy.diag', 'np.diag', (['weights_sigma'], {}), '(weights_sigma)\n', (1555, 1570), True, 'import numpy as np\n'), ((13572, 13605), 'numpy.diag', 'np.diag', (['points_pred.weights_mean'], {}), '(points_pred.weights_mean)\n', (13579, 13605), True, 'import numpy as np\n'), ((31517, 31536), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (31525, 31536), True, 'import numpy as np\n'), ((36668, 36687), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (36676, 36687), True, 'import numpy as np\n'), ((43833, 43852), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (43841, 43852), True, 'import numpy as np\n'), ((48984, 49003), 'numpy.zeros', 
'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (48992, 49003), True, 'import numpy as np\n'), ((31107, 31128), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (31115, 31128), True, 'import numpy as np\n'), ((43422, 43443), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (43430, 43443), True, 'import numpy as np\n'), ((18713, 18752), 'numpy.diag', 'np.diag', (['points_pred.weights_covariance'], {}), '(points_pred.weights_covariance)\n', (18720, 18752), True, 'import numpy as np\n'), ((23632, 23671), 'numpy.diag', 'np.diag', (['points_pred.weights_covariance'], {}), '(points_pred.weights_covariance)\n', (23639, 23671), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Setup file for EspressoDB
"""
from espressodb import __version__
__author__ = "@cchang5, @ckoerber"
from os import path
from setuptools import setup, find_packages
CWD = path.abspath(path.dirname(__file__))
with open(path.join(CWD, "README.md"), encoding="utf-8") as inp:
LONG_DESCRIPTION = inp.read()
with open(path.join(CWD, "requirements.txt"), encoding="utf-8") as inp:
REQUIREMENTS = [el.strip() for el in inp.read().split(",")]
with open(path.join(CWD, "requirements-dev.txt"), encoding="utf-8") as inp:
REQUIREMENTS_DEV = [el.strip() for el in inp.read().split(",")]
setup(
name="espressodb",
python_requires=">=3.6",
version=__version__,
description="Science database interface using Django as the content manager.",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/callat-qcd/espressodb",
project_urls={
"Bug Reports": "https://github.com/callat-qcd/espressodb/issues",
"Source": "https://github.com/callat-qcd/espressodb",
"Documentation": "https://espressodb.readthedocs.io",
},
author=__author__,
author_email="<EMAIL>",
keywords=["Database", "Workflow", "Django"],
packages=find_packages(exclude=["docs", "tests", "example"]),
install_requires=REQUIREMENTS,
entry_points={"console_scripts": ["espressodb=espressodb.manage:main"]},
extras_require={"dev": REQUIREMENTS_DEV},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD License",
],
include_package_data=True,
)
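# Assumed local development workflow (illustrative, not part of the original file):
#   pip install -e ".[dev]"   # editable install pulling in the "dev" extras declared above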
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((215, 237), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (227, 237), False, 'from os import path\n'), ((250, 277), 'os.path.join', 'path.join', (['CWD', '"""README.md"""'], {}), "(CWD, 'README.md')\n", (259, 277), False, 'from os import path\n'), ((350, 384), 'os.path.join', 'path.join', (['CWD', '"""requirements.txt"""'], {}), "(CWD, 'requirements.txt')\n", (359, 384), False, 'from os import path\n'), ((487, 525), 'os.path.join', 'path.join', (['CWD', '"""requirements-dev.txt"""'], {}), "(CWD, 'requirements-dev.txt')\n", (496, 525), False, 'from os import path\n'), ((1269, 1320), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['docs', 'tests', 'example']"}), "(exclude=['docs', 'tests', 'example'])\n", (1282, 1320), False, 'from setuptools import setup, find_packages\n')]
|
from EasyParser import EasyParser
import re
import time
# FG1K2D-2 # diag hardware sysinfo interrupts
# CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
# 0: 36 0 0 0 0 0 0 0 IO-APIC-edge timer
# 2: 0 0 0 0 0 0 0 0 XT-PIC-XT-PIC cascade
# 3: 0 3577171 0 0 0 0 0 0 IO-APIC-edge serial
# 4: 0 4688 0 0 0 0 0 0 IO-APIC-edge serial
# 8: 0 0 0 0 0 0 0 0 IO-APIC-edge rtc
# 16: 0 1832355 0 0 0 0 0 0 IO-APIC-fasteoi ehci_hcd:usb1, ehci_hcd:usb2, uhci_hcd:usb5, uhci_hcd:usb9, linux-kernel-bde, mgmt1
# 17: 0 0 3 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb3, uhci_hcd:usb6, mgmt2
# 18: 0 0 0 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb4, uhci_hcd:usb7
# 19: 0 0 0 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb8, net2280
# 64: 1 0 0 260298 0 0 0 0 PCI-MSI-edge ahci
# 65: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn0
# 66: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn1
# 67: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn2
# 68: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn3
# 69: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_kxp
# 70: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn0
# 71: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn1
# 72: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn2
# 73: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn3
# 74: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_kxp
# 75: 5 1 0 0 0 0 0 0 PCI-MSI-edge np6_0-tx-rx0
# 76: 0 1 5 0 0 0 0 0 PCI-MSI-edge np6_0-tx-rx1
# 77: 0 0 1 0 5 0 0 0 PCI-MSI-edge np6_0-tx-rx2
# 78: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-err0
# 79: 0 0 17 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-tx-rx0
# 80: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-err0
# 81: 16418964 0 0 1 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-0
# 82: 0 16141636 0 1 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-1
# 83: 0 0 0 14991882 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-2
# 84: 0 0 0 1 15879562 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-3
# 85: 0 0 0 0 1 16707050 0 0 PCI-MSI-edge np6_0-nturbo-ips-4
# 86: 0 0 0 0 1 0 16444822 0 PCI-MSI-edge np6_0-nturbo-ips-5
# 87: 0 0 0 0 1 0 0 16581448 PCI-MSI-edge np6_0-nturbo-ips-6
# 88: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-7
# 89: 0 0 0 0 0 1 7 0 PCI-MSI-edge np6_0-tx-rx3
# 90: 5 0 0 0 0 1 0 0 PCI-MSI-edge np6_0-tx-rx4
# 91: 0 0 5 0 0 1 0 0 PCI-MSI-edge np6_0-tx-rx5
# 92: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-err1
# 93: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-err1
# 94: 207221826 0 0 0 0 0 1 0 PCI-MSI-edge np6_1-tx-rx0
# 95: 0 0 200639569 0 0 0 1 0 PCI-MSI-edge np6_1-tx-rx1
# 96: 0 0 0 0 240962811 0 1 0 PCI-MSI-edge np6_1-tx-rx2
# 97: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-err0
# 98: 0 1 479259756 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-tx-rx0
# 99: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-err0
# 100: 0 0 1 0 0 0 240663469 0 PCI-MSI-edge np6_1-tx-rx3
# 101: 210887756 0 1 0 0 0 0 0 PCI-MSI-edge np6_1-tx-rx4
# 102: 0 0 202674599 0 0 0 0 0 PCI-MSI-edge np6_1-tx-rx5
# 103: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-err1
# 104: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-err1
# NMI: 451378 451332 451379 451331 451379 451330 451380 451329 Non-maskable interrupts
# LOC: 27025393 27025374 27025356 27025338 27025320 27025302 27025284 27025266 Local timer interrupts
# SPU: 0 0 0 0 0 0 0 0 Spurious interrupts
# PMI: 451378 451332 451379 451331 451379 451330 451380 451329 Performance monitoring interrupts
# IWI: 0 0 0 0 0 0 0 0 IRQ work interrupts
# RES: 54764029 23029410 66355685 21516202 64664597 18859876 69639605 20136217 Rescheduling interrupts
# CAL: 1227 1315 1304 287 1295 1290 1323 1325 Function call interrupts
# TLB: 350 792 1188 1324 712 547 831 507 TLB shootdowns
# ERR: 0
# MIS: 0
# FG1K2D-2 # fnsysctl cat /proc/softirqs
# CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
# HI: 0 0 0 0 0 0 0 0
# TIMER: 28521064 28525832 28520649 28526326 28524819 28526243 28524655 28526254
# NET_TX: 994 57592 871 518 854 502 578 462
# NET_RX: 576621254 1990912 889144076 0 350281983 2 353098308 0
# BLOCK: 476 301 193 275534 181 396 98 313
# BLOCK_IOPOLL: 0 0 0 0 0 0 0 0
# TASKLET: 14128586 1943262 12439627 1942008 9747759 1944864 9735439 1961939
# SCHED: 9818324 13579287 11060339 13505914 10051866 12468454 9796770 12164434
# HRTIMER: 0 0 0 0 0 0 0 0
# RCU: 26288609 14045430 23576147 14059434 19574070 15025426 19446047 15275527
class ParserInterrupts(EasyParser):
def prepare(self):
      self.re_cpus = re.compile(r"^\s+CPU.*?(\d+)\s+\n")
def get(self, soft=True, hard=True, description=None):
interrupts = {}
collected_on = None
cpus = None
desc_re = None
if description != None:
desc_re = re.compile(description)
if hard:
hw = self.get_real('hard')
interrupts.update(hw['interrupts'])
collected_on = hw['collected_on']
cpus = hw['cpus']
if soft:
sw = self.get_real('soft')
interrupts.update(sw['interrupts'])
collected_on = sw['collected_on']
cpus = sw['cpus']
if collected_on == None or cpus == None:
raise Exception('Either soft or hard interrupts must be selected')
# filter out not matching
      for irq in list(interrupts.keys()):  # snapshot the keys: entries are deleted below
         if desc_re is None or desc_re.search(interrupts[irq]['description']) is not None: continue
         del interrupts[irq]
return {
'collected_on': collected_on,
'cpus' : cpus,
'interrupts' : interrupts,
}
def get_real(self, source):
if source == 'hard':
interrupts = self.sshc.clever_exec("diagnose hardware sysinfo interrupts")
elif source == 'soft':
interrupts = self.sshc.clever_exec("fnsysctl cat /proc/softirqs")
else:
raise Exception('Interrupts can be either "hard" or "soft"')
command_time = time.time()
result = {}
# count cpus
g = self.re_cpus.search(interrupts)
if g == None: raise Exception("Cannot count CPUs")
cpus = int(g.group(1))+1
# parse lines with entry for each cpu
      tmp = r"^\s*(\S+):" + r"\s*(\d+)"*cpus
      if source == 'hard': tmp += r"\s+(.*?)[\r]*$"
re_interrupt = re.compile(tmp, re.M)
for iline in re_interrupt.findall(interrupts):
if source == 'hard':
try: int(iline[0])
except ValueError: itype = 'other'
else: itype = 'numeric'
if itype == 'numeric':
tmp = iline[-1].split(None, 1)
trigger = tmp[0]
desc = tmp[1]
elif itype == 'other':
trigger = 'other'
desc = iline[-1]
elif source == 'soft':
itype = 'soft'
trigger = 'other'
if iline[0] == 'NET_RX':
desc = 'Incoming packets (NAPI)'
elif iline[0] == 'NET_TX':
desc = 'Outgoing packets (NAPI)'
elif iline[0] == 'HI':
desc = 'High priority tasklet'
elif iline[0] == 'TASKLET':
desc = 'Normal priority tasklet'
elif iline[0] == 'TIMER':
desc = 'Normal timer'
elif iline[0] == 'HRTIMER':
desc = 'High-resolution timer'
elif iline[0] == 'RCU':
desc = 'RCU locking'
elif iline[0] == 'SCHED':
desc = 'Scheduler'
elif iline[0] in ('BLOCK', 'BLOCK_IOPOLL'):
desc = 'Block device (disk)'
else:
desc = 'softirq'
ticks = {'total':0}
for i in range(cpus):
ticks[i] = int(iline[1+i])
ticks['total'] += ticks[i]
result[iline[0]] = {
'type' : itype,
'trigger' : trigger,
'description' : desc,
'ticks' : ticks,
'source' : source,
}
# parse lines with single cpu column
      re_single = re.compile(r'^\s*(ERR|MIS):\s*(\d+)', re.M)
for single in re_single.findall(interrupts):
ticks = {'total': int(single[1])}
for i in range(cpus): ticks[i] = ticks['total']
result[single[0]] = {
'type' : 'single',
'trigger' : 'other',
'description' : 'unknown',
'ticks' : ticks,
'source' : source,
}
return {
'collected_on': command_time,
'cpus' : cpus,
'interrupts' : result,
}
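# Illustrative usage sketch (assumed: EasyParser's constructor accepts the SSH session
# object exposed here as self.sshc):
#
#   parser = ParserInterrupts(sshc)
#   data = parser.get(soft=False, hard=True, description='np6_')
#   for name, irq in data['interrupts'].items():
#       print(name, irq['trigger'], irq['ticks']['total'])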
|
[
"time.time",
"re.compile"
] |
[((9054, 9091), 're.compile', 're.compile', (['"""^\\\\s+CPU.*?(\\\\d+)\\\\s+\n"""'], {}), "('^\\\\s+CPU.*?(\\\\d+)\\\\s+\\n')\n", (9064, 9091), False, 'import re\n'), ((10299, 10310), 'time.time', 'time.time', ([], {}), '()\n', (10308, 10310), False, 'import time\n'), ((10603, 10624), 're.compile', 're.compile', (['tmp', 're.M'], {}), '(tmp, re.M)\n', (10613, 10624), False, 'import re\n'), ((11993, 12038), 're.compile', 're.compile', (['"""^\\\\s*(ERR|MIS):\\\\s*(\\\\d+)"""', 're.M'], {}), "('^\\\\s*(ERR|MIS):\\\\s*(\\\\d+)', re.M)\n", (12003, 12038), False, 'import re\n'), ((9272, 9295), 're.compile', 're.compile', (['description'], {}), '(description)\n', (9282, 9295), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
"""1-Step Advantage Actor-Critic agent for episodic tasks in OpenAI Gym.
- Author: <NAME>
- Contact: <EMAIL>
"""
import argparse
from typing import Tuple
import gym
import numpy as np
import torch
import wandb
from rl_algorithms.common.abstract.agent import Agent
from rl_algorithms.common.helper_functions import numpy2floattensor
from rl_algorithms.registry import AGENTS, build_learner
from rl_algorithms.utils.config import ConfigDict
@AGENTS.register_module
class A2CAgent(Agent):
"""1-Step Advantage Actor-Critic interacting with environment.
Attributes:
env (gym.Env): openAI Gym environment
args (argparse.Namespace): arguments including hyperparameters and training settings
hyper_params (ConfigDict): hyper-parameters
network_cfg (ConfigDict): config of network for training agent
optim_cfg (ConfigDict): config of optimizer
state_dim (int): state size of env
action_dim (int): action size of env
actor (nn.Module): policy model to select actions
critic (nn.Module): critic model to evaluate states
actor_optim (Optimizer): optimizer for actor
critic_optim (Optimizer): optimizer for critic
episode_step (int): step number of the current episode
i_episode (int): current episode number
transition (list): recent transition information
"""
def __init__(
self,
env: gym.Env,
env_info: ConfigDict,
args: argparse.Namespace,
hyper_params: ConfigDict,
learner_cfg: ConfigDict,
log_cfg: ConfigDict,
):
"""Initialize."""
Agent.__init__(self, env, env_info, args, log_cfg)
self.transition: list = list()
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self.learner_cfg.args = self.args
self.learner_cfg.env_info = self.env_info
self.learner_cfg.hyper_params = self.hyper_params
self.learner_cfg.log_cfg = self.log_cfg
self.learner = build_learner(self.learner_cfg)
def select_action(self, state: np.ndarray) -> torch.Tensor:
"""Select an action from the input space."""
state = numpy2floattensor(state, self.learner.device)
selected_action, dist = self.learner.actor(state)
if self.args.test:
selected_action = dist.mean
else:
predicted_value = self.learner.critic(state)
log_prob = dist.log_prob(selected_action).sum(dim=-1)
self.transition = []
self.transition.extend([log_prob, predicted_value])
return selected_action
def step(self, action: torch.Tensor) -> Tuple[np.ndarray, np.float64, bool, dict]:
"""Take an action and return the response of the env."""
action = action.detach().cpu().numpy()
next_state, reward, done, info = self.env.step(action)
if not self.args.test:
done_bool = done
if self.episode_step == self.args.max_episode_steps:
done_bool = False
self.transition.extend([next_state, reward, done_bool])
return next_state, reward, done, info
def write_log(self, log_value: tuple):
i, score, policy_loss, value_loss = log_value
total_loss = policy_loss + value_loss
print(
"[INFO] episode %d\tepisode step: %d\ttotal score: %d\n"
"total loss: %.4f\tpolicy loss: %.4f\tvalue loss: %.4f\n"
% (i, self.episode_step, score, total_loss, policy_loss, value_loss)
)
if self.args.log:
wandb.log(
{
"total loss": total_loss,
"policy loss": policy_loss,
"value loss": value_loss,
"score": score,
}
)
def train(self):
"""Train the agent."""
# logger
if self.args.log:
self.set_wandb()
# wandb.watch([self.actor, self.critic], log="parameters")
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
policy_loss_episode = list()
value_loss_episode = list()
self.episode_step = 0
while not done:
if self.args.render and self.i_episode >= self.args.render_after:
self.env.render()
action = self.select_action(state)
next_state, reward, done, _ = self.step(action)
self.episode_step += 1
policy_loss, value_loss = self.learner.update_model(self.transition)
policy_loss_episode.append(policy_loss)
value_loss_episode.append(value_loss)
state = next_state
score += reward
# logging
policy_loss = np.array(policy_loss_episode).mean()
value_loss = np.array(value_loss_episode).mean()
log_value = (self.i_episode, score, policy_loss, value_loss)
self.write_log(log_value)
if self.i_episode % self.args.save_period == 0:
self.learner.save_params(self.i_episode)
self.interim_test()
# termination
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test()
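# Illustrative usage sketch (assumed: env, configs and args are built the way the
# rl_algorithms registry examples build them):
#
#   agent = A2CAgent(env, env_info, args, hyper_params, learner_cfg, log_cfg)
#   agent.train()   # one actor/critic update per environment step, as implemented above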
|
[
"wandb.log",
"rl_algorithms.common.abstract.agent.Agent.__init__",
"rl_algorithms.registry.build_learner",
"rl_algorithms.common.helper_functions.numpy2floattensor",
"numpy.array"
] |
[((1661, 1711), 'rl_algorithms.common.abstract.agent.Agent.__init__', 'Agent.__init__', (['self', 'env', 'env_info', 'args', 'log_cfg'], {}), '(self, env, env_info, args, log_cfg)\n', (1675, 1711), False, 'from rl_algorithms.common.abstract.agent import Agent\n'), ((2112, 2143), 'rl_algorithms.registry.build_learner', 'build_learner', (['self.learner_cfg'], {}), '(self.learner_cfg)\n', (2125, 2143), False, 'from rl_algorithms.registry import AGENTS, build_learner\n'), ((2278, 2323), 'rl_algorithms.common.helper_functions.numpy2floattensor', 'numpy2floattensor', (['state', 'self.learner.device'], {}), '(state, self.learner.device)\n', (2295, 2323), False, 'from rl_algorithms.common.helper_functions import numpy2floattensor\n'), ((3685, 3796), 'wandb.log', 'wandb.log', (["{'total loss': total_loss, 'policy loss': policy_loss, 'value loss':\n value_loss, 'score': score}"], {}), "({'total loss': total_loss, 'policy loss': policy_loss,\n 'value loss': value_loss, 'score': score})\n", (3694, 3796), False, 'import wandb\n'), ((5003, 5032), 'numpy.array', 'np.array', (['policy_loss_episode'], {}), '(policy_loss_episode)\n', (5011, 5032), True, 'import numpy as np\n'), ((5065, 5093), 'numpy.array', 'np.array', (['value_loss_episode'], {}), '(value_loss_episode)\n', (5073, 5093), True, 'import numpy as np\n')]
|
from typing import NamedTuple
from random import choice, randint, shuffle
__doc__ = '''
This module is used to generate the graph of a game map.
The graph is divided into partitions in such a way that between any two
partitions there is at most one edge.
These partitions are linked in a tree structure: internally each partition can be
any ordinary graph, but between two partitions there is only one possible walk.
Every edge that links two partitions `a` and `b` is considered locked; its key is
guaranteed to be in a partition bigger than min(a, b), so that navigation starting
from the last partition can reach every vertex.
'''
class Raw(NamedTuple):
vertexes: set
edges: set
keys: set
initial: 'Vertex'
final: 'Vertex'
class Vertex(NamedTuple):
area: int
sub_area: int
@property
def identifier(self) -> str:
return f'{self.area}_{self.sub_area}'
class Edge(NamedTuple):
origin: 'Vertex'
destin: 'Vertex'
class Key(NamedTuple):
position: 'Vertex'
door: 'Edge'
def raw(size = 3, size_factor = 4) -> Raw:
if not size or size < 3:
size = 3
if not size_factor or size_factor < 4:
size_factor = 4
vertexes = [Vertex(0, 0)]
edges = []
keys = []
for area_id in range(1, size):
vertexes.append(Vertex(area_id, 0))
minimum_sub_size = size_factor//2+1
maximum_sub_size = size_factor*2-1
sub_size = randint(minimum_sub_size, maximum_sub_size)
for sub_area_id in range(1, sub_size):
new_vertex = Vertex(area_id, sub_area_id)
minimum_connection = 1
maximum_connection = min(sub_area_id, 3)
connection_amount = randint(minimum_connection, maximum_connection)
for connection_id in range(connection_amount):
edges.append(Edge(
new_vertex,
choice(tuple(v for v in vertexes if v.area == area_id))
))
vertexes.append(new_vertex)
for area_id in range(0, size-1):
previous = [area_id + 1, randint(min(area_id+1, size-1), size-1)]
shuffle(previous)
key_area, door_area = previous
new_edge = Edge(
choice(tuple(v for v in vertexes if v.area == door_area)),
choice(tuple(v for v in vertexes if v.area == area_id)),
)
new_key = Key(
choice(tuple(v for v in vertexes if v.area == key_area and v not in new_edge)),
new_edge,
)
edges.append(new_edge)
keys.append(new_key)
return Raw(
vertexes = set(vertexes),
edges = set(edges),
keys = set(keys),
initial = vertexes[-1],
final = vertexes[0])
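if __name__ == '__main__':
    # Minimal demonstration (not part of the original module): generate a small map
    # graph and show where the keys and locked doors end up.
    graph = raw(size=4, size_factor=5)
    print('start:', graph.initial.identifier, '-> goal:', graph.final.identifier)
    for key in graph.keys:
        print('key in', key.position.identifier,
              'opens', key.door.origin.identifier, '<->', key.door.destin.identifier)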
|
[
"random.shuffle",
"random.randint"
] |
[((1455, 1498), 'random.randint', 'randint', (['minimum_sub_size', 'maximum_sub_size'], {}), '(minimum_sub_size, maximum_sub_size)\n', (1462, 1498), False, 'from random import choice, randint, shuffle\n'), ((2153, 2170), 'random.shuffle', 'shuffle', (['previous'], {}), '(previous)\n', (2160, 2170), False, 'from random import choice, randint, shuffle\n'), ((1720, 1767), 'random.randint', 'randint', (['minimum_connection', 'maximum_connection'], {}), '(minimum_connection, maximum_connection)\n', (1727, 1767), False, 'from random import choice, randint, shuffle\n')]
|
from setuptools import setup, find_packages
setup(name='unmix', version='1.0', packages=find_packages())
|
[
"setuptools.find_packages"
] |
[((89, 104), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (102, 104), False, 'from setuptools import setup, find_packages\n')]
|
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) <NAME>
#
# SPDX-License-Identifier: Apache-2.0
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import io
import json
import logging
import os
import requests
import shutil
import sys
import time
import zipfile
import benchexec.util
from benchexec.model import MEMLIMIT, TIMELIMIT, CORELIMIT
sys.dont_write_bytecode = True # prevent creation of .pyc files
REQUEST_URL = {
"create": "{0}{1}/execution/create",
"upload": "{0}{1}/upload/{2}?file={3}",
"launch": "{0}{1}/execution/{2}/launch?verifier={3}&verifierS3={4}&tasks={5}&tasksS3={6}&commands={7}",
"progress": "{0}{1}/execution/{2}/progress",
"results": "{0}{1}/execution/{2}/results",
"clean": "{0}{1}/clean",
}
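# For illustration (assumed values): REQUEST_URL["create"].format(endpoint, token)
# with endpoint "https://aws.example.org/" and token "abc123" yields
# "https://aws.example.org/abc123/execution/create".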
DEFAULT_CLOUD_TIMELIMIT = 300 # s
DEFAULT_CLOUD_MEMLIMIT = None
DEFAULT_CLOUD_MEMORY_REQUIREMENT = 7000000000 # 7 GB
DEFAULT_CLOUD_CPUCORE_REQUIREMENT = 2 # one core with hyperthreading
DEFAULT_CLOUD_CPUMODEL_REQUIREMENT = "" # empty string matches every model
STOPPED_BY_INTERRUPT = False
def init(config, benchmark):
benchmark.executable = benchmark.tool.executable()
benchmark.tool_version = benchmark.tool.version(benchmark.executable)
logging.info("Using %s version %s.", benchmark.tool_name, benchmark.tool_version)
def get_system_info():
return None
def execute_benchmark(benchmark, output_handler):
(toolpaths, awsInput) = getAWSInput(benchmark)
with open(benchmark.config.aws_config, "r") as conf_file:
conf = json.load(conf_file)[0]
aws_endpoint = conf["Endpoint"]
aws_token = conf["UserToken"]
try:
logging.info("Building archive files for the verifier-tool and the tasks...")
verifier_arc_name = benchmark.tool_name + "_" + benchmark.instance + ".zip"
verifier_arc_path = _createArchiveFile(
verifier_arc_name, toolpaths["absBaseDir"], toolpaths["absToolpaths"],
)
tasks_arc_name = "tasks_" + benchmark.instance + ".zip"
tasks_arc_path = _createArchiveFile(
tasks_arc_name, toolpaths["absBaseDir"], toolpaths["absSourceFiles"],
)
start_time = benchexec.util.read_local_time()
logging.info("Waiting for the AWS EC2-instance to set everything up...")
# Create
http_request = requests.get(
REQUEST_URL["create"].format(aws_endpoint, aws_token)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
if (
msg.get("message") is not None
and msg.get("message") == "Token not authorized."
):
sys.exit("Invalid token submitted: " + aws_token)
requestId = msg["requestId"]
# Upload verifier
http_request = requests.get(
REQUEST_URL["upload"].format(
aws_endpoint, aws_token, requestId, verifier_arc_name
)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
(verifier_uploadUrl, verifier_s3_key, verifier_aws_public_url) = (
msg["uploadUrl"],
msg["S3Key"],
msg["publicURL"],
)
payload = open(verifier_arc_path, "rb").read()
headers = {"Content-Type": "application/zip"}
http_request = requests.request(
"PUT", verifier_uploadUrl, headers=headers, data=payload
)
_exitWhenRequestFailed(http_request)
# Upload tasks
http_request = requests.get(
REQUEST_URL["upload"].format(
aws_endpoint, aws_token, requestId, tasks_arc_name
)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
(tasks_uploadUrl, tasks_s3_key, tasks_aws_public_url) = (
msg["uploadUrl"],
msg["S3Key"],
msg["publicURL"],
)
payload = open(tasks_arc_path, "rb").read()
headers = {"Content-Type": "application/zip"}
http_request = requests.request(
"PUT", tasks_uploadUrl, headers=headers, data=payload
)
_exitWhenRequestFailed(http_request)
# Launch
http_request = requests.get(
REQUEST_URL["launch"].format(
aws_endpoint,
aws_token,
requestId,
verifier_aws_public_url,
verifier_s3_key,
tasks_aws_public_url,
tasks_s3_key,
json.dumps(awsInput),
)
)
_exitWhenRequestFailed(http_request)
# Progress
logging.info(
"Executing RunExec on the AWS workers. Depending on the size of the tasks, this might take a while."
)
progress_url = REQUEST_URL["progress"].format(
aws_endpoint, aws_token, requestId
)
initialized = False
# Give the ec2-instance some time for instantiation
while not initialized:
http_request = requests.get(progress_url)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
if (
msg["message"] == "Internal server error"
or msg["instancesNotTerminatedTotal"] > 0
):
logging.info("waiting...")
time.sleep(10)
continue
initialized = True
logging.info("Done. Collecting the results back from AWS.")
# Results
http_request = requests.get(
REQUEST_URL["results"].format(aws_endpoint, aws_token, requestId)
)
_exitWhenRequestFailed(http_request)
for url in http_request.json()["urls"]:
logging.debug("Downloading file from url: %s", url)
result_file = requests.get(url)
zipfile.ZipFile(io.BytesIO(result_file.content)).extractall(
benchmark.log_folder
)
except KeyboardInterrupt:
stop()
finally:
if os.path.exists(verifier_arc_path):
os.remove(verifier_arc_path)
if os.path.exists(tasks_arc_path):
os.remove(tasks_arc_path)
if STOPPED_BY_INTERRUPT:
output_handler.set_error("interrupted")
end_time = benchexec.util.read_local_time()
handleCloudResults(benchmark, output_handler, start_time, end_time)
# Clean
requests.get(REQUEST_URL["clean"].format(aws_endpoint, aws_token))
def stop():
global STOPPED_BY_INTERRUPT
STOPPED_BY_INTERRUPT = True
def _exitWhenRequestFailed(http_request):
if http_request.status_code != 200:
sys.exit(
"Http-request failed (Server responded with status code: {0}).".format(
http_request.status_code
)
)
def getAWSInput(benchmark):
(
requirements,
numberOfRuns,
limitsAndNumRuns,
runDefinitions,
sourceFiles,
) = getBenchmarkData(benchmark)
(workingDir, toolpaths) = getToolData(benchmark)
absWorkingDir = os.path.abspath(workingDir)
absToolpaths = list(map(os.path.abspath, toolpaths))
absSourceFiles = list(map(os.path.abspath, sourceFiles))
absBaseDir = benchexec.util.common_base_dir(absSourceFiles + absToolpaths)
if absBaseDir == "":
sys.exit("No common base dir found.")
toolpaths = {
"absBaseDir": absBaseDir,
"workingDir": workingDir,
"absWorkingDir": absWorkingDir,
"toolpaths": toolpaths,
"absToolpaths": absToolpaths,
"sourceFiles": sourceFiles,
"absSourceFiles": absSourceFiles,
}
awsInput = {
"requirements": requirements,
"workingDir": os.path.relpath(absWorkingDir, absBaseDir),
}
if benchmark.result_files_patterns:
if len(benchmark.result_files_patterns) > 1:
sys.exit("Multiple result-files patterns not supported in cloud mode.")
awsInput.update({"resultFilePatterns": benchmark.result_files_patterns[0]})
awsInput.update({"limitsAndNumRuns": limitsAndNumRuns})
awsInput.update({"runDefinitions": runDefinitions})
return (toolpaths, awsInput)
def _zipdir(path, zipfile, absBaseDir):
for root, dirs, files in os.walk(path):
for file in files:
filepath = os.path.join(root, file)
zipfile.write(filepath, os.path.relpath(filepath, absBaseDir))
def _createArchiveFile(archive_name, absBaseDir, abs_paths):
archive_path = os.path.join(absBaseDir, archive_name)
if os.path.isfile(archive_path):
sys.exit(
"Zip file already exists: '{0}'; not going to overwrite it.".format(
os.path.normpath(archive_path)
)
)
zipf = zipfile.ZipFile(archive_path, "w", zipfile.ZIP_DEFLATED)
for file in abs_paths:
if not os.path.exists(file):
zipf.close()
if os.path.isfile(archive_path):
os.remove(archive_path)
sys.exit(
"Missing file '{0}', cannot run benchmark without it.".format(
os.path.normpath(file)
)
)
if os.path.isdir(file):
_zipdir(file, zipf, absBaseDir)
else:
zipf.write(file, os.path.relpath(file, absBaseDir))
zipf.close()
return archive_path
def getBenchmarkData(benchmark):
# get requirements
r = benchmark.requirements
requirements = {
"cpu_cores": DEFAULT_CLOUD_CPUCORE_REQUIREMENT
if r.cpu_cores is None
else r.cpu_cores,
"cpu_model": DEFAULT_CLOUD_CPUMODEL_REQUIREMENT
if r.cpu_model is None
else r.cpu_model,
"memory_in_mb": bytes_to_mb(
DEFAULT_CLOUD_MEMORY_REQUIREMENT if r.memory is None else r.memory
),
}
# get limits and number of Runs
timeLimit = benchmark.rlimits.get(TIMELIMIT, DEFAULT_CLOUD_TIMELIMIT)
memLimit = bytes_to_mb(benchmark.rlimits.get(MEMLIMIT, DEFAULT_CLOUD_MEMLIMIT))
coreLimit = benchmark.rlimits.get(CORELIMIT, None)
numberOfRuns = sum(
len(runSet.runs) for runSet in benchmark.run_sets if runSet.should_be_executed()
)
limitsAndNumRuns = {
"number_of_runs": numberOfRuns,
"time_limit_in_sec": timeLimit,
"mem_limit_in_mb": memLimit,
}
if coreLimit is not None:
limitsAndNumRuns.update({"core_limit": coreLimit})
# get Runs with args and sourcefiles
sourceFiles = set()
runDefinitions = []
for runSet in benchmark.run_sets:
if not runSet.should_be_executed():
continue
if STOPPED_BY_INTERRUPT:
break
# get runs
for run in runSet.runs:
runDefinition = {}
            # wrap list elements in quotation marks if they contain whitespace
cmdline = ["'{}'".format(x) if " " in x else x for x in run.cmdline()]
cmdline = " ".join(cmdline)
log_file = os.path.relpath(run.log_file, benchmark.log_folder)
runDefinition.update(
{
"cmdline": cmdline,
"log_file": log_file,
"sourcefile": run.sourcefiles,
"required_files": run.required_files,
}
)
runDefinitions.append(runDefinition)
sourceFiles.update(run.sourcefiles)
sourceFiles.update(run.required_files)
if not runDefinitions:
sys.exit("Benchmark has nothing to run.")
return (requirements, numberOfRuns, limitsAndNumRuns, runDefinitions, sourceFiles)
def getToolData(benchmark):
workingDir = benchmark.working_directory()
if not os.path.isdir(workingDir):
sys.exit("Missing working directory '{0}', cannot run tool.".format(workingDir))
logging.debug("Working dir: " + workingDir)
toolpaths = benchmark.required_files()
validToolpaths = set()
for file in toolpaths:
if not os.path.exists(file):
sys.exit(
"Missing file '{0}', not runing benchmark without it.".format(
os.path.normpath(file)
)
)
validToolpaths.add(file)
return (workingDir, validToolpaths)
def bytes_to_mb(n_bytes):
    # convert a size given in bytes to (SI) megabytes; None is passed through
    if n_bytes is None:
        return None
    return int(n_bytes / 1000 / 1000)
def handleCloudResults(benchmark, output_handler, start_time, end_time):
outputDir = benchmark.log_folder
if not os.path.isdir(outputDir) or not os.listdir(outputDir):
# outputDir does not exist or is empty
logging.warning(
"Cloud produced no results. Output-directory is missing or empty: %s",
outputDir,
)
if start_time and end_time:
usedWallTime = (end_time - start_time).total_seconds()
else:
usedWallTime = None
# write results in runs and handle output after all runs are done
executedAllRuns = True
runsProducedErrorOutput = False
for runSet in benchmark.run_sets:
if not runSet.should_be_executed():
output_handler.output_for_skipping_run_set(runSet)
continue
output_handler.output_before_run_set(runSet, start_time=start_time)
for run in runSet.runs:
filename = os.path.split(run.log_file)[1]
resultFilesDir = os.path.splitext(filename)[0]
awsFileDir = os.path.join(benchmark.log_folder, resultFilesDir)
logFile = os.path.join(awsFileDir, filename)
shutil.move(logFile, run.log_file)
dataFile = run.log_file + ".data"
shutil.move(logFile + ".data", dataFile)
errFile = run.log_file + ".stdError"
if os.path.exists(errFile):
shutil.move(logFile + ".stdError", errFile)
if os.path.isdir(awsFileDir):
if os.listdir(awsFileDir):
logging.info("Dir %s contains unhandled files", awsFileDir)
else:
os.rmdir(awsFileDir)
if os.path.exists(dataFile) and os.path.exists(run.log_file):
try:
values = parseCloudRunResultFile(dataFile)
if not benchmark.config.debug:
os.remove(dataFile)
except IOError as e:
logging.warning(
"Cannot extract measured values from output for file %s: %s",
run.identifier,
e,
)
output_handler.all_created_files.add(dataFile)
output_handler.set_error("missing results", runSet)
executedAllRuns = False
else:
output_handler.output_before_run(run)
run.set_result(values, ["host"])
output_handler.output_after_run(run)
else:
logging.warning("No results exist for file %s.", run.identifier)
output_handler.set_error("missing results", runSet)
executedAllRuns = False
if os.path.exists(run.log_file + ".stdError"):
runsProducedErrorOutput = True
# The directory structure differs between direct and webclient mode when using VCloud.
# Move all output files from "sibling of log-file" to "sibling of parent directory".
rawPath = run.log_file[: -len(".log")]
dirname, filename = os.path.split(rawPath)
vcloudFilesDirectory = rawPath + ".files"
benchexecFilesDirectory = os.path.join(
dirname[: -len(".logfiles")] + ".files", filename
)
if os.path.isdir(vcloudFilesDirectory) and not os.path.isdir(
benchexecFilesDirectory
):
shutil.move(vcloudFilesDirectory, benchexecFilesDirectory)
output_handler.output_after_run_set(
runSet, walltime=usedWallTime, end_time=end_time
)
output_handler.output_after_benchmark(STOPPED_BY_INTERRUPT)
if not executedAllRuns:
logging.warning("Some expected result files could not be found!")
if runsProducedErrorOutput and not benchmark.config.debug:
logging.warning(
"Some runs produced unexpected warnings on stderr, please check the %s files!",
os.path.join(outputDir, "*.stdError"),
)
def parseCloudRunResultFile(filePath):
def read_items():
with open(filePath, "rt") as file:
for line in file:
key, value = line.split("=", 1)
yield key, value
return parse_vcloud_run_result(read_items())
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError('Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), "Inconsistent exit codes {} and {} from VerifierCloud".format(old, new)
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
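# Illustrative usage sketch (not part of the original module); the sample
# key/value pairs below are hypothetical:
#
#     example = [("cputime", "1.50s"), ("walltime", "2.00s"),
#                ("memory", "1048576B"), ("returnvalue", "0"), ("host", "worker-1")]
#     result = parse_vcloud_run_result(example)
#     # result["cputime"] == 1.5, result["walltime"] == 2.0,
#     # result["memory"] == 1048576, result["host"] == "worker-1",
#     # result["exitcode"] built via benchexec.util.ProcessExitCode.create(value=0);
#     # keys outside the known set would be stored under a "vcloud-" prefix.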
|
[
"os.remove",
"os.walk",
"json.dumps",
"os.path.isfile",
"os.path.join",
"os.path.abspath",
"logging.warning",
"os.path.exists",
"os.path.normpath",
"requests.get",
"requests.request",
"io.BytesIO",
"time.sleep",
"os.rmdir",
"os.listdir",
"sys.exit",
"json.load",
"zipfile.ZipFile",
"logging.debug",
"os.path.isdir",
"logging.info",
"os.path.relpath",
"shutil.move",
"os.path.splitext",
"collections.OrderedDict",
"os.path.split"
] |
[((1343, 1429), 'logging.info', 'logging.info', (['"""Using %s version %s."""', 'benchmark.tool_name', 'benchmark.tool_version'], {}), "('Using %s version %s.', benchmark.tool_name, benchmark.\n tool_version)\n", (1355, 1429), False, 'import logging\n'), ((7164, 7191), 'os.path.abspath', 'os.path.abspath', (['workingDir'], {}), '(workingDir)\n', (7179, 7191), False, 'import os\n'), ((8353, 8366), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (8360, 8366), False, 'import os\n'), ((8601, 8639), 'os.path.join', 'os.path.join', (['absBaseDir', 'archive_name'], {}), '(absBaseDir, archive_name)\n', (8613, 8639), False, 'import os\n'), ((8647, 8675), 'os.path.isfile', 'os.path.isfile', (['archive_path'], {}), '(archive_path)\n', (8661, 8675), False, 'import os\n'), ((8859, 8915), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archive_path', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(archive_path, 'w', zipfile.ZIP_DEFLATED)\n", (8874, 8915), False, 'import zipfile\n'), ((11946, 11989), 'logging.debug', 'logging.debug', (["('Working dir: ' + workingDir)"], {}), "('Working dir: ' + workingDir)\n", (11959, 11989), False, 'import logging\n'), ((16888, 16913), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (16911, 16913), False, 'import collections\n'), ((1767, 1844), 'logging.info', 'logging.info', (['"""Building archive files for the verifier-tool and the tasks..."""'], {}), "('Building archive files for the verifier-tool and the tasks...')\n", (1779, 1844), False, 'import logging\n'), ((2335, 2407), 'logging.info', 'logging.info', (['"""Waiting for the AWS EC2-instance to set everything up..."""'], {}), "('Waiting for the AWS EC2-instance to set everything up...')\n", (2347, 2407), False, 'import logging\n'), ((3432, 3506), 'requests.request', 'requests.request', (['"""PUT"""', 'verifier_uploadUrl'], {'headers': 'headers', 'data': 'payload'}), "('PUT', verifier_uploadUrl, headers=headers, data=payload)\n", (3448, 3506), False, 'import requests\n'), ((4140, 4211), 'requests.request', 'requests.request', (['"""PUT"""', 'tasks_uploadUrl'], {'headers': 'headers', 'data': 'payload'}), "('PUT', tasks_uploadUrl, headers=headers, data=payload)\n", (4156, 4211), False, 'import requests\n'), ((4737, 4861), 'logging.info', 'logging.info', (['"""Executing RunExec on the AWS workers. Depending on the size of the tasks, this might take a while."""'], {}), "(\n 'Executing RunExec on the AWS workers. Depending on the size of the tasks, this might take a while.'\n )\n", (4749, 4861), False, 'import logging\n'), ((5535, 5594), 'logging.info', 'logging.info', (['"""Done. Collecting the results back from AWS."""'], {}), "('Done. 
Collecting the results back from AWS.')\n", (5547, 5594), False, 'import logging\n'), ((6133, 6166), 'os.path.exists', 'os.path.exists', (['verifier_arc_path'], {}), '(verifier_arc_path)\n', (6147, 6166), False, 'import os\n'), ((6220, 6250), 'os.path.exists', 'os.path.exists', (['tasks_arc_path'], {}), '(tasks_arc_path)\n', (6234, 6250), False, 'import os\n'), ((7423, 7460), 'sys.exit', 'sys.exit', (['"""No common base dir found."""'], {}), "('No common base dir found.')\n", (7431, 7460), False, 'import sys\n'), ((7820, 7862), 'os.path.relpath', 'os.path.relpath', (['absWorkingDir', 'absBaseDir'], {}), '(absWorkingDir, absBaseDir)\n', (7835, 7862), False, 'import os\n'), ((9280, 9299), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (9293, 9299), False, 'import os\n'), ((11607, 11648), 'sys.exit', 'sys.exit', (['"""Benchmark has nothing to run."""'], {}), "('Benchmark has nothing to run.')\n", (11615, 11648), False, 'import sys\n'), ((11826, 11851), 'os.path.isdir', 'os.path.isdir', (['workingDir'], {}), '(workingDir)\n', (11839, 11851), False, 'import os\n'), ((12704, 12810), 'logging.warning', 'logging.warning', (['"""Cloud produced no results. Output-directory is missing or empty: %s"""', 'outputDir'], {}), "(\n 'Cloud produced no results. Output-directory is missing or empty: %s',\n outputDir)\n", (12719, 12810), False, 'import logging\n'), ((16255, 16320), 'logging.warning', 'logging.warning', (['"""Some expected result files could not be found!"""'], {}), "('Some expected result files could not be found!')\n", (16270, 16320), False, 'import logging\n'), ((1647, 1667), 'json.load', 'json.load', (['conf_file'], {}), '(conf_file)\n', (1656, 1667), False, 'import json\n'), ((2760, 2809), 'sys.exit', 'sys.exit', (["('Invalid token submitted: ' + aws_token)"], {}), "('Invalid token submitted: ' + aws_token)\n", (2768, 2809), False, 'import sys\n'), ((5132, 5158), 'requests.get', 'requests.get', (['progress_url'], {}), '(progress_url)\n', (5144, 5158), False, 'import requests\n'), ((5844, 5895), 'logging.debug', 'logging.debug', (['"""Downloading file from url: %s"""', 'url'], {}), "('Downloading file from url: %s', url)\n", (5857, 5895), False, 'import logging\n'), ((5922, 5939), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5934, 5939), False, 'import requests\n'), ((6180, 6208), 'os.remove', 'os.remove', (['verifier_arc_path'], {}), '(verifier_arc_path)\n', (6189, 6208), False, 'import os\n'), ((6264, 6289), 'os.remove', 'os.remove', (['tasks_arc_path'], {}), '(tasks_arc_path)\n', (6273, 6289), False, 'import os\n'), ((7975, 8046), 'sys.exit', 'sys.exit', (['"""Multiple result-files patterns not supported in cloud mode."""'], {}), "('Multiple result-files patterns not supported in cloud mode.')\n", (7983, 8046), False, 'import sys\n'), ((8418, 8442), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8430, 8442), False, 'import os\n'), ((8959, 8979), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (8973, 8979), False, 'import os\n'), ((9021, 9049), 'os.path.isfile', 'os.path.isfile', (['archive_path'], {}), '(archive_path)\n', (9035, 9049), False, 'import os\n'), ((11094, 11145), 'os.path.relpath', 'os.path.relpath', (['run.log_file', 'benchmark.log_folder'], {}), '(run.log_file, benchmark.log_folder)\n', (11109, 11145), False, 'import os\n'), ((12103, 12123), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (12117, 12123), False, 'import os\n'), ((12594, 12618), 'os.path.isdir', 'os.path.isdir', 
(['outputDir'], {}), '(outputDir)\n', (12607, 12618), False, 'import os\n'), ((12626, 12647), 'os.listdir', 'os.listdir', (['outputDir'], {}), '(outputDir)\n', (12636, 12647), False, 'import os\n'), ((13519, 13569), 'os.path.join', 'os.path.join', (['benchmark.log_folder', 'resultFilesDir'], {}), '(benchmark.log_folder, resultFilesDir)\n', (13531, 13569), False, 'import os\n'), ((13592, 13626), 'os.path.join', 'os.path.join', (['awsFileDir', 'filename'], {}), '(awsFileDir, filename)\n', (13604, 13626), False, 'import os\n'), ((13639, 13673), 'shutil.move', 'shutil.move', (['logFile', 'run.log_file'], {}), '(logFile, run.log_file)\n', (13650, 13673), False, 'import shutil\n'), ((13733, 13773), 'shutil.move', 'shutil.move', (["(logFile + '.data')", 'dataFile'], {}), "(logFile + '.data', dataFile)\n", (13744, 13773), False, 'import shutil\n'), ((13839, 13862), 'os.path.exists', 'os.path.exists', (['errFile'], {}), '(errFile)\n', (13853, 13862), False, 'import os\n'), ((13940, 13965), 'os.path.isdir', 'os.path.isdir', (['awsFileDir'], {}), '(awsFileDir)\n', (13953, 13965), False, 'import os\n'), ((15252, 15294), 'os.path.exists', 'os.path.exists', (["(run.log_file + '.stdError')"], {}), "(run.log_file + '.stdError')\n", (15266, 15294), False, 'import os\n'), ((15623, 15645), 'os.path.split', 'os.path.split', (['rawPath'], {}), '(rawPath)\n', (15636, 15645), False, 'import os\n'), ((16513, 16550), 'os.path.join', 'os.path.join', (['outputDir', '"""*.stdError"""'], {}), "(outputDir, '*.stdError')\n", (16525, 16550), False, 'import os\n'), ((4618, 4638), 'json.dumps', 'json.dumps', (['awsInput'], {}), '(awsInput)\n', (4628, 4638), False, 'import json\n'), ((5411, 5437), 'logging.info', 'logging.info', (['"""waiting..."""'], {}), "('waiting...')\n", (5423, 5437), False, 'import logging\n'), ((5454, 5468), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5464, 5468), False, 'import time\n'), ((8479, 8516), 'os.path.relpath', 'os.path.relpath', (['filepath', 'absBaseDir'], {}), '(filepath, absBaseDir)\n', (8494, 8516), False, 'import os\n'), ((8792, 8822), 'os.path.normpath', 'os.path.normpath', (['archive_path'], {}), '(archive_path)\n', (8808, 8822), False, 'import os\n'), ((9067, 9090), 'os.remove', 'os.remove', (['archive_path'], {}), '(archive_path)\n', (9076, 9090), False, 'import os\n'), ((9388, 9421), 'os.path.relpath', 'os.path.relpath', (['file', 'absBaseDir'], {}), '(file, absBaseDir)\n', (9403, 9421), False, 'import os\n'), ((13404, 13431), 'os.path.split', 'os.path.split', (['run.log_file'], {}), '(run.log_file)\n', (13417, 13431), False, 'import os\n'), ((13464, 13490), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (13480, 13490), False, 'import os\n'), ((13880, 13923), 'shutil.move', 'shutil.move', (["(logFile + '.stdError')", 'errFile'], {}), "(logFile + '.stdError', errFile)\n", (13891, 13923), False, 'import shutil\n'), ((13986, 14008), 'os.listdir', 'os.listdir', (['awsFileDir'], {}), '(awsFileDir)\n', (13996, 14008), False, 'import os\n'), ((14169, 14193), 'os.path.exists', 'os.path.exists', (['dataFile'], {}), '(dataFile)\n', (14183, 14193), False, 'import os\n'), ((14198, 14226), 'os.path.exists', 'os.path.exists', (['run.log_file'], {}), '(run.log_file)\n', (14212, 14226), False, 'import os\n'), ((15063, 15127), 'logging.warning', 'logging.warning', (['"""No results exist for file %s."""', 'run.identifier'], {}), "('No results exist for file %s.', run.identifier)\n", (15078, 15127), False, 'import logging\n'), ((15847, 15882), 
'os.path.isdir', 'os.path.isdir', (['vcloudFilesDirectory'], {}), '(vcloudFilesDirectory)\n', (15860, 15882), False, 'import os\n'), ((15977, 16035), 'shutil.move', 'shutil.move', (['vcloudFilesDirectory', 'benchexecFilesDirectory'], {}), '(vcloudFilesDirectory, benchexecFilesDirectory)\n', (15988, 16035), False, 'import shutil\n'), ((9213, 9235), 'os.path.normpath', 'os.path.normpath', (['file'], {}), '(file)\n', (9229, 9235), False, 'import os\n'), ((12246, 12268), 'os.path.normpath', 'os.path.normpath', (['file'], {}), '(file)\n', (12262, 12268), False, 'import os\n'), ((14030, 14089), 'logging.info', 'logging.info', (['"""Dir %s contains unhandled files"""', 'awsFileDir'], {}), "('Dir %s contains unhandled files', awsFileDir)\n", (14042, 14089), False, 'import logging\n'), ((14132, 14152), 'os.rmdir', 'os.rmdir', (['awsFileDir'], {}), '(awsFileDir)\n', (14140, 14152), False, 'import os\n'), ((15891, 15929), 'os.path.isdir', 'os.path.isdir', (['benchexecFilesDirectory'], {}), '(benchexecFilesDirectory)\n', (15904, 15929), False, 'import os\n'), ((5968, 5999), 'io.BytesIO', 'io.BytesIO', (['result_file.content'], {}), '(result_file.content)\n', (5978, 5999), False, 'import io\n'), ((14387, 14406), 'os.remove', 'os.remove', (['dataFile'], {}), '(dataFile)\n', (14396, 14406), False, 'import os\n'), ((14464, 14564), 'logging.warning', 'logging.warning', (['"""Cannot extract measured values from output for file %s: %s"""', 'run.identifier', 'e'], {}), "('Cannot extract measured values from output for file %s: %s',\n run.identifier, e)\n", (14479, 14564), False, 'import logging\n')]
|
from .domain import Domain
from math import factorial
import itertools
import random
class Permutations(Domain):
def __init__(self, domain, name=None):
super(Permutations, self).__init__(name)
self._set_flags_from_domain(domain)
self.step_jumps = False # not implemented yet
self.domain = domain
def _compute_size(self):
return factorial(self.domain.size)
def create_iter(self, step=0):
        assert step == 0 # nonzero step not implemented yet
items = tuple(self.domain)
return itertools.permutations(items)
def generate_one(self):
        # random.shuffle works in place and returns None, so shuffle a list
        # copy of the domain items and return them as a tuple
        items = list(self.domain)
        random.shuffle(items)
        return tuple(items)
def _remap_domains(self, transformation):
return Permutations(transformation(self.domain), self.name)
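# Illustrative usage sketch (not part of the original module). It assumes some
# concrete Domain implementation; "Range(3)" below is a hypothetical domain
# yielding the items 0, 1, 2:
#
#     perms = Permutations(Range(3))
#     perms._compute_size()         # factorial(3) == 6
#     list(perms.create_iter())     # all 6 orderings of (0, 1, 2)
#     perms.generate_one()          # one random ordering, e.g. (2, 0, 1)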
|
[
"itertools.permutations",
"math.factorial"
] |
[((382, 409), 'math.factorial', 'factorial', (['self.domain.size'], {}), '(self.domain.size)\n', (391, 409), False, 'from math import factorial\n'), ((553, 582), 'itertools.permutations', 'itertools.permutations', (['items'], {}), '(items)\n', (575, 582), False, 'import itertools\n')]
|
#!/usr/bin/env python
u"""
model.py
Written by <NAME> (09/2021)
Retrieves tide model parameters for named tide models and
from model definition files
UPDATE HISTORY:
Written 09/2021
"""
import os
import re
import io
import copy
class model:
"""Retrieves tide model parameters for named models or
from a model definition file for use in the pyTMD tide
prediction programs
"""
def __init__(self, directory=os.getcwd(), **kwargs):
# set default keyword arguments
kwargs.setdefault('compressed',False)
kwargs.setdefault('format','netcdf')
kwargs.setdefault('verify',True)
# set initial attributes
self.atl03 = None
self.atl06 = None
self.atl07 = None
self.atl11 = None
self.atl12 = None
self.compressed = copy.copy(kwargs['compressed'])
self.constituents = None
self.description = None
self.directory = os.path.expanduser(directory)
self.format = copy.copy(kwargs['format'])
self.gla12 = None
self.grid_file = None
self.long_name = None
self.model_file = None
self.name = None
self.projection = None
self.reference = None
self.scale = None
self.type = None
self.variable = None
self.verify = copy.copy(kwargs['verify'])
self.version = None
def grid(self,m):
"""Create a model object from known tide grid files
"""
# model name
self.name = m
# select between known tide models
if (m == 'CATS0201'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
elif (m == 'CATS2008_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,
'CATS2008a_SPOTL_Load')
self.grid_file = self.pathfinder('grid_CATS2008a_opt')
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
self.version = 'v1'
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
self.version = 'v2'
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
self.version = 'v3'
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
self.version = 'v4'
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1','DATA')
self.grid_file = self.pathfinder('grid_tpxo9')
self.version = '9.1'
elif (m == 'TPXO8-atlas'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.version = '8'
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.version = '7.2'
elif (m == 'TPXO7.2_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_load')
self.grid_file = self.pathfinder('grid_tpxo6.2')
self.version = '7.2'
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.version = '2018'
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.version = 'v2'
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
def elevation(self,m):
"""Create a model object from known tidal elevation models
"""
# model name
self.name = m
# model type
self.type = 'z'
# select between known tide models
if (m == 'CATS0201'):
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
self.model_file = self.pathfinder('h0_CATS02_01')
self.format = 'OTIS'
self.projection = '4326'
# model description and references
self.reference = ('https://mail.esr.org/polar_tide_models/'
'Model_CATS0201.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
self.model_file = self.pathfinder('hf.CATS2008.out')
self.projection = 'CATS2008'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/cats2008/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'CATS2008_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,
'CATS2008a_SPOTL_Load')
self.grid_file = self.pathfinder('grid_CATS2008a_opt')
self.model_file = self.pathfinder('h_CATS2008a_SPOTL_load')
self.projection = 'CATS2008'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/cats2008/')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
model_files = ['h_q1_tpxo9_atlas_30','h_o1_tpxo9_atlas_30',
'h_p1_tpxo9_atlas_30','h_k1_tpxo9_atlas_30',
'h_n2_tpxo9_atlas_30','h_m2_tpxo9_atlas_30',
'h_s2_tpxo9_atlas_30','h_k2_tpxo9_atlas_30',
'h_m4_tpxo9_atlas_30','h_ms4_tpxo9_atlas_30',
'h_mn4_tpxo9_atlas_30','h_2n2_tpxo9_atlas_30']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo9_atlas.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
model_files = ['h_q1_tpxo9_atlas_30_v2','h_o1_tpxo9_atlas_30_v2',
'h_p1_tpxo9_atlas_30_v2','h_k1_tpxo9_atlas_30_v2',
'h_n2_tpxo9_atlas_30_v2','h_m2_tpxo9_atlas_30_v2',
'h_s2_tpxo9_atlas_30_v2','h_k2_tpxo9_atlas_30_v2',
'h_m4_tpxo9_atlas_30_v2','h_ms4_tpxo9_atlas_30_v2',
'h_mn4_tpxo9_atlas_30_v2','h_2n2_tpxo9_atlas_30_v2']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v2'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
model_files = ['h_q1_tpxo9_atlas_30_v3','h_o1_tpxo9_atlas_30_v3',
'h_p1_tpxo9_atlas_30_v3','h_k1_tpxo9_atlas_30_v3',
'h_n2_tpxo9_atlas_30_v3','h_m2_tpxo9_atlas_30_v3',
'h_s2_tpxo9_atlas_30_v3','h_k2_tpxo9_atlas_30_v3',
'h_m4_tpxo9_atlas_30_v3','h_ms4_tpxo9_atlas_30_v3',
'h_mn4_tpxo9_atlas_30_v3','h_2n2_tpxo9_atlas_30_v3',
'h_mf_tpxo9_atlas_30_v3','h_mm_tpxo9_atlas_30_v3']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v3'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
model_files = ['h_q1_tpxo9_atlas_30_v4','h_o1_tpxo9_atlas_30_v4',
'h_p1_tpxo9_atlas_30_v4','h_k1_tpxo9_atlas_30_v4',
'h_n2_tpxo9_atlas_30_v4','h_m2_tpxo9_atlas_30_v4',
'h_s2_tpxo9_atlas_30_v4','h_k2_tpxo9_atlas_30_v4',
'h_m4_tpxo9_atlas_30_v4','h_ms4_tpxo9_atlas_30_v4',
'h_mn4_tpxo9_atlas_30_v4','h_2n2_tpxo9_atlas_30_v4',
'h_mf_tpxo9_atlas_30_v4','h_mm_tpxo9_atlas_30_v4']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v4'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1','DATA')
self.grid_file = self.pathfinder('grid_tpxo9')
self.model_file = self.pathfinder('h_tpxo9.v1')
self.projection = '4326'
self.version = '9.1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO8-atlas'):
self.format = 'ATLAS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.model_file = self.pathfinder('hf.tpxo8_atlas_30_v1')
self.projection = '4326'
self.version = '8'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/tpxo8_atlas.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.model_file = self.pathfinder('h_tpxo7.2')
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO7.2_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_load')
self.grid_file = self.pathfinder('grid_tpxo6.2')
self.model_file = self.pathfinder('h_tpxo7.2_load')
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = self.pathfinder('h0_Arc5km.oce')
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aodtm-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = self.pathfinder('h_Arc5km.oce')
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.model_file = self.pathfinder('h_Arc5km2018')
self.projection = 'PSNorth'
self.version = '2018'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.model_file = self.pathfinder('h_Greenland8.v2')
self.projection = '3413'
self.version = 'v2'
# model description and references
self.reference = 'https://doi.org/10.1002/2016RG000546'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.7'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.7','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.7'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.7_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.7','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.7'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'GOT4.8'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'got4.8','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.8'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.8_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'got4.8','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.8'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'GOT4.10'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.10c','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.10'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.10_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.10c','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.10'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'FES2014'):
self.format = 'FES'
self.model_directory = os.path.join(self.directory,
'fes2014','ocean_tide')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2',
'lambda2','m2','m3','m4','m6','m8','mf','mks2','mm',
'mn4','ms4','msf','msqm','mtm','mu2','n2','n4','nu2',
'o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']
self.scale = 1.0/100.0
self.version = 'FES2014'
# model description and references
self.reference = ('https://www.aviso.altimetry.fr/'
'en/data/products/auxiliary-products/'
'global-tide-fes.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'FES2014_load'):
self.format = 'FES'
self.model_directory = os.path.join(self.directory,
'fes2014','load_tide')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2',
'lambda2','m2','m3','m4','m6','m8','mf','mks2','mm',
'mn4','ms4','msf','msqm','mtm','mu2','n2','n4','nu2',
'o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']
self.scale = 1.0/100.0
self.version = 'FES2014'
# model description and references
self.reference = ('https://www.aviso.altimetry.fr/'
'en/data/products/auxiliary-products/'
'global-tide-fes.html')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
def current(self,m):
"""Create a model object from known tidal current models
"""
# model name
self.name = m
# model type
self.type = ['u','v']
# select between tide models
if (m == 'CATS0201'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
self.model_file = dict(u=self.pathfinder('UV0_CATS02_01'))
self.projection = '4326'
# model description and references
self.reference = ('https://mail.esr.org/polar_tide_models/'
'Model_CATS0201.html')
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
self.model_file = dict(u=self.pathfinder('uv.CATS2008.out'))
self.projection = 'CATS2008'
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30','u_o1_tpxo9_atlas_30',
'u_p1_tpxo9_atlas_30','u_k1_tpxo9_atlas_30',
'u_n2_tpxo9_atlas_30','u_m2_tpxo9_atlas_30',
'u_s2_tpxo9_atlas_30','u_k2_tpxo9_atlas_30',
'u_m4_tpxo9_atlas_30','u_ms4_tpxo9_atlas_30',
'u_mn4_tpxo9_atlas_30','u_2n2_tpxo9_atlas_30']
model_files['v'] = ['v_q1_tpxo9_atlas_30','v_o1_tpxo9_atlas_30',
'v_p1_tpxo9_atlas_30','v_k1_tpxo9_atlas_30',
'v_n2_tpxo9_atlas_30','v_m2_tpxo9_atlas_30',
'v_s2_tpxo9_atlas_30','v_k2_tpxo9_atlas_30',
'v_m4_tpxo9_atlas_30','v_ms4_tpxo9_atlas_30',
'v_mn4_tpxo9_atlas_30','v_2n2_tpxo9_atlas_30']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo9_atlas.html')
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v2','u_o1_tpxo9_atlas_30_v2',
'u_p1_tpxo9_atlas_30_v2','u_k1_tpxo9_atlas_30_v2',
'u_n2_tpxo9_atlas_30_v2','u_m2_tpxo9_atlas_30_v2',
'u_s2_tpxo9_atlas_30_v2','u_k2_tpxo9_atlas_30_v2',
'u_m4_tpxo9_atlas_30_v2','u_ms4_tpxo9_atlas_30_v2',
'u_mn4_tpxo9_atlas_30_v2','u_2n2_tpxo9_atlas_30_v2']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v2','v_o1_tpxo9_atlas_30_v2',
'v_p1_tpxo9_atlas_30_v2','v_k1_tpxo9_atlas_30_v2',
'v_n2_tpxo9_atlas_30_v2','v_m2_tpxo9_atlas_30_v2',
'v_s2_tpxo9_atlas_30_v2','v_k2_tpxo9_atlas_30_v2',
'v_m4_tpxo9_atlas_30_v2','v_ms4_tpxo9_atlas_30_v2',
'v_mn4_tpxo9_atlas_30_v2','v_2n2_tpxo9_atlas_30_v2']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v2'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v3','u_o1_tpxo9_atlas_30_v3',
'u_p1_tpxo9_atlas_30_v3','u_k1_tpxo9_atlas_30_v3',
'u_n2_tpxo9_atlas_30_v3','u_m2_tpxo9_atlas_30_v3',
'u_s2_tpxo9_atlas_30_v3','u_k2_tpxo9_atlas_30_v3',
'u_m4_tpxo9_atlas_30_v3','u_ms4_tpxo9_atlas_30_v3',
'u_mn4_tpxo9_atlas_30_v3','u_2n2_tpxo9_atlas_30_v3']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v3','v_o1_tpxo9_atlas_30_v3',
'v_p1_tpxo9_atlas_30_v3','v_k1_tpxo9_atlas_30_v3',
'v_n2_tpxo9_atlas_30_v3','v_m2_tpxo9_atlas_30_v3',
'v_s2_tpxo9_atlas_30_v3','v_k2_tpxo9_atlas_30_v3',
'v_m4_tpxo9_atlas_30_v3','v_ms4_tpxo9_atlas_30_v3',
'v_mn4_tpxo9_atlas_30_v3','v_2n2_tpxo9_atlas_30_v3']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v3'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v4','u_o1_tpxo9_atlas_30_v4',
'u_p1_tpxo9_atlas_30_v4','u_k1_tpxo9_atlas_30_v4',
'u_n2_tpxo9_atlas_30_v4','u_m2_tpxo9_atlas_30_v4',
'u_s2_tpxo9_atlas_30_v4','u_k2_tpxo9_atlas_30_v4',
'u_m4_tpxo9_atlas_30_v4','u_ms4_tpxo9_atlas_30_v4',
'u_mn4_tpxo9_atlas_30_v4','u_2n2_tpxo9_atlas_30_v4']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v4','v_o1_tpxo9_atlas_30_v4',
'v_p1_tpxo9_atlas_30_v4','v_k1_tpxo9_atlas_30_v4',
'v_n2_tpxo9_atlas_30_v4','v_m2_tpxo9_atlas_30_v4',
'v_s2_tpxo9_atlas_30_v4','v_k2_tpxo9_atlas_30_v4',
'v_m4_tpxo9_atlas_30_v4','v_ms4_tpxo9_atlas_30_v4',
'v_mn4_tpxo9_atlas_30_v4','v_2n2_tpxo9_atlas_30_v4']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v4'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1')
self.grid_file = self.pathfinder('grid_tpxo9')
self.model_file = dict(u=self.pathfinder('u_tpxo9.v1'))
self.projection = '4326'
self.version = '9.1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'global.html')
elif (m == 'TPXO8-atlas'):
self.format = 'ATLAS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.model_file = dict(u=self.pathfinder('uv.tpxo8_atlas_30_v1'))
self.projection = '4326'
self.version = '8'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo8_atlas.html')
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.model_file = dict(u=self.pathfinder('u_tpxo7.2'))
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'global.html')
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = dict(u=self.pathfinder('UV0_Arc5km'))
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aodtm-5/')
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = dict(u=self.pathfinder('UV_Arc5km'))
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.model_file = dict(u=self.pathfinder('UV_Arc5km2018'))
self.projection = 'PSNorth'
self.version = '2018'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.model_file = dict(u=self.pathfinder('u_Greenland8_rot.v2'))
self.projection = '3413'
self.version = 'v2'
# model description and references
self.reference = 'https://doi.org/10.1002/2016RG000546'
elif (m == 'FES2014'):
self.format = 'FES'
model_directory = {}
model_directory['u'] = os.path.join(self.directory,
'fes2014','eastward_velocity')
model_directory['v'] = os.path.join(self.directory,
'fes2014','northward_velocity')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = {}
for key,val in model_directory.items():
                # resolve each component directory and locate its constituent files
                self.model_directory = os.path.expanduser(val)
                self.model_file[key] = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2','lambda2',
'm2','m3','m4','m6','m8','mf','mks2','mm','mn4','ms4','msf',
'msqm','mtm','mu2','n2','n4','nu2','o1','p1','q1','r2','s1',
's2','s4','sa','ssa','t2']
self.scale = 1.0
self.version = 'FES2014'
# model description and references
            self.reference = ('https://www.aviso.altimetry.fr/en/data/products/'
                'auxiliary-products/global-tide-fes.html')
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
@property
def gzip(self):
"""compression flag"""
return '.gz' if self.compressed else ''
@property
def suffix(self):
"""format suffix flag"""
return '.nc' if (self.format == 'netcdf') else ''
def pathfinder(self,model_file):
"""Completes file paths and appends file and gzip suffixes
"""
if isinstance(model_file,list):
output_file = [os.path.join(self.model_directory,
''.join([f,self.suffix,self.gzip])) for f in model_file]
valid = all([os.access(f, os.F_OK) for f in output_file])
elif isinstance(model_file,str):
output_file = os.path.join(self.model_directory,
''.join([model_file,self.suffix,self.gzip]))
valid = os.access(output_file, os.F_OK)
#-- check that (all) output files exist
if self.verify and not valid:
raise FileNotFoundError(output_file)
#-- return the complete output path
return output_file
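    # Illustrative note (not part of the original class): with format='netcdf'
    # and compressed=True, pathfinder('h_m2_tpxo9_atlas_30') would return
    # os.path.join(self.model_directory, 'h_m2_tpxo9_atlas_30.nc.gz'), since
    # the suffix property appends '.nc' for netcdf models and gzip appends
    # '.gz' when compressed; for OTIS/ATLAS/GOT/FES formats the suffix is empty.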
def from_file(self, definition_file):
"""Create a model object from an input definition file
"""
# variable with parameter definitions
parameters = {}
# Opening definition file and assigning file ID number
if isinstance(definition_file,io.IOBase):
fid = copy.copy(definition_file)
else:
fid = open(os.path.expanduser(definition_file), 'r')
        # for each line in the file, extract the parameter name and value
for fileline in fid:
# Splitting the input line between parameter name and value
part = fileline.rstrip().split(maxsplit=1)
# filling the parameter definition variable
parameters[part[0]] = part[1]
# close the parameter file
fid.close()
# convert from dictionary to model variable
temp = self.from_dict(parameters)
# verify model name, format and type
assert temp.name
assert temp.format in ('OTIS','ATLAS','netcdf','GOT','FES')
assert temp.type
# verify necessary attributes are with model format
assert temp.model_file
# split model file into list if an ATLAS, GOT or FES file
# model files can be comma, tab or space delimited
# extract full path to tide model files
if re.search(r'[\s\,]+', temp.model_file):
temp.model_file = [os.path.expanduser(f) for f in
re.split(r'[\s\,]+',temp.model_file)]
temp.model_directory = os.path.dirname(temp.model_file[0])
else:
temp.model_file = os.path.expanduser(temp.model_file)
temp.model_directory = os.path.dirname(temp.model_file)
# extract full path to tide grid file
if temp.format in ('OTIS','ATLAS','netcdf'):
assert temp.grid_file
temp.grid_file = os.path.expanduser(temp.grid_file)
if temp.format in ('OTIS','ATLAS'):
assert temp.projection
# convert scale from string to float
if temp.format in ('netcdf','GOT','FES'):
assert temp.scale
temp.scale = float(temp.scale)
if temp.format in ('FES',):
assert temp.version
# split type into list if currents u,v
if re.search(r'[\s\,]+', temp.type):
temp.type = re.split(r'[\s\,]+',temp.type)
# convert boolean strings
if isinstance(temp.compressed,str):
temp.compressed = self.to_bool(temp.compressed)
# return the model parameters
return temp
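    # Illustrative definition-file sketch (not part of the original class).
    # Each line holds a parameter name and its value separated by whitespace;
    # the paths and values below are hypothetical:
    #
    #     name        TPXO9-atlas-v4
    #     format      netcdf
    #     type        z
    #     grid_file   /path/to/grid_tpxo9_atlas_30_v4.nc
    #     model_file  /path/to/h_m2_tpxo9_atlas_30_v4.nc,/path/to/h_s2_tpxo9_atlas_30_v4.nc
    #     scale       0.001
    #     compressed  False
    #
    # Parsing such a file with model().from_file(path) would populate the
    # corresponding attributes and split model_file into a list of paths.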
def from_dict(self,d):
"""Create a model object from a python dictionary
"""
for key,val in d.items():
setattr(self,key,copy.copy(val))
# return the model parameters
return self
def to_bool(self,val):
"""Converts strings of True/False to a boolean values
"""
if val.lower() in ('y','yes','t','true','1'):
return True
elif val.lower() in ('n','no','f','false','0'):
return False
else:
raise ValueError('Invalid boolean string {0}'.format(val))
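# Illustrative usage sketch (not part of the original module). Model names,
# paths and file availability depend on the local tide-model directory, so
# verify=False is used here to skip the file-existence check:
#
#     m = model('~/tide_models', format='netcdf', compressed=True, verify=False)
#     m = m.elevation('TPXO9-atlas-v4')
#     m.type          # 'z'
#     m.scale         # 0.001
#     m.model_file    # list of full paths ending in '.nc.gz'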
|
[
"os.path.expanduser",
"re.split",
"os.path.join",
"os.getcwd",
"os.path.dirname",
"copy.copy",
"re.search",
"os.access"
] |
[((434, 445), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (443, 445), False, 'import os\n'), ((819, 850), 'copy.copy', 'copy.copy', (["kwargs['compressed']"], {}), "(kwargs['compressed'])\n", (828, 850), False, 'import copy\n'), ((941, 970), 'os.path.expanduser', 'os.path.expanduser', (['directory'], {}), '(directory)\n', (959, 970), False, 'import os\n'), ((993, 1020), 'copy.copy', 'copy.copy', (["kwargs['format']"], {}), "(kwargs['format'])\n", (1002, 1020), False, 'import copy\n'), ((1326, 1353), 'copy.copy', 'copy.copy', (["kwargs['verify']"], {}), "(kwargs['verify'])\n", (1335, 1353), False, 'import copy\n'), ((46555, 46594), 're.search', 're.search', (['"""[\\\\s\\\\,]+"""', 'temp.model_file'], {}), "('[\\\\s\\\\,]+', temp.model_file)\n", (46564, 46594), False, 'import re\n'), ((47500, 47533), 're.search', 're.search', (['"""[\\\\s\\\\,]+"""', 'temp.type'], {}), "('[\\\\s\\\\,]+', temp.type)\n", (47509, 47533), False, 'import re\n'), ((1661, 1705), 'os.path.join', 'os.path.join', (['self.directory', '"""cats0201_tmd"""'], {}), "(self.directory, 'cats0201_tmd')\n", (1673, 1705), False, 'import os\n'), ((5362, 5406), 'os.path.join', 'os.path.join', (['self.directory', '"""cats0201_tmd"""'], {}), "(self.directory, 'cats0201_tmd')\n", (5374, 5406), False, 'import os\n'), ((32730, 32774), 'os.path.join', 'os.path.join', (['self.directory', '"""cats0201_tmd"""'], {}), "(self.directory, 'cats0201_tmd')\n", (32742, 32774), False, 'import os\n'), ((45528, 45554), 'copy.copy', 'copy.copy', (['definition_file'], {}), '(definition_file)\n', (45537, 45554), False, 'import copy\n'), ((46746, 46781), 'os.path.dirname', 'os.path.dirname', (['temp.model_file[0]'], {}), '(temp.model_file[0])\n', (46761, 46781), False, 'import os\n'), ((46826, 46861), 'os.path.expanduser', 'os.path.expanduser', (['temp.model_file'], {}), '(temp.model_file)\n', (46844, 46861), False, 'import os\n'), ((46897, 46929), 'os.path.dirname', 'os.path.dirname', (['temp.model_file'], {}), '(temp.model_file)\n', (46912, 46929), False, 'import os\n'), ((47092, 47126), 'os.path.expanduser', 'os.path.expanduser', (['temp.grid_file'], {}), '(temp.grid_file)\n', (47110, 47126), False, 'import os\n'), ((47558, 47590), 're.split', 're.split', (['"""[\\\\s\\\\,]+"""', 'temp.type'], {}), "('[\\\\s\\\\,]+', temp.type)\n", (47566, 47590), False, 'import re\n'), ((1863, 1903), 'os.path.join', 'os.path.join', (['self.directory', '"""CATS2008"""'], {}), "(self.directory, 'CATS2008')\n", (1875, 1903), False, 'import os\n'), ((6384, 6424), 'os.path.join', 'os.path.join', (['self.directory', '"""CATS2008"""'], {}), "(self.directory, 'CATS2008')\n", (6396, 6424), False, 'import os\n'), ((33198, 33238), 'os.path.join', 'os.path.join', (['self.directory', '"""CATS2008"""'], {}), "(self.directory, 'CATS2008')\n", (33210, 33238), False, 'import os\n'), ((44971, 45002), 'os.access', 'os.access', (['output_file', 'os.F_OK'], {}), '(output_file, os.F_OK)\n', (44980, 45002), False, 'import os\n'), ((45592, 45627), 'os.path.expanduser', 'os.path.expanduser', (['definition_file'], {}), '(definition_file)\n', (45610, 45627), False, 'import os\n'), ((46626, 46647), 'os.path.expanduser', 'os.path.expanduser', (['f'], {}), '(f)\n', (46644, 46647), False, 'import os\n'), ((47946, 47960), 'copy.copy', 'copy.copy', (['val'], {}), '(val)\n', (47955, 47960), False, 'import copy\n'), ((2070, 2122), 'os.path.join', 'os.path.join', (['self.directory', '"""CATS2008a_SPOTL_Load"""'], {}), "(self.directory, 'CATS2008a_SPOTL_Load')\n", (2082, 2122), False, 'import os\n'), 
((7409, 7461), 'os.path.join', 'os.path.join', (['self.directory', '"""CATS2008a_SPOTL_Load"""'], {}), "(self.directory, 'CATS2008a_SPOTL_Load')\n", (7421, 7461), False, 'import os\n'), ((33484, 33527), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas"""'], {}), "(self.directory, 'TPXO9_atlas')\n", (33496, 33527), False, 'import os\n'), ((44743, 44764), 'os.access', 'os.access', (['f', 'os.F_OK'], {}), '(f, os.F_OK)\n', (44752, 44764), False, 'import os\n'), ((46673, 46711), 're.split', 're.split', (['"""[\\\\s\\\\,]+"""', 'temp.model_file'], {}), "('[\\\\s\\\\,]+', temp.model_file)\n", (46681, 46711), False, 'import re\n'), ((2276, 2319), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas"""'], {}), "(self.directory, 'TPXO9_atlas')\n", (2288, 2319), False, 'import os\n'), ((8332, 8375), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas"""'], {}), "(self.directory, 'TPXO9_atlas')\n", (8344, 8375), False, 'import os\n'), ((34858, 34904), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v2"""'], {}), "(self.directory, 'TPXO9_atlas_v2')\n", (34870, 34904), False, 'import os\n'), ((2489, 2535), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v2"""'], {}), "(self.directory, 'TPXO9_atlas_v2')\n", (2501, 2535), False, 'import os\n'), ((9736, 9782), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v2"""'], {}), "(self.directory, 'TPXO9_atlas_v2')\n", (9748, 9782), False, 'import os\n'), ((36282, 36328), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v3"""'], {}), "(self.directory, 'TPXO9_atlas_v3')\n", (36294, 36328), False, 'import os\n'), ((2711, 2757), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v3"""'], {}), "(self.directory, 'TPXO9_atlas_v3')\n", (2723, 2757), False, 'import os\n'), ((11154, 11200), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v3"""'], {}), "(self.directory, 'TPXO9_atlas_v3')\n", (11166, 11200), False, 'import os\n'), ((37706, 37752), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v4"""'], {}), "(self.directory, 'TPXO9_atlas_v4')\n", (37718, 37752), False, 'import os\n'), ((2933, 2979), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v4"""'], {}), "(self.directory, 'TPXO9_atlas_v4')\n", (2945, 2979), False, 'import os\n'), ((12639, 12685), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9_atlas_v4"""'], {}), "(self.directory, 'TPXO9_atlas_v4')\n", (12651, 12685), False, 'import os\n'), ((39156, 39195), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9.1"""'], {}), "(self.directory, 'TPXO9.1')\n", (39168, 39195), False, 'import os\n'), ((3181, 3228), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9.1"""', '"""DATA"""'], {}), "(self.directory, 'TPXO9.1', 'DATA')\n", (3193, 3228), False, 'import os\n'), ((14150, 14197), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO9.1"""', '"""DATA"""'], {}), "(self.directory, 'TPXO9.1', 'DATA')\n", (14162, 14197), False, 'import os\n'), ((39640, 39683), 'os.path.join', 'os.path.join', (['self.directory', '"""tpxo8_atlas"""'], {}), "(self.directory, 'tpxo8_atlas')\n", (39652, 39683), False, 'import os\n'), ((3422, 3465), 'os.path.join', 'os.path.join', (['self.directory', '"""tpxo8_atlas"""'], {}), "(self.directory, 'tpxo8_atlas')\n", (3434, 3465), False, 'import os\n'), ((15163, 15206), 'os.path.join', 'os.path.join', (['self.directory', '"""tpxo8_atlas"""'], {}), 
"(self.directory, 'tpxo8_atlas')\n", (15175, 15206), False, 'import os\n'), ((40147, 40190), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO7.2_tmd"""'], {}), "(self.directory, 'TPXO7.2_tmd')\n", (40159, 40190), False, 'import os\n'), ((3665, 3708), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO7.2_tmd"""'], {}), "(self.directory, 'TPXO7.2_tmd')\n", (3677, 3708), False, 'import os\n'), ((16192, 16235), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO7.2_tmd"""'], {}), "(self.directory, 'TPXO7.2_tmd')\n", (16204, 16235), False, 'import os\n'), ((40631, 40673), 'os.path.join', 'os.path.join', (['self.directory', '"""aodtm5_tmd"""'], {}), "(self.directory, 'aodtm5_tmd')\n", (40643, 40673), False, 'import os\n'), ((3906, 3950), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO7.2_load"""'], {}), "(self.directory, 'TPXO7.2_load')\n", (3918, 3950), False, 'import os\n'), ((17203, 17247), 'os.path.join', 'os.path.join', (['self.directory', '"""TPXO7.2_load"""'], {}), "(self.directory, 'TPXO7.2_load')\n", (17215, 17247), False, 'import os\n'), ((41140, 41182), 'os.path.join', 'os.path.join', (['self.directory', '"""aotim5_tmd"""'], {}), "(self.directory, 'aotim5_tmd')\n", (41152, 41182), False, 'import os\n'), ((4143, 4185), 'os.path.join', 'os.path.join', (['self.directory', '"""aodtm5_tmd"""'], {}), "(self.directory, 'aodtm5_tmd')\n", (4155, 4185), False, 'import os\n'), ((18107, 18149), 'os.path.join', 'os.path.join', (['self.directory', '"""aodtm5_tmd"""'], {}), "(self.directory, 'aodtm5_tmd')\n", (18119, 18149), False, 'import os\n'), ((41653, 41695), 'os.path.join', 'os.path.join', (['self.directory', '"""Arc5km2018"""'], {}), "(self.directory, 'Arc5km2018')\n", (41665, 41695), False, 'import os\n'), ((4344, 4386), 'os.path.join', 'os.path.join', (['self.directory', '"""aotim5_tmd"""'], {}), "(self.directory, 'aotim5_tmd')\n", (4356, 4386), False, 'import os\n'), ((19141, 19183), 'os.path.join', 'os.path.join', (['self.directory', '"""aotim5_tmd"""'], {}), "(self.directory, 'aotim5_tmd')\n", (19153, 19183), False, 'import os\n'), ((42204, 42251), 'os.path.join', 'os.path.join', (['self.directory', '"""greenlandTMD_v2"""'], {}), "(self.directory, 'greenlandTMD_v2')\n", (42216, 42251), False, 'import os\n'), ((4550, 4592), 'os.path.join', 'os.path.join', (['self.directory', '"""Arc5km2018"""'], {}), "(self.directory, 'Arc5km2018')\n", (4562, 4592), False, 'import os\n'), ((20179, 20221), 'os.path.join', 'os.path.join', (['self.directory', '"""Arc5km2018"""'], {}), "(self.directory, 'Arc5km2018')\n", (20191, 20221), False, 'import os\n'), ((42710, 42770), 'os.path.join', 'os.path.join', (['self.directory', '"""fes2014"""', '"""eastward_velocity"""'], {}), "(self.directory, 'fes2014', 'eastward_velocity')\n", (42722, 42770), False, 'import os\n'), ((42821, 42882), 'os.path.join', 'os.path.join', (['self.directory', '"""fes2014"""', '"""northward_velocity"""'], {}), "(self.directory, 'fes2014', 'northward_velocity')\n", (42833, 42882), False, 'import os\n'), ((4790, 4837), 'os.path.join', 'os.path.join', (['self.directory', '"""greenlandTMD_v2"""'], {}), "(self.directory, 'greenlandTMD_v2')\n", (4802, 4837), False, 'import os\n'), ((21251, 21298), 'os.path.join', 'os.path.join', (['self.directory', '"""greenlandTMD_v2"""'], {}), "(self.directory, 'greenlandTMD_v2')\n", (21263, 21298), False, 'import os\n'), ((43439, 43474), 'os.path.expanduser', 'os.path.expanduser', (['model_directory'], {}), '(model_directory)\n', (43457, 43474), False, 'import 
os\n'), ((22241, 22298), 'os.path.join', 'os.path.join', (['self.directory', '"""GOT4.7"""', '"""grids_oceantide"""'], {}), "(self.directory, 'GOT4.7', 'grids_oceantide')\n", (22253, 22298), False, 'import os\n'), ((23390, 23446), 'os.path.join', 'os.path.join', (['self.directory', '"""GOT4.7"""', '"""grids_loadtide"""'], {}), "(self.directory, 'GOT4.7', 'grids_loadtide')\n", (23402, 23446), False, 'import os\n'), ((24500, 24557), 'os.path.join', 'os.path.join', (['self.directory', '"""got4.8"""', '"""grids_oceantide"""'], {}), "(self.directory, 'got4.8', 'grids_oceantide')\n", (24512, 24557), False, 'import os\n'), ((25649, 25705), 'os.path.join', 'os.path.join', (['self.directory', '"""got4.8"""', '"""grids_loadtide"""'], {}), "(self.directory, 'got4.8', 'grids_loadtide')\n", (25661, 25705), False, 'import os\n'), ((26760, 26819), 'os.path.join', 'os.path.join', (['self.directory', '"""GOT4.10c"""', '"""grids_oceantide"""'], {}), "(self.directory, 'GOT4.10c', 'grids_oceantide')\n", (26772, 26819), False, 'import os\n'), ((27913, 27971), 'os.path.join', 'os.path.join', (['self.directory', '"""GOT4.10c"""', '"""grids_loadtide"""'], {}), "(self.directory, 'GOT4.10c', 'grids_loadtide')\n", (27925, 27971), False, 'import os\n'), ((29027, 29080), 'os.path.join', 'os.path.join', (['self.directory', '"""fes2014"""', '"""ocean_tide"""'], {}), "(self.directory, 'fes2014', 'ocean_tide')\n", (29039, 29080), False, 'import os\n'), ((30757, 30809), 'os.path.join', 'os.path.join', (['self.directory', '"""fes2014"""', '"""load_tide"""'], {}), "(self.directory, 'fes2014', 'load_tide')\n", (30769, 30809), False, 'import os\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 AntiCompositeNumber
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import urllib.parse
import flask
import mwparserfromhell
import requests
from stdnum import isbn
bp = flask.Blueprint("hyphenator", __name__, url_prefix="/hyphenator")
flash = []
def get_wikitext(url):
wikitext_url = url + "&action=raw"
headers = {
"user-agent": "anticompositetools/hyphenator "
"(https://anticompositetools.toolforge.org/hyphenator; "
"<EMAIL>) python-requests/"
+ requests.__version__
}
for i in range(1, 5):
try:
request = requests.get(wikitext_url, headers=headers)
request.raise_for_status()
except Exception:
if request.status_code == 404:
flash.append(("That page does not exist.", "danger"))
raise
elif i == 4:
flash.append(("Unable to retrieve wikitext.", "danger"))
raise
else:
time.sleep(5 * i)
continue
else:
start_time = time.strftime("%Y%m%d%H%M%S", time.gmtime())
timestruct = time.strptime(
request.headers["Last-Modified"], "%a, %d %b %Y %H:%M:%S %Z"
)
edit_time = time.strftime("%Y%m%d%H%M%S", timestruct)
return (request.text, (edit_time, start_time))
def find_isbns(code):
for template in code.ifilter_templates():
if template.name.matches("ISBN") or template.name.matches("ISBNT"):
try:
raw_isbn = template.get("1").value.strip()
except ValueError:
continue
para = "1"
elif template.has("isbn", ignore_empty=True):
raw_isbn = template.get("isbn").value.strip()
para = "isbn"
elif template.has("ISBN", ignore_empty=True):
raw_isbn = template.get("ISBN").value.strip()
para = "ISBN"
else:
continue
yield (template, raw_isbn, para)
def check_isbn(raw_isbn):
"""If the ISBN can be worked on, return True"""
if len(raw_isbn) == 17 or not isbn.is_valid(raw_isbn):
return False
else:
return True
def get_page_url(url):
parsed = urllib.parse.urlparse(url)
if parsed.path == "/w/index.php":
query_params = urllib.parse.parse_qs(parsed.query)
if "oldid" not in query_params:
title = query_params["title"][0]
else:
flash.append(("Invalid URL", "danger"))
raise ValueError # fix
elif "/wiki/" in parsed.path:
# Because some people expect invalid URLs to work anyway
title = urllib.parse.quote(urllib.parse.unquote(parsed.path[6:]), safe=":/")
else:
flash.append(("Invalid URL", "danger"))
raise ValueError # this one too
new_url = parsed.scheme + "://" + parsed.netloc + "/w/index.php?title=" + title
return new_url
def main(raw_url, convert=True):
url = get_page_url(raw_url)
wikitext, times = get_wikitext(url)
code = mwparserfromhell.parse(wikitext)
count = 0
for template, raw_isbn, para in find_isbns(code):
if not check_isbn(raw_isbn):
continue
new_isbn = isbn.format(raw_isbn, convert=convert)
if raw_isbn != new_isbn:
count += 1
template.add(para, new_isbn)
return code, times, count, url
@bp.route("/", methods=["GET"])
def form():
return flask.render_template("hyphenator-form.html")
@bp.route("/output", methods=["POST"])
def output():
def check_err(messages):
for message in messages:
if message[1] == "danger":
return True
return False
if flask.request.method == "POST":
pageurl = flask.request.form["page_url"]
convert = bool(flask.request.form.get("convert", False))
try:
newtext, times, count, url = main(pageurl, convert)
except Exception as err:
if not check_err(flash):
flash.append(
("An unhandled {0} exception occurred.".format(err), "danger")
)
for message in flash:
flask.flash(message[0], message[1])
return flask.redirect(flask.url_for("hyphenator.form"))
submit_url = url + "&action=submit"
return flask.render_template(
"hyphenator-output.html",
count=count,
submit_url=submit_url,
newtext=newtext,
edit_time=times[0],
start_time=times[1],
)
|
[
"stdnum.isbn.is_valid",
"flask.flash",
"flask.Blueprint",
"flask.request.form.get",
"time.gmtime",
"mwparserfromhell.parse",
"time.strftime",
"time.sleep",
"flask.url_for",
"flask.render_template",
"requests.get",
"stdnum.isbn.format",
"time.strptime"
] |
[((774, 839), 'flask.Blueprint', 'flask.Blueprint', (['"""hyphenator"""', '__name__'], {'url_prefix': '"""/hyphenator"""'}), "('hyphenator', __name__, url_prefix='/hyphenator')\n", (789, 839), False, 'import flask\n'), ((3668, 3700), 'mwparserfromhell.parse', 'mwparserfromhell.parse', (['wikitext'], {}), '(wikitext)\n', (3690, 3700), False, 'import mwparserfromhell\n'), ((4076, 4121), 'flask.render_template', 'flask.render_template', (['"""hyphenator-form.html"""'], {}), "('hyphenator-form.html')\n", (4097, 4121), False, 'import flask\n'), ((3847, 3885), 'stdnum.isbn.format', 'isbn.format', (['raw_isbn'], {'convert': 'convert'}), '(raw_isbn, convert=convert)\n', (3858, 3885), False, 'from stdnum import isbn\n'), ((4976, 5122), 'flask.render_template', 'flask.render_template', (['"""hyphenator-output.html"""'], {'count': 'count', 'submit_url': 'submit_url', 'newtext': 'newtext', 'edit_time': 'times[0]', 'start_time': 'times[1]'}), "('hyphenator-output.html', count=count, submit_url=\n submit_url, newtext=newtext, edit_time=times[0], start_time=times[1])\n", (4997, 5122), False, 'import flask\n'), ((1187, 1230), 'requests.get', 'requests.get', (['wikitext_url'], {'headers': 'headers'}), '(wikitext_url, headers=headers)\n', (1199, 1230), False, 'import requests\n'), ((1737, 1812), 'time.strptime', 'time.strptime', (["request.headers['Last-Modified']", '"""%a, %d %b %Y %H:%M:%S %Z"""'], {}), "(request.headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S %Z')\n", (1750, 1812), False, 'import time\n'), ((1867, 1908), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""', 'timestruct'], {}), "('%Y%m%d%H%M%S', timestruct)\n", (1880, 1908), False, 'import time\n'), ((2737, 2760), 'stdnum.isbn.is_valid', 'isbn.is_valid', (['raw_isbn'], {}), '(raw_isbn)\n', (2750, 2760), False, 'from stdnum import isbn\n'), ((4439, 4479), 'flask.request.form.get', 'flask.request.form.get', (['"""convert"""', '(False)'], {}), "('convert', False)\n", (4461, 4479), False, 'import flask\n'), ((1697, 1710), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (1708, 1710), False, 'import time\n'), ((4810, 4845), 'flask.flash', 'flask.flash', (['message[0]', 'message[1]'], {}), '(message[0], message[1])\n', (4821, 4845), False, 'import flask\n'), ((4881, 4913), 'flask.url_for', 'flask.url_for', (['"""hyphenator.form"""'], {}), "('hyphenator.form')\n", (4894, 4913), False, 'import flask\n'), ((1585, 1602), 'time.sleep', 'time.sleep', (['(5 * i)'], {}), '(5 * i)\n', (1595, 1602), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 15:53:15 2020
@author: syful
"""
import xml.etree.ElementTree as et
import re
import pandas as pd
from datetime import datetime
start = datetime.now()
from tqdm.auto import tqdm
import numpy as np
from collections import Counter
import os
#Please specify your dataset directory.
os.chdir("your dataset directory")
Id=[]
CreationDate=[]
Score=[]
ViewCount=[]
Title=[]
Body=[]
Tags=[]
Tag_list=[]
df2 = pd.read_csv("syc01_PM_related_initial_post_detailed_syc.csv")
for element in df2['Tags']:
res = re.findall(r'\<(.*?)\>', element)
for tag_element in res:
Tag_list.append(tag_element)
tag=[]
count=[]
data=Counter(Tag_list)
for i in data:
tag.append(i)
count.append(data[i])
dict={'tags':tag, 'count':count}
df1=pd.DataFrame(dict)
df1.to_csv('syc02_filtered_tag.csv', header=True, index=False, encoding='utf-8')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"re.findall",
"collections.Counter",
"datetime.datetime.now",
"os.chdir"
] |
[((188, 202), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (200, 202), False, 'from datetime import datetime\n'), ((333, 367), 'os.chdir', 'os.chdir', (['"""your dataset directory"""'], {}), "('your dataset directory')\n", (341, 367), False, 'import os\n'), ((461, 522), 'pandas.read_csv', 'pd.read_csv', (['"""syc01_PM_related_initial_post_detailed_syc.csv"""'], {}), "('syc01_PM_related_initial_post_detailed_syc.csv')\n", (472, 522), True, 'import pandas as pd\n'), ((689, 706), 'collections.Counter', 'Counter', (['Tag_list'], {}), '(Tag_list)\n', (696, 706), False, 'from collections import Counter\n'), ((806, 824), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (818, 824), True, 'import pandas as pd\n'), ((563, 597), 're.findall', 're.findall', (['"""\\\\<(.*?)\\\\>"""', 'element'], {}), "('\\\\<(.*?)\\\\>', element)\n", (573, 597), False, 'import re\n')]
|
import numpy as np
import os
from pwtools import num, common
rand = np.random.rand
def equal(a,b):
assert (a == b).all()
def test_extend_array():
arr = rand(3,3)
nrep = 5
a0 = num.extend_array(arr, nrep, axis=0)
a1 = num.extend_array(arr, nrep, axis=1)
a2 = num.extend_array(arr, nrep, axis=2)
am1 = num.extend_array(arr, nrep, axis=-1)
assert a0.shape == (nrep,3,3)
assert a1.shape == (3,nrep,3)
assert a2.shape == (3,3,nrep)
assert am1.shape == (3,3,nrep)
equal(a2, am1)
for axis, aa in enumerate([a0, a1, a2]):
for ii in range(nrep):
# slicetake(a0, 3, 0) -> a0[3,:,:]
equal(arr, num.slicetake(aa, ii, axis=axis))
|
[
"pwtools.num.slicetake",
"pwtools.num.extend_array"
] |
[((194, 229), 'pwtools.num.extend_array', 'num.extend_array', (['arr', 'nrep'], {'axis': '(0)'}), '(arr, nrep, axis=0)\n', (210, 229), False, 'from pwtools import num, common\n'), ((239, 274), 'pwtools.num.extend_array', 'num.extend_array', (['arr', 'nrep'], {'axis': '(1)'}), '(arr, nrep, axis=1)\n', (255, 274), False, 'from pwtools import num, common\n'), ((284, 319), 'pwtools.num.extend_array', 'num.extend_array', (['arr', 'nrep'], {'axis': '(2)'}), '(arr, nrep, axis=2)\n', (300, 319), False, 'from pwtools import num, common\n'), ((330, 366), 'pwtools.num.extend_array', 'num.extend_array', (['arr', 'nrep'], {'axis': '(-1)'}), '(arr, nrep, axis=-1)\n', (346, 366), False, 'from pwtools import num, common\n'), ((670, 702), 'pwtools.num.slicetake', 'num.slicetake', (['aa', 'ii'], {'axis': 'axis'}), '(aa, ii, axis=axis)\n', (683, 702), False, 'from pwtools import num, common\n')]
|
import time
for i in range(10, -1, -1):
print(i)
time.sleep(1)
print('FIREWORKS!!!!!!!!! \o/ \o/ \o/ \o/ ')
|
[
"time.sleep"
] |
[((58, 71), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (68, 71), False, 'import time\n')]
|
# Day 6: The Central Limit Theorem I
# Enter your code here. Read input from STDIN. Print output to STDOUT
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019"
import math
class Day6(object):
    e = 2.71828
def __init__(self):
pass
# Define functions
def the_central_limit_theorem_i(self, mean, std, value):
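        # Normal CDF at `value` for N(mean, std), written via the error
        # function: P(X <= x) = 0.5 * (1 + erf((x - mean) / (std * sqrt(2)))).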
return 0.5 * (1 + math.erf((value - mean) / (std * (2 ** 0.5))))
if __name__ == '__main__':
d6 = Day6()
# Set data
max_weight = float(input())
n = float(input())
mean = float(input())
std = float(input())
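    # The sum of n i.i.d. variables has mean n * mu and standard
    # deviation sqrt(n) * sigma, which is what the next two lines compute.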
new_mean = mean * n
new_std = math.sqrt(n) * std
# Gets the result and show on the screen
result = d6.the_central_limit_theorem_i(new_mean, new_std, max_weight)
print('{:.4f}'.format(result))
|
[
"math.erf",
"math.sqrt"
] |
[((640, 652), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (649, 652), False, 'import math\n'), ((388, 431), 'math.erf', 'math.erf', (['((value - mean) / (std * 2 ** 0.5))'], {}), '((value - mean) / (std * 2 ** 0.5))\n', (396, 431), False, 'import math\n')]
|
import logging
import numpy as np
from keras_text_cls.embedding.base_embedder import BaseEmbedder
from keras_text_cls.vocab import Vocabulary, SYMBOL_PADDING, SYMBOL_UNKNOWN
from gensim.models.word2vec import Word2Vec
class Word2vecEmbedder(BaseEmbedder):
"""
Word2vec Embedder is a wrapper of gensim word2vec model
Reference to: https://radimrehurek.com/gensim/models/word2vec.html
Attributes
----------
dim: int
embedding vector dimension, default 300
seed: int
random seed, default is None
"""
def __init__(self, dim=300, seed=None):
super().__init__(dim, seed)
def transform(self, word):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
if word in self._model.wv.vocab:
return self._model.wv[word]
elif word == SYMBOL_PADDING:
return self._PADDING_VEC
else:
return self._UNKNOWN_VEC
def fit_on_words(self, words, sg=0, window=5, min_count=5, workers=4, iter=5, negative=5, **kwargs):
"""
fit word2vec model on words, vector size is assigned internally by default (equal to model._dim)
parameters are the same as gensim word2vec model
:param words: 2-dim list of words
:param sg: int {1, 0}
Defines the training algorithm. If 1, skip-gram is employed; otherwise, CBOW is used.
:param window: int
The maximum distance between the current and predicted word within a sentence.
:param min_count: int
            Ignores all words with total frequency lower than this.
:param workers: int
Use these many worker threads to train the model (=faster training with multicore machines).
:param iter: int
Number of iterations (epochs) over the corpus.
:param negative: int
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
        :param kwargs: more arguments can be assigned by referring to the gensim word2vec model
:return: fitted model
"""
if self.is_fitted:
raise ValueError("model is already fitted")
sentences = words
vector_size = self._dim
word2vec_model = Word2Vec(sentences, size=vector_size, sg=sg, window=window, min_count=min_count,
workers=workers, iter=iter, negative=negative, **kwargs)
self._model = word2vec_model
logging.info("word2ec model is fitted successfully")
self.is_fitted = True
return self
def get_vocabs(self):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
vocabs_set = set(self._predefined_vocabs)
vocabs = self._predefined_vocabs.copy()
for w in self._model.wv.vocab:
if w not in vocabs_set:
vocabs_set.add(w)
vocabs.append(w)
return vocabs
def get_dim(self):
if self.is_fitted:
return self._model.vector_size
else:
return self._dim
def save_model(self, path):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
self._model.save(path)
logging.info("saving model into: " + path)
@staticmethod
def load_model(path):
w2v = Word2vecEmbedder()
w2v._model = Word2Vec.load(path)
logging.info("loaded word2vec model from: " + path)
w2v.is_fitted = True
return w2v
|
[
"gensim.models.word2vec.Word2Vec.load",
"logging.info",
"gensim.models.word2vec.Word2Vec"
] |
[((2405, 2547), 'gensim.models.word2vec.Word2Vec', 'Word2Vec', (['sentences'], {'size': 'vector_size', 'sg': 'sg', 'window': 'window', 'min_count': 'min_count', 'workers': 'workers', 'iter': 'iter', 'negative': 'negative'}), '(sentences, size=vector_size, sg=sg, window=window, min_count=\n min_count, workers=workers, iter=iter, negative=negative, **kwargs)\n', (2413, 2547), False, 'from gensim.models.word2vec import Word2Vec\n'), ((2622, 2674), 'logging.info', 'logging.info', (['"""word2ec model is fitted successfully"""'], {}), "('word2ec model is fitted successfully')\n", (2634, 2674), False, 'import logging\n'), ((3411, 3453), 'logging.info', 'logging.info', (["('saving model into: ' + path)"], {}), "('saving model into: ' + path)\n", (3423, 3453), False, 'import logging\n'), ((3553, 3572), 'gensim.models.word2vec.Word2Vec.load', 'Word2Vec.load', (['path'], {}), '(path)\n', (3566, 3572), False, 'from gensim.models.word2vec import Word2Vec\n'), ((3581, 3632), 'logging.info', 'logging.info', (["('loaded word2vec model from: ' + path)"], {}), "('loaded word2vec model from: ' + path)\n", (3593, 3632), False, 'import logging\n')]
|
import typing
import torch
import torchvision
import numpy as np
from PIL import Image
from torch.autograd import Variable
from src.final_work.transformer import Transformer
from enum import Enum
class ModelType(Enum):
HOSODA = "hosoda_mamoru"
KON = "kon_satoshi"
MIYAZAKI = "miyazaki_hayao"
SHINKAI = "shinkai_makoto"
class Device(Enum):
CPU = "cpu"
GPU = torch.device("cuda")
class ImageConverter:
MODELS_DIRECTORY = "models"
def __init__(self):
self.device = self._define_device()
self._init_models()
def _get_model(self, model_type: ModelType) -> Transformer:
return self.models[model_type.value]
@staticmethod
def _define_device() -> Device:
_is_gpu_enable = torch.cuda.is_available()
if _is_gpu_enable:
return Device.GPU
else:
return Device.CPU
def _init_models(self):
self.models = dict()
for model_type in ModelType:
self.models[model_type.value] = self._create_model(model_type)
def _load_model_parameters(self, model: ModelType):
return torch.load(f"{self.MODELS_DIRECTORY}/{model.value}.pth", self.device.value)
def _create_model(self, model_type: ModelType) -> Transformer:
new_model = Transformer()
new_model_parameters = self._load_model_parameters(model_type)
new_model.load_state_dict(new_model_parameters)
if self.device == Device.GPU:
new_model.to(self.device.value)
new_model.eval()
return new_model
def convert_image(self, image: Image, model_type: ModelType) -> Image:
image = image.convert("RGB")
image = np.asarray(image)
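        # Reorder channels RGB -> BGR; the pretrained transformer appears to
        # expect OpenCV-style channel order (it is swapped back on output).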
image = image[:, :, [2, 1, 0]]
image = torchvision.transforms.ToTensor()(image).unsqueeze(0)
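        # Rescale pixel values from [0, 1] to [-1, 1].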
image = -1 + 2 * image
if self.device == Device.GPU:
image = Variable(image).to(self.device.value)
else:
image = Variable(image).float()
model = self._get_model(model_type)
converted_image = model(image)
converted_image = converted_image[0]
converted_image = converted_image[[2, 1, 0], :, :]
converted_image = converted_image.data.cpu().float() * 0.5 + 0.5
return torchvision.transforms.ToPILImage()(converted_image)
|
[
"src.final_work.transformer.Transformer",
"torch.autograd.Variable",
"torch.load",
"numpy.asarray",
"torchvision.transforms.ToPILImage",
"torch.cuda.is_available",
"torch.device",
"torchvision.transforms.ToTensor"
] |
[((385, 405), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (397, 405), False, 'import torch\n'), ((749, 774), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (772, 774), False, 'import torch\n'), ((1118, 1193), 'torch.load', 'torch.load', (['f"""{self.MODELS_DIRECTORY}/{model.value}.pth"""', 'self.device.value'], {}), "(f'{self.MODELS_DIRECTORY}/{model.value}.pth', self.device.value)\n", (1128, 1193), False, 'import torch\n'), ((1282, 1295), 'src.final_work.transformer.Transformer', 'Transformer', ([], {}), '()\n', (1293, 1295), False, 'from src.final_work.transformer import Transformer\n'), ((1686, 1703), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1696, 1703), True, 'import numpy as np\n'), ((2275, 2310), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (2308, 2310), False, 'import torchvision\n'), ((1759, 1792), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1790, 1792), False, 'import torchvision\n'), ((1902, 1917), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1910, 1917), False, 'from torch.autograd import Variable\n'), ((1974, 1989), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1982, 1989), False, 'from torch.autograd import Variable\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 2017-6-27
@author: cheng.li
"""
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import (
QuadraticOptimizer,
TargetVolOptimizer
)
from alphamind.exceptions.exceptions import PortfolioBuilderException
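# Helper that rewrites absolute weight bounds and risk-exposure targets as bounds
# on the active weight (w - benchmark); the benchmark is added back in _create_result.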
def _create_bounds(lbound,
ubound,
bm,
risk_exposure,
risk_target):
if lbound is not None:
lbound = lbound - bm
if ubound is not None:
ubound = ubound - bm
if risk_exposure is not None:
cons_mat = risk_exposure.T
bm_risk = cons_mat @ bm
clbound = (risk_target[0] - bm_risk).reshape((-1, 1))
cubound = (risk_target[1] - bm_risk).reshape((-1, 1))
else:
cons_mat = None
clbound = None
cubound = None
return lbound, ubound, cons_mat, clbound, cubound
def _create_result(optimizer, bm):
if optimizer.status() == "optimal" or optimizer.status() == "optimal_inaccurate":
return optimizer.status(), optimizer.feval(), optimizer.x_value() + bm
else:
raise PortfolioBuilderException(optimizer.status())
def mean_variance_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float, None],
ubound: Union[np.ndarray, float, None],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
lam: float = 1.,
linear_solver: str = 'deprecated') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = QuadraticOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
penalty=lam,
cov=cov,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
def target_vol_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
vol_target: float = 1.,
linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = TargetVolOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
target_vol=vol_target,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk,
cov=cov)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
|
[
"alphamind.portfolio.optimizers.TargetVolOptimizer",
"numpy.concatenate",
"alphamind.portfolio.optimizers.QuadraticOptimizer"
] |
[((2311, 2507), 'alphamind.portfolio.optimizers.QuadraticOptimizer', 'QuadraticOptimizer', ([], {'objective': '(-er)', 'cons_matrix': 'cons_matrix', 'lbound': 'lbound', 'ubound': 'ubound', 'penalty': 'lam', 'cov': 'cov', 'factor_cov': 'risk_cov', 'factor_load': 'risk_exposure', 'factor_special': 'special_risk'}), '(objective=-er, cons_matrix=cons_matrix, lbound=lbound,\n ubound=ubound, penalty=lam, cov=cov, factor_cov=risk_cov, factor_load=\n risk_exposure, factor_special=special_risk)\n', (2329, 2507), False, 'from alphamind.portfolio.optimizers import QuadraticOptimizer, TargetVolOptimizer\n'), ((3968, 4174), 'alphamind.portfolio.optimizers.TargetVolOptimizer', 'TargetVolOptimizer', ([], {'objective': '(-er)', 'cons_matrix': 'cons_matrix', 'lbound': 'lbound', 'ubound': 'ubound', 'target_vol': 'vol_target', 'factor_cov': 'risk_cov', 'factor_load': 'risk_exposure', 'factor_special': 'special_risk', 'cov': 'cov'}), '(objective=-er, cons_matrix=cons_matrix, lbound=lbound,\n ubound=ubound, target_vol=vol_target, factor_cov=risk_cov, factor_load=\n risk_exposure, factor_special=special_risk, cov=cov)\n', (3986, 4174), False, 'from alphamind.portfolio.optimizers import QuadraticOptimizer, TargetVolOptimizer\n'), ((2051, 2103), 'numpy.concatenate', 'np.concatenate', (['[cons_mat, clbound, cubound]'], {'axis': '(1)'}), '([cons_mat, clbound, cubound], axis=1)\n', (2065, 2103), True, 'import numpy as np\n'), ((3708, 3760), 'numpy.concatenate', 'np.concatenate', (['[cons_mat, clbound, cubound]'], {'axis': '(1)'}), '([cons_mat, clbound, cubound], axis=1)\n', (3722, 3760), True, 'import numpy as np\n')]
|
import pacman
import autograder
"""
run.py runs things that look like command-line arguments
for Berkeley Python. Leave the 'python pacman.py' part
at the beginning, just like running from the command line.
You should comment out all lines in the file except the one
you want to run!
"""
#pacman.main('python pacman.py --layout tinyMaze --pacman GoWestAgent')
#pacman.main('python pacman.py -l tinyMaze -p SearchAgent -a fn=tinyMazeSearch')
pacman.main('python pacman.py -l mediumMaze -p SearchAgent -a fn=dfs')
# autograder
#autograder.run('python autograder.py')
#autograder.run('python autograder -q q1')
#autograder.run('python autograder.py -t test_cases/q2/graph_bfs_vs_dfs')
|
[
"pacman.main"
] |
[((443, 513), 'pacman.main', 'pacman.main', (['"""python pacman.py -l mediumMaze -p SearchAgent -a fn=dfs"""'], {}), "('python pacman.py -l mediumMaze -p SearchAgent -a fn=dfs')\n", (454, 513), False, 'import pacman\n')]
|
# SPDX-FileCopyrightText: Copyright 2021-present Open Networking Foundation.
# SPDX-License-Identifier: Apache-2.0
import ipaddress
import logging
import re
from collections import Counter, defaultdict
import kubernetes as k8s
import requests
from netaddr import IPAddress
log = logging.getLogger("DeepInsightTopoUtility")
INT_HOST_REPORTER_TOPO_API="http://{}:4048/api/v1/topology"
PORT_MAPPINGS = {}
def parse_port_id(port_string):
# Port string can be "[port/channel](id)" or just "id".
# Only return the ID of port.
match = re.match(r"\[([0-9]+/[0-9]+)\]\(([0-9]+)\)", port_string)
if not match:
return int(port_string)
else:
return int(match.group(2))
def parse_port_name(port_string):
# Port string can be "[port/channel](id)" or just "id".
# Return the "port/channel" string, if exists.
# Otherwise, return the ID of port.
match = re.match(r"\[([0-9]+/[0-9]+)\]\(([0-9]+)\)", port_string)
if not match:
return port_string
elif match.group(1):
return match.group(1)
else:
return match.group(2)
def gen_topo(
onos_url="http://localhost:8181/onos/v1",
onos_user="onos",
onos_pass="<PASSWORD>",
with_end_host=False,
k8s_clusters=[],
):
"""
Generate topology based on ONOS and K8s cluster topology.
:param onos_url: The ONOS URL, default is http://localhost:8181/onos/v1
:param onos_user: The ONOS user, default is onos
:param onos_pass: The ONOS password, default is <PASSWORD>
:param with_end_host: Include end hosts(k8s nodes), default is False
:param k8s_clusters: [For end host] The list of K8s cluster info, default is empty
"""
log.info(
"Using ONOS REST APIs at %s (user:%s, password:%s)",
onos_url,
onos_user,
onos_pass,
)
auth = requests.auth.HTTPBasicAuth(onos_user, onos_pass)
netcfg = requests.get(onos_url + "/network/configuration", auth=auth)
if not netcfg.ok:
log.fatal("Unable to retrieve netcfg\n%s", netcfg.text)
netcfg = netcfg.json()
topo = {"switches": [], "links": [], "subnets": [], "hosts": []}
for key, value in netcfg["devices"].items():
topo["switches"].append(
{
"switchId": value["segmentrouting"]["ipv4NodeSid"],
"name": key,
"deviceType": "legacy",
"ip": value["segmentrouting"]["ipv4Loopback"],
}
)
devices = requests.get(onos_url + "/devices", auth=auth)
if not devices.ok:
log.fatal("Unable to retrieve devices\n%s", devices.text)
devices = devices.json()["devices"]
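    # Build PORT_MAPPINGS: a per-device map from numeric port ID to the
    # human-readable port name, used when emitting links below.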
for device in devices:
device_ports = requests.get(onos_url + "/devices/" + device['id'] + "/ports", auth=auth)
if not device_ports.ok:
log.fatal("Unable to retrieve ports of device\n%s", device_ports.text)
for elem in device_ports.json()['ports']:
port_name = parse_port_name(elem['port'])
port_id = parse_port_id(elem['port'])
if not device['id'] in PORT_MAPPINGS:
PORT_MAPPINGS[device['id']] = {}
PORT_MAPPINGS[device['id']][port_id] = port_name
print(PORT_MAPPINGS)
subnets = defaultdict(lambda: {})
for key, value in netcfg["ports"].items():
if "interfaces" not in value:
continue
ifaces = value["interfaces"]
for iface in ifaces:
for ip in iface["ips"]:
ip = ipaddress.ip_interface(ip)
subnets[str(ip.network)][key] = True
subnet_id = 1
for subnet, ports in subnets.items():
topo["subnets"].append(
{"ip_subnet": subnet, "name": subnet, "subnet_id": subnet_id}
)
for port in ports:
switch_id, port_num = port.split("/")
topo["links"].append(
{
"node1": switch_id,
"port1": PORT_MAPPINGS[switch_id][int(port_num)],
"node2": subnet,
"port2": "-1",
"switchPort1": int(port_num),
}
)
subnet_id = subnet_id + 1
hosts = requests.get(onos_url + "/hosts", auth=auth)
if not hosts.ok:
log.fatal("Unable to retrieve hosts\n%s", hosts.text)
hosts = hosts.json()["hosts"]
# A dictionary stores mapping from host IP to locations.
# Later we will use this dictionary to find the location of next hop for each routes.
host_ip_to_locations = {}
# Host names in ONOS are not unique, in case of duplicates, append count
    # suffix (e.g., myhost_1, myhost_2). Similarly, we use different names for hosts with
# multiple IP addresses.
name_ctr = Counter()
for host in hosts:
try:
name = host["annotations"]["name"]
except KeyError:
name = host["id"]
for ip in host["ipAddresses"]:
name_ctr.update([name])
unique_name = "%s_%s" % (name, name_ctr[name])
topo["hosts"].append(
{
"ip": ip,
"name": unique_name,
}
)
for location in host["locations"]:
port_num = parse_port_id(location["port"])
topo["links"].append(
{
"node1": location["elementId"],
"port1": PORT_MAPPINGS[location["elementId"]][int(port_num)],
"node2": unique_name,
"port2": "-1",
"switchPort1": port_num,
}
)
host_ip_to_locations[ip] = host["locations"]
links = requests.get(onos_url + "/links", auth=auth)
if not links.ok:
log.fatal("Unable to retrieve hosts\n%s", links.text)
links = links.json()["links"]
for app, value in netcfg["apps"].items():
if app == "org.omecproject.up4":
if "up4" not in value:
continue
up4 = value["up4"]
if "devices" in up4:
up4_switch_ids = up4["devices"]
else:
# TODO: For backward compatibility
# remove this when we are no longer need it.
up4_switch_ids = [up4["deviceId"]]
s1uaddr = up4["s1uAddr"]
s1uaddr = ipaddress.ip_address(s1uaddr)
uepools = set([str(ipaddress.ip_network(n)) for n in up4["uePools"]])
for uepool in uepools:
topo["subnets"].append(
{"ip_subnet": uepool, "name": uepool, "subnet_id": subnet_id}
)
subnet_id = subnet_id + 1
subnets_with_ue = []
for s in subnets:
if s1uaddr in ipaddress.ip_network(s):
subnets_with_ue.append(s)
if len(subnets_with_ue) == 0:
log.warning("Unable to map UP4 S1U address to switch port: %s", s1uaddr)
continue
for s in subnets_with_ue:
for port in subnets[s]:
switch_id, port_num = port.split("/")
if switch_id in up4_switch_ids:
for uepool in uepools:
topo["links"].append(
{
"node1": switch_id,
"port1": PORT_MAPPINGS[switch_id][int(port_num)],
"node2": uepool,
"port2": "-1",
"switchPort1": int(port_num),
}
)
elif app == "org.onosproject.route-service":
if "routes" not in value:
continue
for route in value["routes"]:
if "prefix" not in route or "nextHop" not in route:
continue
prefix = route["prefix"]
next_hop = route["nextHop"]
topo["subnets"].append(
{"ip_subnet": prefix, "name": prefix, "subnet_id": subnet_id}
)
subnet_id = subnet_id + 1
route_locations = host_ip_to_locations.get(next_hop, [])
for route_location in route_locations:
port_num = parse_port_id(route_location["port"])
topo["links"].append(
{
"node1": route_location["elementId"],
"port1": PORT_MAPPINGS[route_location["elementId"]][int(port_num)],
"node2": prefix,
"port2": "-1",
"switchPort1": port_num,
}
)
# ONOS returns an entry for each direction of a bidirectional link, but
# DeepInsight expects only one entry for both directions.
bidi_links = {}
for link in links:
key = [str(link["src"]), str(link["dst"])]
key.sort()
key = tuple(key)
port1_num = parse_port_id(link["src"]["port"])
port2_num = parse_port_id(link["dst"]["port"])
bidi_links[key] = {
"node1": link["src"]["device"],
"port1": PORT_MAPPINGS[link["src"]["device"]][int(port1_num)],
"node2": link["dst"]["device"],
"port2": PORT_MAPPINGS[link["dst"]["device"]][int(port2_num)],
"switchPort1": port1_num,
"switchPort2": port2_num,
}
topo["links"].extend(bidi_links.values())
if not with_end_host:
return topo
# End hosts topology config
for idx, cluster in enumerate(k8s_clusters):
if not 'subnet' in cluster:
log.error("Missing 'subnet' in K8s cluster info [argument index=%d]: %s, skipping to add K8s cluster to topology file.", idx, cluster)
continue
k8s_config = cluster['config'] if 'config' in cluster else None
k8s_cluster_subnet = cluster['subnet']
k8s.config.load_kube_config(config_file=k8s_config)
k8s_node_ips = []
for node in k8s.client.CoreV1Api().list_node().items:
k8s_node_ips += [
item.address for item in node.status.addresses if item.type == "InternalIP"
]
for subnet in topo["subnets"]:
if subnet["ip_subnet"] == k8s_cluster_subnet:
k8s_subnet = subnet
subnet_id = subnet["subnet_id"]
break
else:
k8s_subnet = {
"name": k8s_cluster_subnet,
"ip_subnet": k8s_cluster_subnet,
"subnet_id": subnet_id,
}
subnet_id += 1
k8s_node_cidrs = []
ipam_blocks = k8s.client.CustomObjectsApi().list_cluster_custom_object(
group="crd.projectcalico.org", version="v1", plural="ipamblocks"
)
for item in ipam_blocks["items"]:
cidr = item["spec"]["cidr"]
k8s_node_cidrs.append(
{"name": str(cidr), "ip_subnet": str(cidr), "subnet_id": subnet_id}
)
subnet_id += 1
vswitch_links = dict()
vswitches = []
for node_id, node_ip in enumerate(k8s_node_ips):
url = INT_HOST_REPORTER_TOPO_API.format(node_ip)
host_topology = requests.get(url)
if not host_topology.ok:
log.fatal("Unable to access Topology API from K8s node %s\n%s", node_ip, host_topology.text)
for link in host_topology.json()["links"]:
if link["is-node-iface"]:
node_iface = link["id"]
vswitch_ip = link["ip-addresses"][0]
hostname = [host["name"] for host in topo["hosts"] if host["ip"] == vswitch_ip]
hostname = hostname[0] if len(hostname) != 0 else ""
name = "device:vswitch" + str(node_id)
vswitches.append(
{
"name": name,
"ip": vswitch_ip,
"default-intf": str(node_iface),
"deviceType": "legacy",
"switchId": int(IPAddress(node_ip)),
"hostname": hostname,
}
)
vswitch_links[name] = host_topology.json()["links"]
topo['switches'].extend(vswitches)
all_host_subnets = k8s_node_cidrs + [k8s_subnet]
# Overrides links in the topology config.
# Connects the physical switch to the host vswitch
for link in topo["links"]:
for sw in vswitches:
# find IP of an attached host
host_ip = [host["ip"] for host in topo["hosts"] if host["name"] == link["node2"]]
host_ip = host_ip[0] if len(host_ip) != 0 else ""
if host_ip == sw["ip"]:
link["port2"] = sw["default-intf"]
link["node2"] = sw["name"]
link["switchPort2"] = int(sw["default-intf"])
# Connect vswitch to all possible subnets with all possible ports.
for sw in vswitches:
for host_subnet in all_host_subnets:
for link in vswitch_links[sw["name"]]:
if link["is-node-iface"]:
# skip data interfaces
continue
topo["links"].append(
{
"node1": sw["name"],
"node2": host_subnet["name"],
"port1": str(link["id"]),
"port2": "-1",
"switchPort1": int(link["id"]),
}
)
# Overrides subnets in the topology config.
if k8s_subnet not in topo["subnets"]:
topo["subnets"].append(k8s_subnet)
topo["subnets"] += k8s_node_cidrs
return topo
|
[
"ipaddress.ip_network",
"ipaddress.ip_interface",
"netaddr.IPAddress",
"re.match",
"ipaddress.ip_address",
"kubernetes.client.CoreV1Api",
"collections.defaultdict",
"kubernetes.config.load_kube_config",
"kubernetes.client.CustomObjectsApi",
"requests.get",
"collections.Counter",
"requests.auth.HTTPBasicAuth",
"logging.getLogger"
] |
[((282, 325), 'logging.getLogger', 'logging.getLogger', (['"""DeepInsightTopoUtility"""'], {}), "('DeepInsightTopoUtility')\n", (299, 325), False, 'import logging\n'), ((546, 606), 're.match', 're.match', (['"""\\\\[([0-9]+/[0-9]+)\\\\]\\\\(([0-9]+)\\\\)"""', 'port_string'], {}), "('\\\\[([0-9]+/[0-9]+)\\\\]\\\\(([0-9]+)\\\\)', port_string)\n", (554, 606), False, 'import re\n'), ((897, 957), 're.match', 're.match', (['"""\\\\[([0-9]+/[0-9]+)\\\\]\\\\(([0-9]+)\\\\)"""', 'port_string'], {}), "('\\\\[([0-9]+/[0-9]+)\\\\]\\\\(([0-9]+)\\\\)', port_string)\n", (905, 957), False, 'import re\n'), ((1835, 1884), 'requests.auth.HTTPBasicAuth', 'requests.auth.HTTPBasicAuth', (['onos_user', 'onos_pass'], {}), '(onos_user, onos_pass)\n', (1862, 1884), False, 'import requests\n'), ((1899, 1959), 'requests.get', 'requests.get', (["(onos_url + '/network/configuration')"], {'auth': 'auth'}), "(onos_url + '/network/configuration', auth=auth)\n", (1911, 1959), False, 'import requests\n'), ((2479, 2525), 'requests.get', 'requests.get', (["(onos_url + '/devices')"], {'auth': 'auth'}), "(onos_url + '/devices', auth=auth)\n", (2491, 2525), False, 'import requests\n'), ((3249, 3273), 'collections.defaultdict', 'defaultdict', (['(lambda : {})'], {}), '(lambda : {})\n', (3260, 3273), False, 'from collections import Counter, defaultdict\n'), ((4200, 4244), 'requests.get', 'requests.get', (["(onos_url + '/hosts')"], {'auth': 'auth'}), "(onos_url + '/hosts', auth=auth)\n", (4212, 4244), False, 'import requests\n'), ((4755, 4764), 'collections.Counter', 'Counter', ([], {}), '()\n', (4762, 4764), False, 'from collections import Counter, defaultdict\n'), ((5744, 5788), 'requests.get', 'requests.get', (["(onos_url + '/links')"], {'auth': 'auth'}), "(onos_url + '/links', auth=auth)\n", (5756, 5788), False, 'import requests\n'), ((2705, 2778), 'requests.get', 'requests.get', (["(onos_url + '/devices/' + device['id'] + '/ports')"], {'auth': 'auth'}), "(onos_url + '/devices/' + device['id'] + '/ports', auth=auth)\n", (2717, 2778), False, 'import requests\n'), ((10134, 10185), 'kubernetes.config.load_kube_config', 'k8s.config.load_kube_config', ([], {'config_file': 'k8s_config'}), '(config_file=k8s_config)\n', (10161, 10185), True, 'import kubernetes as k8s\n'), ((6406, 6435), 'ipaddress.ip_address', 'ipaddress.ip_address', (['s1uaddr'], {}), '(s1uaddr)\n', (6426, 6435), False, 'import ipaddress\n'), ((11469, 11486), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (11481, 11486), False, 'import requests\n'), ((3503, 3529), 'ipaddress.ip_interface', 'ipaddress.ip_interface', (['ip'], {}), '(ip)\n', (3525, 3529), False, 'import ipaddress\n'), ((10880, 10909), 'kubernetes.client.CustomObjectsApi', 'k8s.client.CustomObjectsApi', ([], {}), '()\n', (10907, 10909), True, 'import kubernetes as k8s\n'), ((6828, 6851), 'ipaddress.ip_network', 'ipaddress.ip_network', (['s'], {}), '(s)\n', (6848, 6851), False, 'import ipaddress\n'), ((10232, 10254), 'kubernetes.client.CoreV1Api', 'k8s.client.CoreV1Api', ([], {}), '()\n', (10252, 10254), True, 'import kubernetes as k8s\n'), ((6467, 6490), 'ipaddress.ip_network', 'ipaddress.ip_network', (['n'], {}), '(n)\n', (6487, 6490), False, 'import ipaddress\n'), ((12295, 12313), 'netaddr.IPAddress', 'IPAddress', (['node_ip'], {}), '(node_ip)\n', (12304, 12313), False, 'from netaddr import IPAddress\n')]
|
import json
import datetime, time
from os import path
import dash
import dash_echarts
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
basepath = path.dirname(__file__)
filepath = path.abspath(path.join(basepath+'/static', 'life-expectancy-table.json'))
with open(filepath) as json_file:
raw_data = json.load(json_file)
def get_countries():
return list(set([e[3] for e in raw_data[1:]]))
dataset_with_filters = [
{
"id": f"dataset_{country}",
"fromDatasetId": "dataset_raw",
"transform": {
"type": "filter",
"config": {
"and": [
{"dimension": "Year", "gte": 1950},
{"dimension": "Country", "=": country},
]
},
},
}
for country in get_countries()
]
series_list = [
{
"type": "line",
"datasetId": f"dataset_{country}",
"showSymbol": False,
"name": country,
"endLabel": {
"show": True,
"formatter": "line_race_formatter"
},
"labelLayout": {"moveOverlap": "shiftY"},
"emphasis": {"focus": "series"},
"encode": {
"x": "Year",
"y": "Income",
"label": ["Country", "Income"],
"itemName": "Year",
"tooltip": ["Income"],
},
}
for country in get_countries()
]
option = {
"animationDuration": 10000,
"animation": True,
"dataset": [{"id": "dataset_raw", "source": raw_data}] + dataset_with_filters,
"title": {"text": "Income since 1950"},
"tooltip": {"order": "valueDesc", "trigger": "axis"},
"xAxis": {"type": "category", "nameLocation": "middle"},
"yAxis": {"name": "Income"},
"grid": {"right": 140},
"series": series_list,
}
layout = html.Div([
dash_echarts.DashECharts(
option = option,
id='echarts',
style={
"width": '100%',
"height": '100vh',
},
funs={
"line_race_formatter":
'''
function(params){
return params.value[3] + ': ' + params.value[0];
}
'''
},
fun_values=['line_race_formatter']
),
dbc.Button('restart', color='success',
id='line-race-button',
style={
'position': 'absolute',
'height': 50, 'width': '5%',
'top': '25%', 'right': '15%',
'opacity': 0.8
}
),
])
def main():
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
suppress_callback_exceptions=True,
)
app.layout = layout
@app.callback(
Output('echarts', 'reset_id'),
[Input("line-race-button", "n_clicks")],
)
def update_line_race(n_clicks):
triggered = dash.callback_context.triggered
# value = triggered[0]['value']
prop_id, event = triggered[0]['prop_id'].split('.')
if n_clicks:
if 'line-race-button' in prop_id:
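                # Return the current epoch time as a fresh reset_id; a changed
                # reset_id presumably makes dash_echarts re-render and restart
                # the line-race animation.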
dtime = datetime.datetime.now()
int_time = int(time.mktime(dtime.timetuple()))
return int_time
raise PreventUpdate
app.run_server(debug=True)
if __name__ == '__main__':
main()
|
[
"json.load",
"dash.Dash",
"os.path.dirname",
"datetime.datetime.now",
"dash_bootstrap_components.Button",
"dash.dependencies.Input",
"dash_echarts.DashECharts",
"dash.dependencies.Output",
"os.path.join"
] |
[((302, 324), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (314, 324), False, 'from os import path\n'), ((349, 410), 'os.path.join', 'path.join', (["(basepath + '/static')", '"""life-expectancy-table.json"""'], {}), "(basepath + '/static', 'life-expectancy-table.json')\n", (358, 410), False, 'from os import path\n'), ((459, 479), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (468, 479), False, 'import json\n'), ((2713, 2894), 'dash.Dash', 'dash.Dash', ([], {'external_stylesheets': '[dbc.themes.BOOTSTRAP]', 'meta_tags': "[{'name': 'viewport', 'content': 'width=device-width, initial-scale=1'}]", 'suppress_callback_exceptions': '(True)'}), "(external_stylesheets=[dbc.themes.BOOTSTRAP], meta_tags=[{'name':\n 'viewport', 'content': 'width=device-width, initial-scale=1'}],\n suppress_callback_exceptions=True)\n", (2722, 2894), False, 'import dash\n'), ((1973, 2280), 'dash_echarts.DashECharts', 'dash_echarts.DashECharts', ([], {'option': 'option', 'id': '"""echarts"""', 'style': "{'width': '100%', 'height': '100vh'}", 'funs': '{\'line_race_formatter\':\n """\n function(params){ \n return params.value[3] + \': \' + params.value[0];\n }\n """\n }', 'fun_values': "['line_race_formatter']"}), '(option=option, id=\'echarts\', style={\'width\':\n \'100%\', \'height\': \'100vh\'}, funs={\'line_race_formatter\':\n """\n function(params){ \n return params.value[3] + \': \' + params.value[0];\n }\n """\n }, fun_values=[\'line_race_formatter\'])\n', (1997, 2280), False, 'import dash_echarts\n'), ((2390, 2567), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""restart"""'], {'color': '"""success"""', 'id': '"""line-race-button"""', 'style': "{'position': 'absolute', 'height': 50, 'width': '5%', 'top': '25%', 'right':\n '15%', 'opacity': 0.8}"}), "('restart', color='success', id='line-race-button', style={\n 'position': 'absolute', 'height': 50, 'width': '5%', 'top': '25%',\n 'right': '15%', 'opacity': 0.8})\n", (2400, 2567), True, 'import dash_bootstrap_components as dbc\n'), ((2993, 3022), 'dash.dependencies.Output', 'Output', (['"""echarts"""', '"""reset_id"""'], {}), "('echarts', 'reset_id')\n", (2999, 3022), False, 'from dash.dependencies import Input, Output, State\n'), ((3033, 3070), 'dash.dependencies.Input', 'Input', (['"""line-race-button"""', '"""n_clicks"""'], {}), "('line-race-button', 'n_clicks')\n", (3038, 3070), False, 'from dash.dependencies import Input, Output, State\n'), ((3358, 3381), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3379, 3381), False, 'import datetime, time\n')]
|
import os
import sys
from azureml.core.model import Model
sys.path.append(os.getcwd())
import config as f # noqa: E402
model_name = f.params["registered_model_name"]
if f.params['remote_run'] is True:
model_path = os.environ['MODEL_PATH']
elif f.params['remote_run'] is False:
model_path = os.path.join('models', model_name, 'best_model_data')
else:
raise Exception('remote_run unknown value. The value was: ' +
f.params['remote_run'])
print(f'Registering {model_name} from {model_path}')
model = Model.register(
model_path=model_path,
model_name=model_name,
tags={
'industry': 'retail',
'type': 'regression'
},
description="Retail AutoML regression model.",
workspace=f.ws)
print(f'{model.name} successfully registered to {f.ws.name}')
|
[
"os.getcwd",
"azureml.core.model.Model.register",
"os.path.join"
] |
[((535, 719), 'azureml.core.model.Model.register', 'Model.register', ([], {'model_path': 'model_path', 'model_name': 'model_name', 'tags': "{'industry': 'retail', 'type': 'regression'}", 'description': '"""Retail AutoML regression model."""', 'workspace': 'f.ws'}), "(model_path=model_path, model_name=model_name, tags={\n 'industry': 'retail', 'type': 'regression'}, description=\n 'Retail AutoML regression model.', workspace=f.ws)\n", (549, 719), False, 'from azureml.core.model import Model\n'), ((76, 87), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (85, 87), False, 'import os\n'), ((303, 356), 'os.path.join', 'os.path.join', (['"""models"""', 'model_name', '"""best_model_data"""'], {}), "('models', model_name, 'best_model_data')\n", (315, 356), False, 'import os\n')]
|
import sublime
import os
from ..clipboard import clipboard
plat = sublime.platform()
if plat == "osx":
from ..applescript import osascript
RSTUDIOAPPLESCRIPT = os.path.join(os.path.dirname(__file__), "rstudio.applescript")
def send_to_rstudio(cmd):
osascript(RSTUDIOAPPLESCRIPT, cmd)
elif plat == "windows":
from .. import winauto
def send_to_rstudio(cmd, from_view):
rid = winauto.find_rstudio()
clipboard.set_clipboard(cmd)
winauto.paste_to_rstudio(rid, from_view=from_view)
clipboard.reset_clipboard()
elif plat == "linux":
from ..xdotool import xdotool
def send_to_rstudio(cmd):
wid = xdotool("search", "--onlyvisible", "--class", "rstudio")
if wid:
wid = wid.decode("utf-8").strip().split("\n")[-1]
clipboard.set_clipboard(cmd)
xdotool("key", "--window", wid, "ctrl+v")
xdotool("key", "--window", wid, "--clearmodifiers", "Return")
clipboard.reset_clipboard()
|
[
"sublime.platform",
"os.path.dirname"
] |
[((67, 85), 'sublime.platform', 'sublime.platform', ([], {}), '()\n', (83, 85), False, 'import sublime\n'), ((184, 209), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'import os\n')]
|
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns
from webvep_api.views import ping_view, vcf_view, vep_view
urlpatterns = [path("ping/", ping_view), path("vcf/", vcf_view), path("vep/", vep_view)]
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"rest_framework.urlpatterns.format_suffix_patterns",
"django.urls.path"
] |
[((264, 299), 'rest_framework.urlpatterns.format_suffix_patterns', 'format_suffix_patterns', (['urlpatterns'], {}), '(urlpatterns)\n', (286, 299), False, 'from rest_framework.urlpatterns import format_suffix_patterns\n'), ((176, 200), 'django.urls.path', 'path', (['"""ping/"""', 'ping_view'], {}), "('ping/', ping_view)\n", (180, 200), False, 'from django.urls import path, include\n'), ((202, 224), 'django.urls.path', 'path', (['"""vcf/"""', 'vcf_view'], {}), "('vcf/', vcf_view)\n", (206, 224), False, 'from django.urls import path, include\n'), ((226, 248), 'django.urls.path', 'path', (['"""vep/"""', 'vep_view'], {}), "('vep/', vep_view)\n", (230, 248), False, 'from django.urls import path, include\n')]
|
import os
from flytekitplugins.papermill import NotebookTask
from flytekitplugins.spark import Spark
from flytekit import kwtypes
from flytekit.types.schema import FlyteSchema
def _get_nb_path(name: str, suffix: str = "", abs: bool = True, ext: str = ".ipynb") -> str:
"""
Creates a correct path no matter where the test is run from
"""
_local_path = os.path.dirname(__file__)
path = f"{_local_path}/testdata/{name}{suffix}{ext}"
return os.path.abspath(path) if abs else path
def test_notebook_task_simple():
nb_name = "nb-spark"
nb = NotebookTask(
name="test",
notebook_path=_get_nb_path(nb_name, abs=False),
outputs=kwtypes(df=FlyteSchema[kwtypes(name=str, age=int)]),
task_config=Spark(spark_conf={"x": "y"}),
)
n, out, render = nb.execute()
assert nb.python_interface.outputs.keys() == {"df", "out_nb", "out_rendered_nb"}
assert nb.output_notebook_path == out == _get_nb_path(nb_name, suffix="-out")
assert nb.rendered_output_path == render == _get_nb_path(nb_name, suffix="-out", ext=".html")
|
[
"flytekitplugins.spark.Spark",
"os.path.dirname",
"os.path.abspath",
"flytekit.kwtypes"
] |
[((371, 396), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (386, 396), False, 'import os\n'), ((465, 486), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (480, 486), False, 'import os\n'), ((753, 781), 'flytekitplugins.spark.Spark', 'Spark', ([], {'spark_conf': "{'x': 'y'}"}), "(spark_conf={'x': 'y'})\n", (758, 781), False, 'from flytekitplugins.spark import Spark\n'), ((703, 729), 'flytekit.kwtypes', 'kwtypes', ([], {'name': 'str', 'age': 'int'}), '(name=str, age=int)\n', (710, 729), False, 'from flytekit import kwtypes\n')]
|
import logging
from six.moves import http_client
from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
return utils.SwarmSpec(*args, **kwargs)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def inspect_swarm(self):
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
advertise_addr=None):
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
"JoinToken": join_token,
"AdvertiseAddr": advertise_addr,
}
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
})
self._raise_for_status(response)
return True
|
[
"logging.getLogger"
] |
[((76, 103), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (93, 103), False, 'import logging\n')]
|
import pytest
import numpy as np
from unpackqa import (unpack_to_array,
unpack_to_dict,
list_products,
list_qa_flags,
list_sensors,
)
from unpackqa.tools.validation import (product_info_has_required_entries,
flag_info_is_non_empty_dict,
flag_info_bit_list_non_empty,
flag_info_bits_non_neg_ints,
flag_info_flag_is_str,
bits_are_only_used_once,
bits_are_reasonable,
bits_do_not_exceed_bit_size,
max_value_matches_num_bits,
bits_are_ordered,
)
from unpackqa.tools.validation import InvalidProductSpec
from unpackqa.product_loader import all_products
"""
Validating product definitions.
"""
qa_array = np.array([[8,8,8],
[16,16,16],
[255,255,255]])
all_product_identifiers = list_products(sensor='all')
def test_product_ids_are_unique():
"""No duplicate product identifers"""
assert len(all_product_identifiers) == len(set(all_product_identifiers))
def test_list_sensors():
"""Should have some output"""
assert len(list_sensors()) > 0
def test_list_products():
"""Each sensor should have some products"""
sensors = list_sensors()
sensor_has_products = [len(list_products(sensor=s))>0 for s in sensors]
assert all(sensor_has_products)
def test_list_products_invalid_sensor():
"""Should raise error with unknown sensor"""
with pytest.raises(ValueError):
list_products(sensor='asdf')
def test_list_flags_invalid_product():
"""Should raise error with unknown product ID"""
with pytest.raises(ValueError):
list_qa_flags(product = 'asdf')
@pytest.mark.parametrize('product', all_product_identifiers)
def test_qa_flag_list(product):
"""Lists of flags should be available for each product"""
flags = list_qa_flags(product = product)
assert len(flags) > 0
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info_is_dict(product):
"""product_info entry should be dictonary"""
product_info = all_products[product]
assert isinstance(product_info, dict)
"""
Several tests for all products configred within the package.
Within product_info the 'flag_info' entry
should be a dictonary with key value pairs:
'flag name':[bit0,bit1,..]
Where flag name is a str, with a value of a list. List entries
are non-negative ints.
These same tests are used to validate user passed custom specifications, so
instead of essentially writing a new test function for each, just iterate
over them and create some informative output if 1 or more fails.
"""
test_list = [('product info does not have required entries',product_info_has_required_entries),
('flag_info is not dictionary, or is empty', flag_info_is_non_empty_dict),
('flag_info has empty lists',flag_info_bit_list_non_empty),
('flag_info has negative and/or non-int values',flag_info_bits_non_neg_ints),
('flag_info keys are not strings',flag_info_flag_is_str),
('duplicate bits detected',bits_are_only_used_once),
('bits are larger than needed for even a 32 bit int', bits_are_reasonable),
('largest bit is greater than num_bits',bits_do_not_exceed_bit_size),
('max_value is >= 2**num_bits',max_value_matches_num_bits),
('bits are out of order',bits_are_ordered),
]
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info(product):
product_info = all_products[product]
failed_tests = []
tests_failed = False
for test_message, test_function in test_list:
try:
test_function(product_info)
except InvalidProductSpec:
tests_failed = True
failed_tests.append(test_message)
if tests_failed:
error_message = '{} failed tests for {}\n'.format(len(failed_tests), product)
error_message = error_message + '\n'.join(['{}. {}'.format(i+1,m) for i,m in enumerate(failed_tests)])
assert False, error_message
|
[
"unpackqa.list_sensors",
"unpackqa.list_qa_flags",
"pytest.raises",
"unpackqa.list_products",
"numpy.array",
"pytest.mark.parametrize"
] |
[((1116, 1168), 'numpy.array', 'np.array', (['[[8, 8, 8], [16, 16, 16], [255, 255, 255]]'], {}), '([[8, 8, 8], [16, 16, 16], [255, 255, 255]])\n', (1124, 1168), True, 'import numpy as np\n'), ((1232, 1259), 'unpackqa.list_products', 'list_products', ([], {'sensor': '"""all"""'}), "(sensor='all')\n", (1245, 1259), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((2061, 2120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (2084, 2120), False, 'import pytest\n'), ((2292, 2351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (2315, 2351), False, 'import pytest\n'), ((3825, 3884), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (3848, 3884), False, 'import pytest\n'), ((1599, 1613), 'unpackqa.list_sensors', 'list_sensors', ([], {}), '()\n', (1611, 1613), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((2227, 2257), 'unpackqa.list_qa_flags', 'list_qa_flags', ([], {'product': 'product'}), '(product=product)\n', (2240, 2257), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1826, 1851), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1839, 1851), False, 'import pytest\n'), ((1861, 1889), 'unpackqa.list_products', 'list_products', ([], {'sensor': '"""asdf"""'}), "(sensor='asdf')\n", (1874, 1889), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1992, 2017), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2005, 2017), False, 'import pytest\n'), ((2027, 2056), 'unpackqa.list_qa_flags', 'list_qa_flags', ([], {'product': '"""asdf"""'}), "(product='asdf')\n", (2040, 2056), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1490, 1504), 'unpackqa.list_sensors', 'list_sensors', ([], {}), '()\n', (1502, 1504), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1645, 1668), 'unpackqa.list_products', 'list_products', ([], {'sensor': 's'}), '(sensor=s)\n', (1658, 1668), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n')]
|
# Um programa que resolve a hipotenusa #
from math import hypot
co = float(input('Comprimento do cateto oposto: '))
ca = float(input('Comprimento do cateto adjacente: '))
print(f'A hipotenusa vai medir: {hypot(co, ca):.2f}')
|
[
"math.hypot"
] |
[((205, 218), 'math.hypot', 'hypot', (['co', 'ca'], {}), '(co, ca)\n', (210, 218), False, 'from math import hypot\n')]
|
import sys
sys.path.append("C:/Users/900143/Desktop/Certo")
from controller.squad_controller import BackController, FrontController,SGBDController , SquadController, BackEnd, FrontEnd, SGBD, Squad
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
bc = BackController()
fc = FrontController()
sc = SGBDController()
sqc = SquadController()
@app.route('/')
def inicio():
return render_template('index.html')
@app.route('/listar')
def listar():
l = sqc.select_all()
return render_template('listar.html', lista = l)
@app.route('/excluir')
def excluir():
id = int(request.args['id'])
sqc.delete(sqc.select_byId(id))
return redirect('/listar')
@app.route('/cadastrar')
def cadastrar():
if 'id' in request.args:
squad = sqc.select_byId(request.args['id'])
else:
squad = Squad(0,'','','')
return render_template('cadastrar.html', squad = squad)
@app.route('/salvar')
def salvar():
id = int(request.args['id'])
nome = request.args['nome']
desc = request.args['desc']
qtdPessoas = request.args['numPessoas']
squad = Squad(id,nome,desc,qtdPessoas)
idF = request.args['idF']
if idF:
idF = int(idF)
else:
idF = 0
nomeF = request.args['nomeF']
descF = request.args['descF']
versaoF = request.args['versaoF']
front = FrontEnd(idF,nomeF,descF,versaoF)
idB = request.args['idB']
if idB:
idB = int(idB)
else:
idB = 0
nomeB = request.args['nomeB']
descB = request.args['descB']
versaoB = request.args['versaoB']
back = BackEnd(idB, nomeB, descB, versaoB)
idS = request.args['idS']
if idS:
idS = int(idS)
else:
idS = 0
nomeS = request.args['nomeS']
descS = request.args['descS']
versaoS = request.args['versaoS']
sgbd = SGBD(idS,nomeS,descS,versaoS)
if id == 0:
squad.id_linguagemFront = fc.insert(front)
squad.id_linguagemBack = bc.insert(back)
squad.id_sgbd = sc.insert(sgbd)
sqc.insert(squad)
else:
squad.linguagemFront = front
squad.linguagemBack = back
squad.sgbd = sgbd
squad.id_linguagemFront = idF
squad.id_linguagemBack = idB
squad.id_sgbd = idS
sqc.update(squad)
return redirect('/listar')
app.run(debug=True)
|
[
"sys.path.append",
"controller.squad_controller.BackController",
"flask.redirect",
"controller.squad_controller.FrontEnd",
"controller.squad_controller.SGBDController",
"controller.squad_controller.BackEnd",
"flask.Flask",
"controller.squad_controller.Squad",
"flask.render_template",
"controller.squad_controller.SGBD",
"controller.squad_controller.FrontController",
"controller.squad_controller.SquadController"
] |
[((11, 59), 'sys.path.append', 'sys.path.append', (['"""C:/Users/900143/Desktop/Certo"""'], {}), "('C:/Users/900143/Desktop/Certo')\n", (26, 59), False, 'import sys\n'), ((265, 280), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (270, 280), False, 'from flask import Flask, render_template, request, redirect\n'), ((287, 303), 'controller.squad_controller.BackController', 'BackController', ([], {}), '()\n', (301, 303), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((309, 326), 'controller.squad_controller.FrontController', 'FrontController', ([], {}), '()\n', (324, 326), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((332, 348), 'controller.squad_controller.SGBDController', 'SGBDController', ([], {}), '()\n', (346, 348), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((355, 372), 'controller.squad_controller.SquadController', 'SquadController', ([], {}), '()\n', (370, 372), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((415, 444), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (430, 444), False, 'from flask import Flask, render_template, request, redirect\n'), ((518, 557), 'flask.render_template', 'render_template', (['"""listar.html"""'], {'lista': 'l'}), "('listar.html', lista=l)\n", (533, 557), False, 'from flask import Flask, render_template, request, redirect\n'), ((679, 698), 'flask.redirect', 'redirect', (['"""/listar"""'], {}), "('/listar')\n", (687, 698), False, 'from flask import Flask, render_template, request, redirect\n'), ((883, 929), 'flask.render_template', 'render_template', (['"""cadastrar.html"""'], {'squad': 'squad'}), "('cadastrar.html', squad=squad)\n", (898, 929), False, 'from flask import Flask, render_template, request, redirect\n'), ((1128, 1161), 'controller.squad_controller.Squad', 'Squad', (['id', 'nome', 'desc', 'qtdPessoas'], {}), '(id, nome, desc, qtdPessoas)\n', (1133, 1161), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((1370, 1406), 'controller.squad_controller.FrontEnd', 'FrontEnd', (['idF', 'nomeF', 'descF', 'versaoF'], {}), '(idF, nomeF, descF, versaoF)\n', (1378, 1406), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((1614, 1649), 'controller.squad_controller.BackEnd', 'BackEnd', (['idB', 'nomeB', 'descB', 'versaoB'], {}), '(idB, nomeB, descB, versaoB)\n', (1621, 1649), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((1860, 1892), 'controller.squad_controller.SGBD', 'SGBD', (['idS', 'nomeS', 'descS', 'versaoS'], {}), '(idS, nomeS, descS, versaoS)\n', (1864, 1892), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n'), ((2322, 2341), 'flask.redirect', 'redirect', (['"""/listar"""'], {}), "('/listar')\n", (2330, 2341), False, 'from flask import Flask, render_template, request, redirect\n'), 
((849, 869), 'controller.squad_controller.Squad', 'Squad', (['(0)', '""""""', '""""""', '""""""'], {}), "(0, '', '', '')\n", (854, 869), False, 'from controller.squad_controller import BackController, FrontController, SGBDController, SquadController, BackEnd, FrontEnd, SGBD, Squad\n')]
|
# main.py
from app import app
import views
if __name__ == '__main__':
app.run(host='0.0.0.0',port='5000')
|
[
"app.app.run"
] |
[((75, 111), 'app.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': '"""5000"""'}), "(host='0.0.0.0', port='5000')\n", (82, 111), False, 'from app import app\n')]
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 <NAME>.
# License: BSD, see LICENSE for more details.
# doctest lino/core/site.py
"""
Defines the :class:`Site` class. For an overview see
:doc:`/dev/site` and :doc:`/dev/plugins`.
.. doctest init:
>>> import lino
>>> lino.startup('lino.projects.std.settings_test')
"""
from __future__ import unicode_literals, print_function
from builtins import map
from builtins import str
import six
import os
import sys
from os.path import normpath, dirname, join, isdir, relpath, exists
import inspect
import datetime
import warnings
import collections
from importlib import import_module
from six.moves.urllib.parse import urlencode
from unipath import Path
from atelier.utils import AttrDict, date_offset, tuple_py2
from atelier import rstgen
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
# from django.core.exceptions import ImproperlyConfigured
from lino.core.plugin import Plugin
from lino import assert_django_code, DJANGO_DEFAULT_LANGUAGE
from etgen.html import E
from lino.core.utils import simplify_name, get_models
# from lino.utils.html2text import html2text
# from html2text import html2text
from lino.core.exceptions import ChangedAPI
# from .roles import SiteUser
from html2text import HTML2Text
# _INSTANCES = []
def html2text(html):
text_maker = HTML2Text()
text_maker.unicode_snob = True
return text_maker.handle(html)
PRINT_EMAIL = """send email
Sender: {sender}
To: {recipients}
Subject: {subject}
{body}
"""
LanguageInfo = collections.namedtuple(
'LanguageInfo', ('django_code', 'name', 'index', 'suffix'))
"""
A named tuple with four fields:
- `django_code` -- how Django calls this language
- `name` -- how Lino calls it
- `index` -- the position in the :attr:`Site.languages` tuple
- `suffix` -- the suffix to append to babel fields for this language
"""
def to_locale(language):
"""Simplified copy of `django.utils.translation.to_locale`, but we
need it while the `settings` module is being loaded, i.e. we
cannot yet import django.utils.translation. Also we don't need
the to_lower argument.
"""
p = language.find('-')
if p >= 0:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' \
+ language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
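# Worked examples (added for illustration, derived from the logic above):
# to_locale('en-us') returns 'en_US', to_locale('sr-latn') returns
# 'sr_Latn', and to_locale('de') returns 'de' unchanged.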
def class2str(cl):
return cl.__module__ + '.' + cl.__name__
gettext_noop = lambda s: s
PLUGIN_CONFIGS = {}
def configure_plugin(app_label, **kwargs):
"""
Set one or several configuration settings of the given plugin
*before* the :setting:`SITE` has been instantiated. See
:doc:`/dev/plugins`.
"""
# if PLUGIN_CONFIGS is None:
# raise ImproperlyConfigured(
# "Tried to call configure_plugin after Site instantiation")
cfg = PLUGIN_CONFIGS.setdefault(app_label, {})
cfg.update(kwargs)
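# Minimal usage sketch (hypothetical plugin and option names): a local
# settings.py might call configure_plugin('contacts', hide_region=True)
# *before* instantiating its Site subclass; the keyword arguments are then
# stored in PLUGIN_CONFIGS for the plugin to pick up later.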
# from django.db.models.fields import NOT_PROVIDED
class NOT_PROVIDED(object):
pass
class Site(object):
"""
The base class for a Lino application. This class is designed to
be overridden by both application developers and local site
administrators. Your :setting:`SITE` setting is expected to
contain an instance of a subclass of this.
.. attribute:: plugins
An :class:`AttrDict <atelier.utils.AttrDict>` with one entry
for each installed plugin, mapping the `app_label` of every
plugin to the corresponding :class:`lino.core.plugin.Plugin`
instance.
This attribute is automatically filled by Lino and available as
:attr:`dd.plugins <lino.api.dd>` already before Django starts to
import :xfile:`models.py` modules.
.. attribute:: modules
Old name for :attr:`models`. Deprecated.
.. attribute:: models
An :class:`AttrDict <atelier.utils.AttrDict>` which maps every
installed `app_label` to the corresponding :xfile:`models.py`
module object.
This is also available as the shortcut :attr:`rt.models
<lino.api.rt.models>`.
See :doc:`/dev/plugins`
.. attribute:: LANGUAGE_CHOICES
A tuple in the format expected by Django's `choices
<https://docs.djangoproject.com/en/1.11/ref/models/fields/#choices>`__
attribute, used e.g. by :class:`LanguageField
<lino.utils.mldbc.fields.LanguageField>`. Its content is
automatically populated from :attr:`languages` and application
code should not change its value.
"""
auto_fit_column_widths = True
"""
The default value for the :attr:`auto_fit_column_widths
<lino.core.tables.AbstractTable.auto_fit_column_widths>` of tables
in this application.
"""
confdirs = None
"""
This attribute is available only after site startup. See
:mod:`lino.utils.config`.
"""
kernel = None
"""
This attribute is available only after :meth:`startup`.
See :mod:`lino.core.kernel`.
"""
# ui = None
# """
# Deprecated alias for :attr:`kernel`.
# """
readonly = False
"""Setting this to `True` turns this site in a readonly site. This
means that :setting:`DATABASES` must point to the
:setting:`DATABASES` of some other (non-readonly) site, and that
:manage:`initdb` will do nothing.
"""
history_aware_logging = False
"""Whether to log a message :message:`Started %s (using %s) --> PID
%s` at process startup (and a message :message:`Done PID %s` at
termination).
These two messages are interesting e.g. when a system
administrator wants to know which processes have been running on a
given production site, but they are usually disturbing during
development.
TODO: Replace this setting by an approach using a second logger
`lino.archive`. Also tidy up usage of
:mod:`lino.utils.dblogger`. To be meditated.
See also :ref:`lino.logging`.
"""
the_demo_date = None
"""A hard-coded constant date to be used as reference by :meth:`today`
and :meth:`demo_date`. For example many demo databases have this
set because certain tests rely on a constant reference date.
"""
title = None
"""The title of this web site to appear in the browser window. If
this is None, Lino will use :attr:`verbose_name` as default value.
"""
hoster_status_url = "http://bugs.saffre-rumma.net/"
"""This is mentioned in :xfile:`500.html`.
"""
verbose_name = "yet another Lino application"
"""The name of this application, to be displayed to end-users at
different places.
Note the difference between :attr:`title` and
:attr:`verbose_name`:
- :attr:`title` may be None, :attr:`verbose_name` not.
- :attr:`title` is used by the
:srcref:`index.html <lino/modlib/extjs/config/extjs/index.html>` for
:mod:`lino.modlib.extjs`.
- :attr:`title` and :attr:`verbose_name` are used by
:xfile:`admin_main.html` to generate the fragments "Welcome to the
**title** site" and "We are running **verbose_name** version
**x.y**" (the latter only if :attr:`version` is set).
- :meth:`site_version` uses :attr:`verbose_name` (not :attr:`title`)
IOW, the :attr:`title` is rather for usage by local system
administrators, while the :attr:`verbose_name` is rather for usage
by application developers.
"""
version = None
"The version number."
url = None
"""
The URL of the website that describes this application.
Used e.g. in a :menuselection:`Site --> About` dialog box.
"""
# server_url = None
server_url = "http://127.0.0.1:8000/"
"""The "official" URL used by "normal" users when accessing this Lino
site. This is used by templates such as the email sent by
:class:`lino.modlib.notify.Message`
"""
device_type = 'desktop'
"""
The default device type used on this server. Should be one of
``'desktop'``, ``'tablet'`` or ``'mobile'``.
This is used by :class:`DeviceTypeMiddleware
<lino.core.auth.middleware.DeviceTypeMiddleware>`.
"""
obj2text_template = "*{0}*"
"""The format template to use when rendering a ForeignKey as plain
text.
Note: reSTructuredText uses *italic* and **bold**. Changing this
can cause lots of trivial failures in test suites. It is also
used by :mod:`lino.modlib.notify` when generating the mail body.
"""
make_missing_dirs = True
"""Set this to `False` if you don't want Lino to automatically create
missing directories when needed. If this is False, Lino will
raise an exception in these cases, asking you to create it
yourself.
"""
userdocs_prefix = ''
project_name = None
"""A nickname for this project. This is used to set :attr:`cache_dir`
and therefore should be unique for all Lino projects in a given
development environment.
If this is None, Lino will find a default value by splitting
:attr:`project_dir` and taking the last part (or the second-last
if the last part is 'settings').
"""
cache_dir = None
"""The directory where Lino will create temporary data for this
project, including the :xfile:`media` directory and the
:xfile:`default.db` file.
This is either the same as :attr:`project_dir` or (if
:envvar:`LINO_CACHE_ROOT` is set), will be set to
:envvar:`LINO_CACHE_ROOT` + :attr:`project_name`.
"""
project_dir = None
"""Full path to your local project directory.
Lino automatically sets this to the directory of the
:xfile:`settings.py` file (or however your
:envvar:`DJANGO_SETTINGS_MODULE` is named).
It is recommended to not override this variable.
Note that when using a *settings package*, :attr:`project_dir`
points to the :file:`settings` subdir of what we would intuitively
consider the project directory.
If the :attr:`project_dir` contains a :xfile:`config` directory,
this will be added to the config search path.
"""
languages = None
"""The language distribution used on this site. It has its own
chapter :doc:`/dev/languages` in the Developers Guide.
"""
not_found_msg = '(not installed)'
django_settings = None
"""This is a reference to the `globals()` dictionary of your
:xfile:`settings.py` file (the one you provided when instantiating
the Site object).
"""
startup_time = None
"""
The time when this Site has been instantiated,
in other words the startup time of this Django process.
Don't modify this.
"""
plugins = None
models = None
top_level_menus = [
("master", _("Master")),
("main", None),
("reports", _("Reports")),
("config", _("Configure")),
("explorer", _("Explorer")),
("site", _("Site")),
]
"The list of top-level menu items. See :meth:`setup_menu`."
# is_local_project_dir = False
# """Contains `True` if this is a "local" project. For local projects,
# Lino checks for local fixtures and config directories and adds
# them to the default settings.
# This is automatically set when a :class:`Site` is instantiated.
# """
ignore_model_errors = False
"""Not yet sure whether this is needed. Maybe when generating
documentation.
"""
loading_from_dump = False
"""Whether the process is currently loading data from a Python dump.
When loading from a python dump, application code should not
generate certain automatic data because that data is also part of
the dump.
This is normally `False`, but a Python dump created with
:manage:`dump2py` explicitly calls :meth:`install_migrations`
which sets this to `True`.
Application code should not change this setting except for certain
special test cases.
"""
# see docs/settings.rst
migration_class = None
"""
If you maintain a data migrator module for your application,
specify its name here.
See :ref:`datamig` and/or :func:`lino.utils.dpy.install_migrations`.
TODO: rename this to `migrator_class`
"""
migration_module = None
"""The full Python path of a module to use for all migrations.
"""
hidden_languages = None
"""A string with a space-separated list of django codes of languages
that should be hidden.
:ref:`welfare` uses this because the demo database has 4
languages, but `nl` is currently hidden by default.
"""
BABEL_LANGS = tuple()
partners_app_label = 'contacts'
"""
Temporary setting, see :ref:`polymorphism`.
"""
# three constants used by lino_xl.lib.workflows:
max_state_value_length = 20
max_action_name_length = 50
max_actor_name_length = 100
trusted_templates = False
"""
Set this to True if you are sure that the users of your site won't try to
misuse Jinja's capabilities.
"""
allow_duplicate_cities = False
"""In a default configuration (when :attr:`allow_duplicate_cities` is
False), Lino declares a UNIQUE clause for :class:`Places
<lino_xl.lib.countries.models.Places>` to make sure that your
database never contains duplicate cities. This behaviour might
disturb e.g. when importing legacy data that did not have this
restriction. Set it to True to remove the UNIQUE clause.
Changing this setting might affect your database structure and
thus require a :doc:`/topics/datamig` if your application uses
:mod:`lino_xl.lib.countries`.
"""
uid = 'myuid'
"""A universal identifier for this Site. This is needed when
synchronizing with CalDAV server. Locally created calendar
components in remote calendars will get a UID based on this
parameter, using ``"%s@%s" % (self.pk, settings.SITE.kernel)``.
The default value is ``'myuid'``, and you should certainly
override this on a production server that uses remote calendars.
"""
project_model = None
"""
Optionally set this to the full name of a model used as "central
project" in your application. Models which inherit from
:class:`ProjectRelated <lino.mixins.ProjectRelated>` then have an
additional ForeignKey to this model.
"""
user_model = None
"""
If :mod:`lino.modlib.users` is installed, this holds a reference to
the model class which represents a user of the system. Default
value is `None`, meaning that this application has no user
management. See also :meth:`set_user_model`
"""
social_auth_backends = None
"""
A list of backends for `Python Social Auth
<https://github.com/python-social-auth>`__ (PSA).
Having this at a value different from `None` means that this site
uses authentication via third-party providers.
Sites which use this must also install PSA into their
environment::
$ pip install social-auth-app-django
Depending on the backend you must also add credentials in your
local :xfile:`settings.py` file, e.g.::
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \
'1234567890-a1b2c3d4e5.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'SH6da...'
A working example is in the :mod:`lino_book.projects.team` demo
project.
"""
use_security_features = False
"""
Set this to `True` in order to activate a selection of security
features to protect against miscellaneous attacks. You can do
this only if your application is being served via HTTPS. The idea
is to provide a reasonable security out of the box.
This will activate some middleware and set some security-related
settings. This is a new feature and not much tested. As a hoster
you may prefer adding security manually using your established
standards (regarding security Lino does not add anything to plain
Django). See also :doc:`/admin/security`.
"""
use_ipdict = False
"""
Whether this site uses :mod:`lino.modlib.ipdict`.
Note that :mod:`lino.modlib.ipdict`, unlike normal plugins, should
not be installed by adding it to your :meth:`get_installed_apps`
method but by setting this attribute. This approach has the
advantage of also setting :setting:`MIDDLEWARE_CLASSES`
automatically.
"""
# use_auth = True
# """Whether this site uses authentication. If this is set to `False`,
# all requests are anonymous (as if :attr:`user_model` was `None`).
# This is ignored when :attr:`user_model` is `None`.
# """
auth_middleware = None
"""
Override used Authorisation middlewares with supplied tuple of
middleware class names.
If None, use logic described in :doc:`/topics/auth`
"""
workflows_module = None
"""The full Python path of the **workflows module** to be used on
this site.
"""
user_types_module = None
"""The name of the **user types module** to be used on this site.
Default value is `None`, meaning that permission control is
inactive: everything is permitted. But note that
:meth:`set_user_model` sets it to :mod:`lino.core.user_types`.
This must be set if you want to enable permission control based on
user roles defined in :attr:`Permittable.required_roles
<lino.core.permissions.Permittable.required_roles>` and
:attr:`UserType.role
<lino.modlib.users.choicelists.UserType.role>`.
If set, Lino will import the named module during site startup. It
is expected to define application-specific user roles (if
necessary) and to fill the :class:`UserTypes
<lino.modlib.users.choicelists.UserTypes>` choicelist.
For example::
class Site(Site):
user_types_module = 'myapp.user_types'
Examples of such user types modules are
:mod:`lino.core.user_types` and
:mod:`lino_noi.lib.noi.user_types`.
"""
custom_layouts_module = None
"""The full Python path of the **custom layouts module** used on this
site.
"""
legacy_data_path = None
"""
Used by custom fixtures that import data from some legacy
database.
"""
propvalue_max_length = 200
"""
Used by :mod:`lino_xl.lib.properties`.
"""
show_internal_field_names = True
"""Whether the internal field names should be visible. ExtUI
implements this by prepending them to the tooltip, which means
that :attr:`use_quicktips` must also be `True`. Default is
`True`.
"""
never_build_site_cache = False
"""Set this to `True` if you want that Lino never (re)builds the site
cache, even when asked. This can be useful on a development
server when you are debugging directly on the generated
:xfile:`lino*.js`. Or for certain unit test cases.
"""
build_js_cache_on_startup = False
"""Whether the Javascript cache files should be built on startup for
all user profiles and languages.
On a production server this should be `True` for best performance,
but often this is not necessary, so default value is `False`,
which means that each file is built upon need (when a first
request comes in).
You can also set it to `None`, which means that Lino decides
automatically during startup: it becomes `False` if either
:func:`lino.core.utils.is_devserver` returns True or
:setting:`DEBUG` is set.
.. envvar:: LINO_BUILD_CACHE_ON_STARTUP
If a variable of that name is set, then Lino will override the
code value and set :attr:`build_js_cache_on_startup` to True.
"""
keep_erroneous_cache_files = False
"""When some exception occurs during
:meth:`lino.core.kernel.Kernel.make_cache_file`, Lino usually
removes the partly generated file to make sure that it will try to
generate it again (and report the same error message) for every
subsequent next request.
Set this to `True` if you need to see the partly generated cache
file. **Don't forget to remove this** when you have inspected the
file and fixed the reason of the exception, because if this is
`True` and some next exception occurs (which will happen sooner or
later), then all subsequent requests will usually end up to the
user with a blank screen and (if they notice it), a message
:message:`TypeError: Lino.main_menu is undefined` in their
Javascript console.
"""
use_websockets = False
"""Set this to `True` in order to activate use of websockets and
channels.
This setting is currently used only by :mod:`lino.modlib.notify`,
so its setting is ignored if your application doesn't use that
plugin.
If you use :mod:`lino.modlib.notify` and change this setting to
True, then you need to install `django-channels`::
pip install channels
"""
use_java = True
"""
A site-wide option to disable everything that needs Java. Note
that it is up to the plugins which include Java applications to
respect this setting. Usage example is :mod:`lino_xl.lib.beid`.
"""
use_silk_icons = False
"""
If this is `True`, certain Lino plugins use the deprecated `silk
icons library <http://www.famfamfam.com/lab/icons/silk/>`__ for
representing workflows.
The recommended but not yet fully implemented "modern" style is to
use unicode symbols instead of icons.
"""
use_new_unicode_symbols = False
"""Whether to use "new" unicode symbols (e.g. from the `Miscellaneous
Symbols and Pictographs
<https://en.wikipedia.org/wiki/Miscellaneous_Symbols_and_Pictographs>`__
block) which are not yet implemented in all fonts.
Currently used by :mod:`lino_noi.lib.noi.workflows`
"""
use_experimental_features = False
"""Whether to include "experimental features". Deprecated.
lino_xl.lib.inspect
"""
site_config_defaults = {}
"""
Default values to be used when creating the :attr:`site_config`.
Usage example::
site_config_defaults = dict(default_build_method='appypdf')
"""
# default_build_method = "appypdf"
# default_build_method = "appyodt"
# default_build_method = "wkhtmltopdf"
default_build_method = None
"""The default build method to use when rendering printable documents.
This is the last default value, used only when
:attr:`default_build_method
<lino.modlib.system.models.SiteConfig.default_build_method>` in
:class:`SiteConfig <lino.modlib.system.models.SiteConfig>` is
empty.
"""
is_demo_site = True
"""When this is `True`, then this site runs in "demo" mode. "Demo
mode" means:
- the welcome text for anonymous users says "This demo site has X
users, they all have "<PASSWORD>" as password", followed by a list of
available usernames.
Default value is `True`. On a production site you will of course
set this to `False`.
See also :attr:`demo_fixtures` and :attr:`the_demo_date`.
"""
demo_email = '<EMAIL>'
# demo_fixtures = ['std', 'demo', 'demo2']
demo_fixtures = []
"""
The list of fixtures to be loaded by the :manage:`prep`
command.
"""
use_spinner = False # doesn't work. leave this to False
#~ django_admin_prefix = '/django'
django_admin_prefix = None
"""
The prefix to use for Django admin URLs.
Leave this unchanged as long as :srcref:`docs/tickets/70` is not solved.
"""
time_format_extjs = 'H:i'
"""
Format (in ExtJS syntax) to use for displaying times to the user.
If you change this setting, you also need to override :meth:`parse_time`.
"""
alt_time_formats_extjs = "g:ia|g:iA|g:i a|g:i A|h:i|g:i|H:i|ga|ha|gA|h a|g a|g A|gi|hi" \
"|gia|hia|g|H|gi a|hi a|giA|hiA|gi A|hi A" \
"|Hi|g.ia|g.iA|g.i a|g.i A|h.i|g.i|H.i"
"""Alternative time entry formats accepted by ExtJS time widgets.
ExtJS default is:
"g:ia|g:iA|g:i a|g:i A|h:i|g:i|H:i|ga|ha|gA|h a|g a|g A|gi|hi|gia|hia|g|H|gi a|hi a|giA|hiA|gi A|hi A"
Lino's extended default also includes:
"Hi" (1900) and "g.ia|g.iA|g.i a|g.i A|h.i|g.i|H.i" (Using . in replacement of ":")
"""
date_format_extjs = 'd.m.Y'
"""Format (in ExtJS syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_date`.
"""
alt_date_formats_extjs = 'd/m/Y|Y-m-d'
"""Alternative date entry formats accepted by ExtJS Date widgets.
"""
default_number_format_extjs = '0,000.00/i'
# default_number_format_extjs = '0,00/i'
uppercase_last_name = False
"""
Whether last name of persons should (by default) be printed with
uppercase letters. See :mod:`lino.test_apps.human`
"""
jasmine_root = None
"""Path to the Jasmine root directory. Only used on a development
server if the `media` directory has no symbolic link to the
Jasmine root directory and only if :attr:`use_jasmine` is True.
"""
default_user = None
"""Username of the user to be used for all incoming requests. Setting
this to a nonempty value will disable authentication on this site.
The special value `'anonymous'` will cause anonymous requests
(whose `user` attribute is the :class:`AnonymousUser
<lino.core.auth.utils.AnonymousUser>` singleton).
See also :meth:`get_auth_method`.
This setting should be `None` when :attr:`user_model` is `None`.
"""
remote_user_header = None
"""The name of the header (set by the web server) that Lino should
consult for finding the user of a request. The default value
`None` means that http authentication is not used. Apache's
default value is ``"REMOTE_USER"``.
"""
ldap_auth_server = None
"""
This should be a string with the domain name and DNS (separated by a
space) of the LDAP server to be used for authentication.
Example::
ldap_auth_server = 'DOMAIN_NAME SERVER_DNS'
"""
use_gridfilters = True
use_eid_applet = False
"""
Whether to include functionality to read Belgian id cards using the
official `eid-applet <http://code.google.com/p/eid-applet>`_.
This option is experimental and doesn't yet work. See
`/blog/2012/1105`.
"""
use_esteid = False
"""
Whether to include functionality to read Estonian id cards. This
option is experimental and doesn't yet work.
"""
use_filterRow = not use_gridfilters
"""
See `/blog/2011/0630`.
This option was experimental and doesn't yet work (and maybe never will).
"""
use_awesome_uploader = False
"""
Whether to use AwesomeUploader.
This option was experimental and doesn't yet work (and maybe never will).
"""
use_tinymce = True
"""Replaced by :mod:`lino.modlib.tinymce`.
"""
use_jasmine = False
"""Whether to use the `Jasmine <https://github.com/pivotal/jasmine>`_
testing library.
"""
use_quicktips = True
"""Whether to make use of `Ext.QuickTips
<http://docs.sencha.com/ext-js/3-4/#!/api/Ext.QuickTips>`_ for
displaying :ref:`help_texts` and internal field names (if
:attr:`show_internal_field_names`).
"""
use_css_tooltips = False
"""
Whether to make use of CSS tooltips
when displaying help texts defined in :class:`lino.models.HelpText`.
"""
use_vinylfox = False
"""
Whether to use VinylFox extensions for HtmlEditor.
This feature was experimental and doesn't yet work (and maybe never will).
See `/blog/2011/0523`.
"""
webdav_root = None
"""
The path on the server where webdav files are stored.
Default is :attr:`cache_dir` + '/media/webdav'.
"""
webdav_url = None
"""
The URL prefix for webdav files. In a normal production
configuration you should leave this to `None`, Lino will set a
default value "/media/webdav/", supposing that your Apache is
configured as described in :doc:`/admin/webdav`.
This may be used to simulate a :term:`WebDAV` location on a
development server. For example on a Windows machine, you may set
it to ``w:\``, and before invoking :manage:`runserver`, you issue in
a command prompt::
subst w: <dev_project_path>\media\webdav
"""
webdav_protocol = None
"""
Set this to a string like e.g. 'wdav' in order to use a custom
protocol for opening editable printable documents. In this case
Lino expects the browser to be configured to understand the given
protocol.
If this is non-empty, Lino ignores whether
:mod:`lino.modlib.davlink` is installed or not.
When an *editable* printable document has been generated, Lino
does not open a new browser window on that document but invokes
the client's Office application. That application accesses the
document either via a WebDAV link (on a production server) or a
``file://`` link (on a development server).
"""
beid_protocol = None
"""
Set this to a string like e.g. 'beid' in order to use a custom
protocol for reading eid cards.
"""
sidebar_width = 0
"""
Used by :mod:`lino.modlib.plain`.
Width of the sidebar in 1/12 of total screen width.
Meaningful values are 0 (no sidebar), 2 or 3.
"""
config_id = 1
"""
The primary key of the one and only :class:`SiteConfig
<lino.modlib.system.models.SiteConfig>` instance of this
:class:`Site`. Default value is 1.
This is Lino's equivalent of Django's :setting:`SITE_ID` setting.
Lino applications don't need ``django.contrib.sites`` (`The
"sites" framework
<https://docs.djangoproject.com/en/dev/ref/contrib/sites/>`_)
because an analog functionality is provided by
:mod:`lino.modlib.system`.
"""
preview_limit = 15
"""
Default value for the :attr:`preview_limit
<lino.core.tables.AbstractTable.preview_limit>` parameter of all
tables who don't specify their own one. Default value is 15.
"""
# default_ui = 'lino_extjs6.extjs6'
default_ui = 'lino.modlib.extjs'
"""
The full Python name of the plugin which is to be used as default
user interface on this :class:`Site`.
Default value is :mod:`lino.modlib.extjs`. Other candidates are
:mod:`lino.modlib.bootstrap3`, :mod:`lino_xl.lib.pages` and
:mod:`lino_extjs6.extjs6`.
Another possibility is to set it to `None`. In that case you will
probably also set :attr:`root_urlconf` to a custom URL dispatcher.
Usage example for this see :mod:`lino.projects.cms`.
"""
admin_ui = None
mobile_view = False
"""
When this is `False` (the default), then Lino uses an attribute
named :attr:`main <lino.core.layouts.BaseLayout.main>` as the main
element of a detail window and :attr:`column_names
<lino.core.tables.AbstractTable.column_names>` as the table's
column layout.
When this is `True`, then Lino uses :attr:`main_m
<lino.core.layouts.BaseLayout.main_m>` and :attr:`column_names_m
<lino.core.tables.AbstractTable.column_names_m>` respectively.
"""
detail_main_name = 'main'
# detail_main_name = 'main_m'
design_name = 'desktop'
"""
The name of the design to use. The default value is
``'desktop'``. The value should be one of ``'desktop'`` or
``'mobile'``.
For every plugin, Lino will try to import its "design module".
For example if :attr:`design_name` is ``'desktop'``, then the
design module for a plugin ``'foo.bar'`` is ``'foo.bar.desktop'``.
If such a module exists, Lino imports it and adds it to
:attr:`models.bar`. The result is the same as if there were a
``from .desktop import *`` statement at the end of the
:xfile:`models.py` module.
"""
root_urlconf = 'lino.core.urls'
"""
The value to be attribute to :setting:`ROOT_URLCONF` when this
:class:`Site` instantiates.
The default value is :mod:`lino.core.urls`.
"""
textfield_format = 'plain'
"""
The default format for text fields. Valid choices are currently
'plain' and 'html'.
Text fields are either Django's `models.TextField` or
:class:`lino.core.fields.RichTextField`.
You'll probably better leave the global option as 'plain',
and specify explicitly the fields you want as html by declaring
them::
foo = fields.RichTextField(...,format='html')
We even recommend that you declare your *plain* text fields also
using `fields.RichTextField` and not `models.TextField`::
foo = fields.RichTextField()
Because that gives subclasses of your application the possibility to
make that specific field html-formatted::
resolve_field('Bar.foo').set_format('html')
"""
log_each_action_request = False
"""
Whether Lino should log every incoming request for non
:attr:`readonly <lino.core.actions.Action.readonly>` actions.
This is experimental. Theoretically it is useless to ask Lino for
logging every request since Apache does this. OTOH Lino can
produce more readable logs.
Note also that there is no warranty that actually *each* request
is being logged. It currently works only for requests that are
being processed by the kernel's :meth:`run_action
<lino.core.kernel.Kernel.run_action>` or
:meth:`run_callback
<lino.core.kernel.Kernel.run_callback>` methods.
"""
verbose_client_info_message = False
"""
Set this to True if actions should send debug messages to the client.
These will be shown in the client's Javascript console only.
"""
help_url = "http://www.lino-framework.org"
help_email = "<EMAIL>"
"""
An e-mail address where users can get help. This is included in
:xfile:`admin_main.html`.
"""
catch_layout_exceptions = True
"""
Lino usually catches any exception during startup (in
:func:`create_layout_element
<lino.core.layouts.create_layout_element>`) to report errors of
style "Unknown element "postings.PostingsByController
('postings')" referred in layout <PageDetail on pages.Pages>."
Setting this to `False` is useful when there's some problem
*within* the framework.
"""
strict_dependencies = True
"""
This should be True unless this site is being used just for autodoc
or similar applications.
"""
strict_choicelist_values = True
"""
Whether invalid values in a ChoiceList should raise an exception.
This should be `True` except for exceptional situations.
"""
csv_params = dict()
"""
Site-wide default parameters for CSV generation. This must be a
dictionary that will be used as keyword parameters to Python
`csv.writer()
<http://docs.python.org/library/csv.html#csv.writer>`_
Possible keys include:
- encoding :
the charset to use when responding to a CSV request.
See
http://docs.python.org/library/codecs.html#standard-encodings
for a list of available values.
- many more allowed keys are explained in
`Dialects and Formatting Parameters
<http://docs.python.org/library/csv.html#csv-fmt-params>`_.
"""
logger_filename = 'lino.log'
"""
The name of Lino's main log file, created in :meth:`setup_logging`.
See also :ref:`lino.logging`.
"""
auto_configure_logger_names = 'schedule atelier django lino radicale'
"""
A string with a space-separated list of logger names to be
automatically configured. See :meth:`setup_logging`.
"""
# appy_params = dict(ooPort=8100)
appy_params = dict(
ooPort=8100, pythonWithUnoPath='/usr/bin/python3',
raiseOnError=True)
"""
Used by :class:`lino_xl.lib.appypod.choicelist.AppyBuildMethod`.
Allowed keyword arguments for `appy.pod.renderer.Render` are::
pythonWithUnoPath=None,
ooPort=2002
stylesMapping={}
forceOoCall=False,
finalizeFunction=None
overwriteExisting=False
raiseOnError=False
imageResolver=None
See `the source code
<http://bazaar.launchpad.net/~appy-dev/appy/trunk/view/head:/pod/renderer.py>`_
for details.
See also :doc:`/admin/oood`
"""
#~ decimal_separator = '.'
decimal_separator = ','
"""
Set this to either ``'.'`` or ``','`` to define whether to use comma
or dot as decimal point separator when entering a `DecimalField`.
"""
# decimal_group_separator = ','
# decimal_group_separator = ' '
# decimal_group_separator = '.'
decimal_group_separator = u"\u00A0"
"""
Decimal group separator for :meth:`decfmt`.
"""
time_format_strftime = '%H:%M'
"""
Format (in strftime syntax) to use for displaying times to the user.
If you change this setting, you also need to override :meth:`parse_time`.
"""
date_format_strftime = '%d.%m.%Y'
"""
Format (in strftime syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_date`.
"""
date_format_regex = "/^[0123]?\d\.[01]?\d\.-?\d+$/"
"""
Format (in Javascript regex syntax) to use for displaying dates to
the user. If you change this setting, you also need to override
:meth:`parse_date`.
"""
datetime_format_strftime = '%Y-%m-%dT%H:%M:%S'
"""
Format (in strftime syntax) to use for formatting timestamps in
AJAX responses. If you change this setting, you also need to
override :meth:`parse_datetime`.
"""
datetime_format_extjs = 'Y-m-d\TH:i:s'
"""
Format (in ExtJS syntax) to use for formatting timestamps in AJAX
calls. If you change this setting, you also need to override
:meth:`parse_datetime`.
"""
# for internal use:
_site_config = None
_logger = None
_starting_up = False
override_modlib_models = None
"""
A dictionary which maps model class names to the plugin which
overrides them.
This is automatically filled at startup. You can inspect it, but
you should not modify it. Needed for :meth:`is_abstract_model`.
The challenge is that we want to know exactly where every model's
concrete class will be defined *before* actually starting to
import the :xfile:`models.py` modules. That's why we need
:attr:`extends_models <lino.core.plugin.Plugin.extends_models>`.
This can be tricky, see e.g. 20160205.
"""
installed_plugin_modules = None
"""
Used internally by :meth:`is_abstract_model`. Don't modify.
A set of the full Python paths of all imported plugin modules. Not
just the plugin modules themselves but also those they inherit
from.
"""
def __init__(self, settings_globals=None, local_apps=[], **kwargs):
"""Every Lino application calls this once in it's
:file:`settings.py` file.
See :doc:`/usage`.
`settings_globals` is the `globals()` dictionary of your
:xfile:`settings.py`.
"""
if hasattr(self, 'setup_choicelists'):
raise ChangedAPI("setup_choicelists is no longer supported")
if hasattr(self, 'setup_workflows'):
raise ChangedAPI("setup_workflows is no longer supported")
# if len(_INSTANCES):
# raise Exception("20161219")
# # happens e.g. during sphinx-build
# _INSTANCES.append(self)
# self.logger.info("20140226 Site.__init__() a %s", self)
#~ print "20130404 ok?"
if 'no_local' in kwargs:
kwargs.pop('no_local')
# For the moment we just silently ignore it, but soon:
# if False:
raise ChangedAPI("The no_local argument is no longer needed.")
self._welcome_handlers = []
self._help_texts = dict()
self.plugins = AttrDict()
self.models = AttrDict()
self.modules = self.models # backwards compat
# self.actors = self.models # backwards compat
# self.actors = AttrDict()
if settings_globals is None:
settings_globals = {}
self.init_before_local(settings_globals, local_apps)
self.setup_logging()
self.run_lino_site_module()
self.override_settings(**kwargs)
self.load_plugins()
for p in self.installed_plugins:
p.on_plugins_loaded(self)
if self.migration_module is not None:
self.django_settings.update(
MIGRATION_MODULES={
p.app_label:self.migration_module
for p in self.installed_plugins})
self.setup_plugins()
self.install_settings()
from lino.utils.config import ConfigDirCache
self.confdirs = ConfigDirCache(self)
for k in ('ignore_dates_before', 'ignore_dates_after'):
if hasattr(self, k):
msg = "{0} is no longer a site attribute"
msg += " but a plugin attribute on lino_xl.lib.cal."
msg = msg.format(k)
raise ChangedAPI(msg)
self.load_help_texts()
def init_before_local(self, settings_globals, local_apps):
"""If your :attr:`project_dir` contains no :xfile:`models.py`, but
*does* contain a `fixtures` subdir, then Lino automatically adds this
as a local fixtures directory to Django's :setting:`FIXTURE_DIRS`.
But only once: if your application defines its own local
fixtures directory, then this directory "overrides" those of
parent applications. E.g. lino_noi.projects.care does not want
to load the application-specific fixtures of
lino_noi.projects.team.
"""
if not isinstance(settings_globals, dict):
raise Exception("""
The first argument when instantiating a %s
must be your settings.py file's `globals()`
and not %r
""" % (self.__class__.__name__, settings_globals))
if isinstance(local_apps, six.string_types):
local_apps = [local_apps]
self.local_apps = local_apps
self.django_settings = settings_globals
project_file = settings_globals.get('__file__', '.')
self.project_dir = Path(dirname(project_file)).absolute().resolve()
# inherit `project_name` from parent?
# if self.__dict__.get('project_name') is None:
if self.project_name is None:
parts = reversed(self.project_dir.split(os.sep))
# print(20150129, list(parts))
for part in parts:
if part != 'settings':
self.project_name = part
break
cache_root = os.environ.get('LINO_CACHE_ROOT', None)
if cache_root:
cr = Path(cache_root).absolute()
if not cr.exists():
msg = "LINO_CACHE_ROOT ({0}) does not exist!".format(cr)
raise Exception(msg)
self.cache_dir = cr.child(self.project_name).resolve()
self.setup_cache_directory()
else:
self.cache_dir = Path(self.project_dir).absolute()
self._startup_done = False
self.startup_time = datetime.datetime.now()
db = self.get_database_settings()
if db is not None:
self.django_settings.update(DATABASES=db)
self.update_settings(SERIALIZATION_MODULES={
"py": "lino.utils.dpy",
})
if self.site_prefix != '/':
if not self.site_prefix.endswith('/'):
raise Exception("`site_prefix` must end with a '/'!")
if not self.site_prefix.startswith('/'):
raise Exception("`site_prefix` must start with a '/'!")
self.update_settings(
SESSION_COOKIE_PATH=self.site_prefix[:-1])
# self.update_settings(SESSION_COOKIE_NAME='ssid')
# ## Local project directory
# modname = self.__module__
# i = modname.rfind('.')
# if i != -1:
# modname = modname[:i]
# self.is_local_project_dir = modname not in self.local_apps
self.VIRTUAL_FIELDS = []
def setup_logging(self):
"""Modifies the :data:`DEFAULT_LOGGING
<django.utils.log.DEFAULT_LOGGING>` dictionary *before* Django
passes it to the `logging.config.dictConfig
<https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig>`__
function.
Note that this is called *before* any plugins are loaded.
It is designed to work with the :setting:`LOGGING` and
:setting:`LOGGER_CONFIG` settings unmodified.
It does the following modifications:
- (does not) configure the console handler to write to stdout
instead of Django's default stderr (as explained `here
<http://codeinthehole.com/writing/console-logging-to-stdout-in-django/>`__)
because that breaks testing.
- Define a *default logger configuration* which is initially
the same as the one used by Django::
{
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
}
- If the :attr:`project_dir` has a subdirectory named ``log``,
and if :attr:`logger_filename` is not empty, add a handler
named ``file`` and a formatter named ``verbose``, and add
that handler to the default logger configuration.
- Apply the default logger configuration to every logger name
in :attr:`auto_configure_logger_names`.
It does nothing at all if :attr:`auto_configure_logger_names`
is set to `None` or empty.
See also :ref:`lino.logging`.
See also Django's doc about `Logging
<https://docs.djangoproject.com/en/1.11/topics/logging/>`__.
"""
if not self.auto_configure_logger_names:
return
from django.utils.log import DEFAULT_LOGGING
d = DEFAULT_LOGGING
level = os.environ.get('LINO_LOGLEVEL') or 'INFO'
file_level = os.environ.get('LINO_FILE_LOGLEVEL') or 'INFO'
loggercfg = {
'handlers': ['console', 'mail_admins'],
'level': level,
}
handlers = d.setdefault('handlers', {})
if True:
# We override Django's default config: write to stdout (not
# stderr) and remove the 'require_debug_true' filter.
console = handlers.setdefault('console', {})
console['stream'] = sys.stdout
console['filters'] = []
console['level'] = level
if self.logger_filename and 'file' not in handlers:
logdir = self.project_dir.child('log')
if logdir.isdir():
# if self.history_aware_logging is None:
# self.history_aware_logging = True
formatters = d.setdefault('formatters', {})
formatters.setdefault('verbose', dict(
format='%(asctime)s %(levelname)s '
'[%(module)s %(process)d %(thread)d] : %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'))
handlers['file'] = {
'level': file_level,
'class': 'logging.FileHandler',
'filename': logdir.child(self.logger_filename),
'encoding': 'UTF-8',
'formatter': 'verbose',
}
loggercfg['handlers'].append('file')
for name in self.auto_configure_logger_names.split():
# if name not in d['loggers']:
d['loggers'][name] = loggercfg
# set schedule logger level to WARNING
# TODO: find a more elegant way to do this.
if 'schedule' in d['loggers']:
d['loggers']['schedule'] = {
'handlers': loggercfg['handlers'],
'level': 'WARNING',
}
dblogger = d['loggers'].setdefault('django.db.backends', {})
dblogger['propagate'] = False
dblogger['level'] = os.environ.get('LINO_SQL_LOGLEVEL', 'WARNING')
dblogger['handlers'] = loggercfg['handlers']
# self.update_settings(LOGGING=d)
# from pprint import pprint
# pprint(d)
# print("20161126 Site %s " % d['loggers'].keys())
# import yaml
# print(yaml.dump(d))
def get_database_settings(self):
"""Return a dict to be set as the :setting:`DATABASE` setting.
The default behaviour uses SQLite on a file named
:xfile:`default.db` in the :attr:`cache_dir`, and in
``:memory:`` when :attr:`cache_dir` is `None`.
An alternative might be, for example::
def get_database_settings(self):
return {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test_' + self.project_name,
'USER': 'django',
'PASSWORD': os.environ['MYSQL_PASSWORD'],
'HOST': 'localhost',
'PORT': 3306,
'OPTIONS': {
"init_command": "SET storage_engine=MyISAM",
}
}
}
"""
if self.cache_dir is None:
pass # raise Exception("20160516 No cache_dir")
else:
dbname = self.cache_dir.child('default.db')
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': dbname
}
}
def run_lino_site_module(self):
"""See :ref:`lino.site_module`.
"""
site_module = os.environ.get('LINO_SITE_MODULE', None)
if site_module:
mod = import_module(site_module)
func = getattr(mod, 'setup_site', None)
if func:
func(self)
# try:
# from djangosite_local import setup_site
# except ImportError:
# pass
# else:
# setup_site(self)
def override_settings(self, **kwargs):
# Called internally during `__init__` method.
# Also called from :mod:`lino.utils.djangotest`
#~ logger.info("20130404 lino.site.Site.override_defaults")
for k, v in kwargs.items():
if not hasattr(self, k):
raise Exception("%s has no attribute %s" % (self.__class__, k))
setattr(self, k, v)
self.apply_languages()
def load_plugins(self):
"""Load all plugins and build the :setting:`INSTALLED_APPS` setting
for Django.
This includes a call to :meth:`get_apps_modifiers` and
:meth:`get_installed_apps`.
"""
# Called internally during `__init__` method.
requested_apps = []
apps_modifiers = self.get_apps_modifiers()
if hasattr(self, 'hidden_apps'):
raise ChangedAPI("Replace hidden_apps by get_apps_modifiers()")
def add(x):
if isinstance(x, six.string_types):
app_label = x.split('.')[-1]
x = apps_modifiers.pop(app_label, x)
if x:
requested_apps.append(x)
else:
# if it's not a string, then it's an iterable of strings
for xi in x:
add(xi)
for x in self.get_installed_apps():
add(x)
for x in self.local_apps:
add(x)
# actual_apps = []
plugins = []
disabled_plugins = set()
def install_plugin(app_name, needed_by=None):
# print("20170505 install_plugin({})".format(app_name))
# Django does not accept newstr, and we don't want to see
# ``u'applabel'`` in doctests.
app_name = six.text_type(app_name)
# print("20160524 install_plugin(%r)" % app_name)
app_mod = import_module(app_name)
# print "Loading plugin", app_name
k = app_name.rsplit('.')[-1]
x = apps_modifiers.pop(k, 42)
if x is None:
return
elif x == 42:
pass
else:
raise Exception("20160712")
if k in self.plugins:
other = self.plugins[k]
if other.app_name == app_name:
# If a plugin is installed more than once, only
# the first one counts and all others are ignored
# silently. Happens e.g. in Lino Noi where
# lino_noi.lib.noi is both a required plugin and
# the default_ui.
return
raise Exception("Tried to install {} where {} "
"is already installed.".format(
app_name, other))
# Can an `__init__.py` file explicitly set ``Plugin =
# None``? Is that feature being used?
app_class = getattr(app_mod, 'Plugin', None)
if app_class is None:
app_class = Plugin
p = app_class(self, k, app_name, app_mod, needed_by)
cfg = PLUGIN_CONFIGS.pop(k, None)
if cfg:
p.configure(**cfg)
needed_by = p
while needed_by.needed_by is not None:
needed_by = needed_by.needed_by
for dep in p.needs_plugins:
k2 = dep.rsplit('.')[-1]
if k2 not in self.plugins:
install_plugin(dep, needed_by=needed_by)
# plugins.append(dep)
# actual_apps.append(app_name)
plugins.append(p)
self.plugins.define(k, p)
for dp in p.disables_plugins:
disabled_plugins.add(dp)
# lino_startup is always the first plugin:
# install_plugin(str('lino.modlib.lino_startup'))
install_plugin(str('lino'))
for app_name in requested_apps:
install_plugin(app_name)
if apps_modifiers:
raise Exception(
"Invalid app_label '{0}' in your get_apps_modifiers!".format(
list(apps_modifiers.keys())[0]))
# The return value of get_auth_method() may depend on a
# plugin, so if needed we must add the django.contrib.sessions
# afterwards.
# if self.get_auth_method() == 'session':
if self.user_model:
k = str('django.contrib.sessions')
if not k in self.plugins:
install_plugin(k)
for p in plugins:
if p.app_label in disabled_plugins \
or p.app_name in disabled_plugins:
plugins.remove(p)
del self.plugins[p.app_label]
# self.update_settings(INSTALLED_APPS=tuple(actual_apps))
self.update_settings(
INSTALLED_APPS=tuple([p.app_name for p in plugins]))
self.installed_plugins = tuple(plugins)
if self.override_modlib_models is not None:
raise ChangedAPI("override_modlib_models no longer allowed")
self.override_modlib_models = dict()
# def reg(p, pp, m):
# name = pp.__module__ + '.' + m
# self.override_modlib_models[name] = p
def plugin_parents(pc):
for pp in pc.__mro__:
if issubclass(pp, Plugin):
# if pp not in (Plugin, p.__class__):
if pp is not Plugin:
yield pp
def reg(pc):
# If plugin p extends some models, then tell all parent
# plugins to make their definition of each model abstract.
extends_models = pc.__dict__.get('extends_models')
if extends_models is not None:
for m in extends_models:
if "." in m:
raise Exception(
"extends_models in %s still uses '.'" % pc)
for pp in plugin_parents(pc):
if pp is pc:
continue
name = pp.__module__ + '.' + m
self.override_modlib_models[name] = pc
# if m == "Company":
# print("20160524 tell %s that %s extends %s" % (
# pp, p.app_name, m))
for pp in plugin_parents(pc):
if pp is pc:
continue
reg(pp)
# msg = "{0} declares to extend_models {1}, but " \
# "cannot find parent plugin".format(p, m)
# raise Exception(msg)
for p in self.installed_plugins:
reg(p.__class__)
# for pp in plugin_parents(p.__class__):
# if p.app_label == 'contacts':
# print("20160524c %s" % pp)
# reg(p.__class__)
# for m, p in self.override_modlib_models.items():
# print("20160524 %s : %s" % (m, p))
self.installed_plugin_modules = set()
for p in self.installed_plugins:
self.installed_plugin_modules.add(p.app_module.__name__)
for pp in plugin_parents(p.__class__):
self.installed_plugin_modules.add(pp.__module__)
# print("20160524 %s", self.installed_plugin_modules)
# raise Exception("20140825 %s", self.override_modlib_models)
# Tried to prevent accidental calls to configure_plugin()
# *after* Site initialization.
# global PLUGIN_CONFIGS
# PLUGIN_CONFIGS = None
def load_help_texts(self):
"""Collect :xfile:`help_texts.py` modules"""
for p in self.installed_plugins:
mn = p.app_name + '.help_texts'
try:
m = import_module(mn)
# print("20160725 Loading help texts from", mn)
self._help_texts.update(m.help_texts)
except ImportError:
pass
def load_actors(self):
"""Collect :xfile:`desktop.py` modules.
Note the situation where a :xfile:`desktop.py` module exists
but itself raises an ImportError because it contains a
programming mistake. In that case we want the traceback to
occur rather than silently behaving as if no :xfile:`desktop.py`
module existed.
"""
for p in self.installed_plugins:
mn = p.app_name + '.' + self.design_name
fn = join(
dirname(p.app_module.__file__), self.design_name + '.py')
if exists(fn):
# self.actors[p.app_label] = import_module(mn)
m = import_module(mn)
self.models[p.app_label].__dict__.update(m.__dict__)
# try:
# # print("20160725 Loading actors from", mn)
# self.actors[p.app_label] = import_module(mn)
# except ImportError:
# pass
def install_help_text(self, fld, cls=None, attrname=None):
"""Install a `help_text` from collected :xfile:`help_texts.py` for
this field.
"""
if cls is None:
cls = fld
debug = False
# if attrname.startswith('mun'):
# debug = True
# from lino.core.actions import Action
# if isinstance(fld, Action) and fld.__class__.__name__ == 'ChangePassword':
# debug = True
# if isinstance(fld, type) and fld.__name__ == 'ChangePassword':
# # if isinstance(fld, Action) and fld.__class__.__name__ == 'ChangePassword':
# debug = True
if not hasattr(fld, 'help_text'): # e.g. virtual fields don't
# have a help_text attribute
if debug:
print("20170824 {!r} has no help_text".format(fld))
return
# if fld.help_text:
# # if debug:
# # print("20170824 {} on {} has already a help_text {}".format(
# # attrname, cls, repr(fld.help_text)))
# return
# if debug:
# print(20160829, cls)
# if isinstance(fld, type):
# cls = fld
# else:
# cls = fld.model
for m in cls.mro():
# useless = ['lino.core', 'lino.mixins']
# if m.__module__.startswith(useless):
# continue
# if m in self.unhelpful_classes:
# continue
k = m.__module__ + '.' + m.__name__
k = simplify_name(k)
# debug = k.startswith('users')
if attrname:
k += '.' + attrname
txt = self._help_texts.get(k, None)
# if attrname == "nationality":
# print("20180313 {} {}".format(k, txt))
if txt is None:
if debug:
print("20170824 {}.{} : no help_text using {!r}".format(
cls, attrname, k))
if fld.help_text:
# coded help text gets overridden only if docs
# provide a more specific help text.
return
else:
if debug:
print("20170824 {}.{}.help_text found using {}".format(
cls, attrname, k))
fld.help_text = txt
return
if debug:
print("20170824 {}.{} : no help_text".format(
cls, attrname))
def setup_plugins(self):
"""This method is called exactly once during site startup, after
:meth:`load_plugins` but before populating the models
registry.
See :ref:`dev.plugins`.
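A hypothetical override (assuming a ``countries`` plugin with a
``country_code`` attribute) could configure installed plugins here::

    def setup_plugins(self):
        super(Site, self).setup_plugins()
        self.plugins.countries.configure(country_code='BE')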
"""
pass
def install_settings(self):
assert not self.help_url.endswith('/')
# import django
# django.setup()
if self.cache_dir is not None:
if self.webdav_url is None:
self.webdav_url = self.site_prefix + 'media/webdav/'
if self.webdav_root is None:
self.webdav_root = join(self.cache_dir, 'media', 'webdav')
self.django_settings.update(
MEDIA_ROOT=join(self.cache_dir, 'media'))
self.update_settings(ROOT_URLCONF=self.root_urlconf)
self.update_settings(MEDIA_URL='/media/')
if not self.django_settings.get('STATIC_ROOT', False):
cache_root = os.environ.get('LINO_CACHE_ROOT', None)
if cache_root:
self.django_settings.update(
STATIC_ROOT=Path(cache_root).child('collectstatic'))
else:
self.django_settings.update(
STATIC_ROOT=self.cache_dir.child('static'))
if not self.django_settings.get('STATIC_URL', False):
self.update_settings(STATIC_URL='/static/')
# loaders = [
# 'lino.modlib.jinja.loader.Loader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# #~ 'django.template.loaders.eggs.Loader',
# ]
tcp = []
tcp += [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
]
# self.update_settings(TEMPLATE_LOADERS=tuple(loaders))
# self.update_settings(TEMPLATE_CONTEXT_PROCESSORS=tuple(tcp))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': tcp,
# 'loaders': loaders
},
},
]
TEMPLATES.append(
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [],
'OPTIONS': {
'environment': 'lino.modlib.jinja.get_environment'
},
})
self.update_settings(TEMPLATES=TEMPLATES)
if self.user_model:
self.update_settings(AUTH_USER_MODEL='users.User')
if self.use_security_features:
self.update_settings(
CSRF_USE_SESSIONS=True,
SESSION_COOKIE_SECURE=True,
CSRF_COOKIE_SECURE=True)
# self.define_settings(AUTH_USER_MODEL=self.user_model)
self.define_settings(
MIDDLEWARE_CLASSES=tuple(self.get_middleware_classes()))
# if self.get_auth_method() == 'session':
# self.define_settings(AUTHENTICATION_BACKENDS=[
# 'django.contrib.auth.backends.RemoteUserBackend'
# ])
backends = []
# if self.use_ipdict:
# backends.append('lino.modlib.ipdict.backends.Backend')
if self.get_auth_method() == 'remote':
backends.append('lino.core.auth.backends.RemoteUserBackend')
else:
backends.append('lino.core.auth.backends.ModelBackend')
if self.social_auth_backends is not None:
backends += self.social_auth_backends
self.define_settings(AUTHENTICATION_BACKENDS=backends)
self.update_settings(
LOGIN_URL='/accounts/login/',
LOGIN_REDIRECT_URL = '/',
# LOGIN_REDIRECT_URL = '/accounts/profile/',
LOGOUT_REDIRECT_URL = None)
def collect_settings_subdirs(lst, name, max_count=None):
def add(p):
p = p.replace(os.sep, "/")
if p not in lst:
lst.append(p)
for p in self.get_settings_subdirs(name):
# if the parent of a settings subdir has a
# `models.py`, then it is a plugin and we must not add
# the subdir because Django does that.
if exists(join(p, '..', 'models.py')):
self.logger.debug(
"Not loading %s %s because Django does that",
p, name)
else:
add(p)
if (max_count is not None) and len(lst) >= max_count:
break
# local_dir = self.cache_dir.child(name)
# if local_dir.exists():
# print "20150427 adding local directory %s" % local_dir
# add(local_dir)
# The STATICFILES_DIRS setting should not contain the
# STATIC_ROOT setting
if False:
# If a plugin has no "fixtures" ("config") directory
# of its own, inherit it from parents. That would be
# nice and it even works, but with a catch: these
# fixtures will be loaded at the end.
for ip in self.installed_plugins:
if not ip.get_subdir(name):
pc = ip.extends_from()
while pc and issubclass(pc, Plugin):
p = pc.get_subdir(name)
if p:
add(p)
pc = pc.extends_from()
fixture_dirs = list(self.django_settings.get('FIXTURE_DIRS', []))
locale_paths = list(self.django_settings.get('LOCALE_PATHS', []))
sfd = list(self.django_settings.get('STATICFILES_DIRS', []))
# sfd.append(self.cache_dir.child('genjs'))
collect_settings_subdirs(fixture_dirs, 'fixtures', 1)
collect_settings_subdirs(locale_paths, 'locale')
collect_settings_subdirs(sfd, 'static')
self.update_settings(FIXTURE_DIRS=tuple(fixture_dirs))
self.update_settings(LOCALE_PATHS=tuple(locale_paths))
self.update_settings(STATICFILES_DIRS=tuple(sfd))
# print(20150331, self.django_settings['FIXTURE_DIRS'])
def setup_cache_directory(self):
"""When :envvar:`LINO_CACHE_ROOT` is set, Lino adds a stamp file
called :xfile:`lino_cache.txt` to every project's cache
directory in order to avoid duplicate use of the same cache
directory.
.. xfile:: lino_cache.txt
A small text file with one line of text which contains the
path of the project which uses this cache directory.
"""
stamp = self.cache_dir.child('lino_cache.txt')
this = class2str(self.__class__)
if stamp.exists():
other = stamp.read_file()
if other == this:
ok = True
else:
ok = False
for parent in self.__class__.__mro__:
if other == class2str(parent):
ok = True
break
if not ok:
# Can happen e.g. when `python -m lino.hello` is
# called in certain conditions.
msg = ("Cannot use {cache_dir} for {this} "
"because it is used for {other}. (Settings {settings})")
msg = msg.format(
cache_dir=self.cache_dir,
this=this,
settings=self.django_settings.get('SETTINGS_MODULE'),
other=other)
if True:
raise Exception(msg)
else:
# print(msg)
self.cache_dir = None
else:
self.makedirs_if_missing(self.cache_dir)
stamp.write_file(this)
def set_user_model(self, spec):
"""This can be called during the :meth:`on_init
<lino.core.plugin.Plugin.on_init>` of plugins which provide
user management (the only plugin which does this is currently
:mod:`lino.modlib.users`).
"""
# if self.user_model is not None:
# msg = "Site.user_model was already set!"
# Theoretically this should raise an exception. But in a
# transitional phase after 20150116 we just ignore it. A
# warning would be nice, but we cannot use the logger here
# since it is not yet configured.
# self.logger.warning(msg)
# raise Exception(msg)
self.user_model = spec
if self.user_types_module is None:
self.user_types_module = 'lino.core.user_types'
def get_auth_method(self):
"""Returns the authentication method used on this site. This is one of
`None`, `'remote'` or `'session'`.
It depends on the values in
:attr:`user_model`,
:attr:`default_user` and
:attr:`remote_user_header`.
It influences the results of
:meth:`get_middleware_classes` and
:meth:`get_installed_apps`, and the content of
:setting:`AUTHENTICATION_BACKENDS`.
"""
if self.user_model is None:
return None
if self.default_user is not None:
return None
if self.remote_user_header is None:
return 'session' # model backend
return 'remote' # remote user backend
def get_apps_modifiers(self, **kw):
"""Override or hide individual plugins of an existing application.
For example, if your site inherits from
:mod:`lino.projects.min2`::
def get_apps_modifiers(self, **kw):
kw.update(sales=None)
kw.update(courses='my.modlib.courses')
return kw
The default implementation returns an empty dict.
This method adds an additional level of customization because
it lets you remove or replace individual plugins from
:setting:`INSTALLED_APPS` without rewriting your own
:meth:`get_installed_apps`.
This will be called during Site instantiation and is expected to
return a dict of `app_label` to `full_python_path`
mappings which you want to override in the list of plugins
returned by :meth:`get_installed_apps`.
Mapping an `app_label` to `None` will remove that plugin from
:setting:`INSTALLED_APPS`.
It is theoretically possible but not recommended to replace an
existing `app_label` by an app with a different
`app_label`. For example, the following might work but is not
recommended::
kw.update(courses='my.modlib.mycourses')
"""
return kw
def is_hidden_app(self, app_label):
"""
Return True if the app is known, but has been disabled using
:meth:`get_apps_modifiers`.
"""
am = self.get_apps_modifiers()
if am.get(app_label, 1) is None:
return True
def update_settings(self, **kw):
"""This may be called from within a :xfile:`lino_local.py`.
"""
self.django_settings.update(**kw)
def define_settings(self, **kwargs):
"""Same as :meth:`update_settings`, but raises an exception if a
setting already exists.
TODO: Currently this exception is deactivated because it
doesn't work as expected. For some reason (maybe because
settings is being imported twice on a devserver) it raises a
false exception when :meth:`override_defaults` tries to use it
on :setting:`MIDDLEWARE_CLASSES`...
"""
if False:
for name in list(kwargs.keys()):
if name in self.django_settings:
raise Exception(
"Tried to define existing Django setting %s" % name)
self.django_settings.update(kwargs)
def startup(self):
"""Start up this Site.
You probably don't want to override this method as it might be
called several times, e.g. under mod_wsgi when another thread has
started but not yet finished `startup()`.
If you want to run custom code on
site startup, override :meth:`do_site_startup`.
"""
from lino.core.kernel import site_startup
site_startup(self)
self.clear_site_config()
def do_site_startup(self):
"""This method is called exactly once during site startup,
just between the pre_startup and the post_startup signals.
A hook for subclasses.
TODO: rename this to `on_startup`?
If you override it, don't forget to call the super method.
"""
# self.logger.info("20160526 %s do_site_startup() a", self.__class__)
# self.logger.info("20160526 %s do_site_startup() b", self.__class__)
@property
def logger(self):
"""This must not be used before Django has done it logging config. For
example don't use it in a :xfile:`settings.py` module.
"""
if self._logger is None:
import logging
self._logger = logging.getLogger(__name__)
return self._logger
def get_settings_subdirs(self, subdir_name):
"""Yield all (existing) directories named `subdir_name` of this Site's
project directory and it's inherited project directories.
"""
# if local settings.py doesn't subclass Site:
if self.project_dir != normpath(dirname(
inspect.getfile(self.__class__))):
pth = join(self.project_dir, subdir_name)
if isdir(pth):
yield pth
for cl in self.__class__.__mro__:
#~ logger.info("20130109 inspecting class %s",cl)
if cl is not object and not inspect.isbuiltin(cl):
pth = join(dirname(inspect.getfile(cl)), subdir_name)
if isdir(pth):
yield pth
def makedirs_if_missing(self, dirname):
"""Make missing directories if they don't exist and if
:attr:`make_missing_dirs` is `True`.
"""
if dirname and not isdir(dirname):
if self.make_missing_dirs:
os.makedirs(dirname)
else:
raise Exception("Please create yourself directory %s" %
dirname)
def is_abstract_model(self, module_name, model_name):
"""
Return True if the named model is declared as being extended by
:attr:`lino.core.plugin.Plugin.extends_models`.
Typical usage::
class MyModel(dd.Model):
class Meta:
abstract = dd.is_abstract_model(__name__, 'MyModel')
See :doc:`/dev/plugin_inheritance`.
"""
app_name = '.'.join(module_name.split('.')[:-1])
model_name = app_name + '.' + model_name
# if 'avanti' in model_name:
# print("20170120", model_name,
# self.override_modlib_models,
# [m for m in self.installed_plugin_modules])
rv = model_name in self.override_modlib_models
if not rv:
if app_name not in self.installed_plugin_modules:
return True
# if model_name.endswith('Company'):
# self.logger.info(
# "20160524 is_abstract_model(%s) -> %s", model_name, rv)
# self.logger.info(
# "20160524 is_abstract_model(%s) -> %s (%s, %s)",
# model_name, rv, self.override_modlib_models.keys(),
# os.getenv('DJANGO_SETTINGS_MODULE'))
return rv
def is_installed_model_spec(self, model_spec):
"""
Deprecated. This feature was a bit too automagic and caused bugs
to pass silently. See e.g. :blogref:`20131025`.
"""
if False: # mod_wsgi interprets them as error
warnings.warn("is_installed_model_spec is deprecated.",
category=DeprecationWarning)
if model_spec == 'self':
return True
app_label, model_name = model_spec.split(".")
return self.is_installed(app_label)
def is_installed(self, app_label):
"""
Return `True` if :setting:`INSTALLED_APPS` contains an item
which ends with the specified `app_label`.
"""
return app_label in self.plugins
def setup_model_spec(self, obj, name):
"""
If the value of the named attribute of `obj` is a string, replace
it by the model specified by that string.
Example usage::
# library code:
class ThingBase(object):
the_model = None
def __init__(self):
settings.SITE.setup_model_spec(self, 'the_model')
# user code:
class MyThing(ThingBase):
the_model = "contacts.Partner"
"""
spec = getattr(obj, name)
if spec and isinstance(spec, six.string_types):
if not self.is_installed_model_spec(spec):
setattr(obj, name, None)
return
from lino.core.utils import resolve_model
msg = "Unresolved model '%s' in {0}.".format(name)
msg += " ({})".format(str(self.installed_plugins))
setattr(obj, name, resolve_model(spec, strict=msg))
def on_each_app(self, methname, *args):
"""
Call the named method on the :xfile:`models.py` module of each
installed app.
Note that this mechanism is deprecated. It is still used (on
names like ``setup_workflows`` and ``setup_site``) for
historical reasons but will disappear one day.
"""
from django.apps import apps
apps = [a.models_module for a in apps.get_app_configs()]
for mod in apps:
meth = getattr(mod, methname, None)
if meth is not None:
if False: # 20150925 once we will do it for good...
raise ChangedAPI("{0} still has a function {1}".format(
mod, methname))
meth(self, *args)
def for_each_app(self, func, *args, **kw):
"""
Call the given function on each installed plugin. Successor of
:meth:`on_each_app`.
This also loops over plugins that don't have a models module
and the base plugins of plugins which extend some plugin.
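The given `func` is called with the full Python name and the module
object of each plugin module, followed by any extra arguments. A
minimal sketch::

    def show(app_name, app_module):
        print(app_name)

    settings.SITE.for_each_app(show)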
"""
from importlib import import_module
done = set()
for p in self.installed_plugins:
for b in p.__class__.__mro__:
if b not in (object, Plugin):
if b.__module__ not in done:
done.add(b.__module__)
parent = import_module(b.__module__)
func(b.__module__, parent, *args, **kw)
if p.app_name not in done:
func(p.app_name, p.app_module, *args, **kw)
def demo_date(self, *args, **kwargs):
"""
Deprecated. Should be replaced by :meth:`today`. Compute a date
using :func:`atelier.utils.date_offset` based on the process
startup time (or :attr:`the_demo_date` if this is set).
Used in Python fixtures and unit tests.
"""
base = self.the_demo_date or self.startup_time.date()
return date_offset(base, *args, **kwargs)
def today(self, *args, **kwargs):
"""
Almost the same as :func:`datetime.date.today`.
One difference is that the system's *today* is replaced by
:attr:`the_demo_date` if that attribute is set.
Another difference is that arguments can be passed to add some
offset. See :func:`atelier.utils.date_offset`.
This feature is being used in many test cases where e.g. the
age of people would otherwise change.
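For example (assuming the ``days`` offset understood by
:func:`atelier.utils.date_offset`)::

    settings.SITE.today()      # the base date itself
    settings.SITE.today(-7)    # one week before the base date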
"""
if self.site_config is not None:
base = self.site_config.simulate_today \
or self.the_demo_date or datetime.date.today()
else:
base = self.the_demo_date or datetime.date.today()
return date_offset(base, *args, **kwargs)
def welcome_text(self):
"""
Returns the text to display in a console window when this
application starts.
"""
return "This is %s using %s." % (
self.site_version(), self.using_text())
def using_text(self):
"""
Text to display in a console window when Lino starts.
"""
return ', '.join([u"%s %s" % (n, v)
for n, v, u in self.get_used_libs()])
def site_version(self):
"""
Used in footnote or header of certain printed documents.
"""
if self.version:
return self.verbose_name + ' ' + self.version
return self.verbose_name
def configure_plugin(self, app_label, **kw):
raise Exception("Replace SITE.configure_plugin by ad.configure_plugin")
def install_migrations(self, *args):
"""
See :func:`lino.utils.dpy.install_migrations`.
"""
from lino.utils.dpy import install_migrations
install_migrations(self, *args)
def parse_date(self, s):
"""
Convert a string formatted using :attr:`date_format_strftime` or
:attr:`date_format_extjs` into a `(y,m,d)` tuple (not a
`datetime.date` instance). See `/blog/2010/1130`.
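For example (``site`` being a Site instance),
``site.parse_date('22.12.2014')`` would return ``(2014, 12, 22)``.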
"""
ymd = tuple(reversed(list(map(int, s.split('.')))))
assert len(ymd) == 3
return ymd
#~ return datetime.date(*ymd)
def parse_time(self, s):
"""
Convert a string formatted using :attr:`time_format_strftime` or
:attr:`time_format_extjs` into a `datetime.time` instance.
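For example, ``'10:30'`` becomes ``datetime.time(10, 30)``.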
"""
hms = list(map(int, s.split(':')))
return datetime.time(*hms)
def parse_datetime(self, s):
"""
Convert a string formatted using :attr:`datetime_format_strftime`
or :attr:`datetime_format_extjs` into a `datetime.datetime`
instance.
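For example, ``'2014-12-22T10:30:00'`` becomes
``datetime.datetime(2014, 12, 22, 10, 30, 0)``.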
"""
#~ print "20110701 parse_datetime(%r)" % s
#~ s2 = s.split()
s2 = s.split('T')
if len(s2) != 2:
raise Exception("Invalid datetime string %r" % s)
ymd = list(map(int, s2[0].split('-')))
hms = list(map(int, s2[1].split(':')))
return datetime.datetime(*(ymd + hms))
#~ d = datetime.date(*self.parse_date(s[0]))
#~ return datetime.combine(d,t)
def strftime(self, t):
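# Format a time value using :attr:`time_format_strftime`,
# returning an empty string when `t` is None.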
if t is None:
return ''
return t.strftime(self.time_format_strftime)
def resolve_virtual_fields(self):
for vf in self.VIRTUAL_FIELDS:
vf.lino_resolve_type()
self.VIRTUAL_FIELDS = []
def register_virtual_field(self, vf):
self.VIRTUAL_FIELDS.append(vf)
def find_config_file(self, *args, **kwargs):
return self.confdirs.find_config_file(*args, **kwargs)
def find_template_config_files(self, *args, **kwargs):
return self.confdirs.find_template_config_files(*args, **kwargs)
def setup_actions(self):
"""
Hook for subclasses to add or modify actions.
"""
from lino.core.merge import MergeAction
for m in get_models():
if m.allow_merge_action:
m.define_action(merge_row=MergeAction(m))
def setup_layouts(self):
'''
Hook for subclasses to add or modify layouts.
Usage example::
def setup_layouts(self):
super(Site, self).setup_layouts()
self.models.system.SiteConfigs.set_detail_layout("""
site_company next_partner_id:10
default_build_method
clients_account sales_account
suppliers_account purchases_account
""")
self.models.accounts.Accounts.set_detail_layout("""
ref:10 name id:5
seqno group type clearable
ledger.MovementsByAccount
""")
'''
pass
def add_user_field(self, name, fld):
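# Inject an additional database field into the user model.
# Does nothing when this site has no user management.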
if self.user_model:
from lino.api import dd
dd.inject_field(self.user_model, name, fld)
def get_used_libs(self, html=None):
"""
Yield a list of (name, version, url) tuples describing the
third-party software used on this site.
This function is used by :meth:`using_text` and
:meth:`welcome_html`.
"""
import lino
yield ("Lino", lino.SETUP_INFO['version'], lino.SETUP_INFO['url'])
try:
import mod_wsgi
version = "{0}.{1}".format(*mod_wsgi.version)
yield ("mod_wsgi", version, "http://www.modwsgi.org/")
except ImportError:
pass
import django
yield ("Django", django.get_version(), "http://www.djangoproject.com")
import sys
version = "%d.%d.%d" % sys.version_info[:3]
yield ("Python", version, "http://www.python.org/")
import babel
yield ("Babel", babel.__version__, "http://babel.edgewall.org/")
#~ import tidylib
#~ version = getattr(tidylib,'__version__','')
#~ yield ("tidylib",version,"http://countergram.com/open-source/pytidylib")
#~ import pyPdf
#~ version = getattr(pyPdf,'__version__','')
#~ yield ("pyPdf",version,"http://countergram.com/open-source/pytidylib")
import jinja2
version = getattr(jinja2, '__version__', '')
yield ("Jinja", version, "http://jinja.pocoo.org/")
# import sphinx
# version = getattr(sphinx, '__version__', '')
# yield ("Sphinx", version, "http://sphinx-doc.org/")
import dateutil
version = getattr(dateutil, '__version__', '')
yield ("python-dateutil", version, "http://labix.org/python-dateutil")
#~ try:
#~ import Cheetah
#~ version = Cheetah.Version
#~ yield ("Cheetah",version ,"http://cheetahtemplate.org/")
#~ except ImportError:
#~ pass
# try:
# from odf import opendocument
# version = opendocument.__version__
# except ImportError:
# version = self.not_found_msg
# yield ("OdfPy", version, "http://pypi.python.org/pypi/odfpy")
# try:
# import docutils
# version = docutils.__version__
# except ImportError:
# version = self.not_found_msg
# yield ("docutils", version, "http://docutils.sourceforge.net/")
# import yaml
# version = getattr(yaml, '__version__', '')
# yield ("PyYaml", version, "http://pyyaml.org/")
if self.social_auth_backends is not None:
try:
import social_django
version = social_django.__version__
except ImportError:
version = self.not_found_msg
name = "social-django"
yield (name, version, "https://github.com/python-social-auth")
for p in self.installed_plugins:
for u in p.get_used_libs(html):
yield u
def get_social_auth_links(self):
# print("20171207 site.py")
# elems = []
if self.social_auth_backends is None:
return
from social_core.backends.utils import load_backends
# from collections import OrderedDict
# from django.conf import settings
# from social_core.backends.base import BaseAuth
# backend = module_member(auth_backend)
# if issubclass(backend, BaseAuth):
for b in load_backends(
self.social_auth_backends).values():
yield E.a(b.name, href="/oauth/login/"+b.name)
# print("20171207 a", elems)
# return E.div(*elems)
def apply_languages(self):
"""This function is called when a Site object gets instantiated,
i.e. while Django is still loading the settings. It analyzes
the :attr:`languages` attribute and converts it to a tuple of
:data:`LanguageInfo` objects.
"""
if isinstance(self.languages, tuple) \
and isinstance(self.languages[0], LanguageInfo):
# e.g. override_defaults() has been called explicitly, without
# specifying a languages keyword.
return
self.language_dict = dict() # maps simple_code -> LanguageInfo
self.LANGUAGE_CHOICES = []
self.LANGUAGE_DICT = dict() # used in lino.modlib.users
must_set_language_code = False
#~ self.AVAILABLE_LANGUAGES = (to_locale(self.DEFAULT_LANGUAGE),)
if self.languages is None:
self.languages = [DJANGO_DEFAULT_LANGUAGE]
#~ self.update_settings(USE_L10N = False)
#~ info = LanguageInfo(DJANGO_DEFAULT_LANGUAGE,to_locale(DJANGO_DEFAULT_LANGUAGE),0,'')
#~ self.DEFAULT_LANGUAGE = info
#~ self.languages = (info,)
#~ self.language_dict[info.name] = info
else:
if isinstance(self.languages, six.string_types):
self.languages = str(self.languages).split()
#~ lc = [x for x in self.django_settings.get('LANGUAGES' if x[0] in languages]
#~ lc = language_choices(*self.languages)
#~ self.update_settings(LANGUAGES = lc)
#~ self.update_settings(LANGUAGE_CODE = lc[0][0])
#~ self.update_settings(LANGUAGE_CODE = self.languages[0])
self.update_settings(USE_L10N=True)
must_set_language_code = True
languages = []
for i, django_code in enumerate(self.languages):
assert_django_code(django_code)
name = str(to_locale(django_code))
if name in self.language_dict:
raise Exception("Duplicate name %s for language code %r"
% (name, django_code))
if i == 0:
suffix = ''
else:
suffix = '_' + name
info = LanguageInfo(str(django_code), str(name), i, str(suffix))
self.language_dict[name] = info
languages.append(info)
new_languages = languages
for info in tuple(new_languages):
if '-' in info.django_code:
base, loc = info.django_code.split('-')
if not base in self.language_dict:
self.language_dict[base] = info
# replace the complicated info by a simplified one
#~ newinfo = LanguageInfo(info.django_code,base,info.index,info.suffix)
#~ new_languages[info.index] = newinfo
#~ del self.language_dict[info.name]
#~ self.language_dict[newinfo.name] = newinfo
#~ for base,lst in simple_codes.items():
#~ if len(lst) == 1 and and not base in self.language_dict:
#~ self.language_dict[base] = lst[0]
self.languages = tuple(new_languages)
self.DEFAULT_LANGUAGE = self.languages[0]
self.BABEL_LANGS = tuple(self.languages[1:])
if must_set_language_code:
self.update_settings(LANGUAGE_CODE=self.languages[0].django_code)
# Note: LANGUAGE_CODE is what *Django* believes to be the
# default language. This should be some variant of
# English ('en' or 'en-us') if you use
# `django.contrib.humanize`
# https://code.djangoproject.com/ticket/20059
self.setup_languages()
def setup_languages(self):
"""
Reduce Django's :setting:`LANGUAGES` to my `languages`.
Note that the `lng.name` values are not yet translated; we take these
from `django.conf.global_settings`.
"""
from django.conf.global_settings import LANGUAGES
def langtext(code):
for k, v in LANGUAGES:
if k == code:
return v
# returns None if not found
def _add_language(code, lazy_text):
self.LANGUAGE_DICT[code] = lazy_text
self.LANGUAGE_CHOICES.append((code, lazy_text))
if self.languages is None:
_add_language(DJANGO_DEFAULT_LANGUAGE, _("English"))
else:
for lang in self.languages:
code = lang.django_code
text = langtext(code)
if text is None:
# Django doesn't know these
if code == 'de-be':
text = gettext_noop("German (Belgium)")
elif code == 'de-ch':
text = gettext_noop("German (Swiss)")
elif code == 'de-at':
text = gettext_noop("German (Austria)")
elif code == 'en-us':
text = gettext_noop("American English")
else:
raise Exception(
"Unknown language code %r (must be one of %s)" % (
lang.django_code,
[x[0] for x in LANGUAGES]))
text = _(text)
_add_language(lang.django_code, text)
"""
Cannot activate the site's default language
because some test cases in django.contrib.humanize
rely on en-us as default language
"""
#~ set_language(self.get_default_language())
"""
reduce Django's LANGUAGES to my babel languages:
"""
self.update_settings(
LANGUAGES=[x for x in LANGUAGES
if x[0] in self.LANGUAGE_DICT])
def get_language_info(self, code):
"""Use this in Python fixtures or tests to test whether a Site
instance supports a given language. `code` must be a
Django-style language code.
On a site with only one locale of a language (and optionally
some other languages), you can use the plain language code to
get its :data:`LanguageInfo` entry.
>>> from lino.core.site import TestSite as Site
>>> Site(languages="en-us fr de-be de").get_language_info('en')
LanguageInfo(django_code='en-us', name='en_US', index=0, suffix='')
On a site with two locales of a same language (e.g. 'en-us'
and 'en-gb'), the simple code 'en' yields that first variant:
>>> site = Site(languages="en-us en-gb")
>>> print(site.get_language_info('en'))
LanguageInfo(django_code='en-us', name='en_US', index=0, suffix='')
"""
return self.language_dict.get(code, None)
def resolve_languages(self, languages):
"""
This is used by `UserType`.
Examples:
>>> from lino.core.site import TestSite as Site
>>> lst = Site(languages="en fr de nl et pt").resolve_languages('en fr')
>>> [i.name for i in lst]
['en', 'fr']
You may not specify languages which don't exist on this site:
>>> Site(languages="en fr de").resolve_languages('en nl')
Traceback (most recent call last):
...
Exception: Unknown language code 'nl' (must be one of ['en', 'fr', 'de'])
"""
rv = []
if isinstance(languages, six.string_types):
languages = str(languages).split()
for k in languages:
if isinstance(k, six.string_types):
li = self.get_language_info(k)
if li is None:
raise Exception(
"Unknown language code %r (must be one of %s)" % (
str(k), [i.name for i in self.languages]))
rv.append(li)
else:
assert k in self.languages
rv.append(k)
return tuple(rv)
def language_choices(self, language, choices):
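# Pick the entry for `language` from a dict of per-language choices,
# falling back to the entry for the site's DEFAULT_LANGUAGE.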
l = choices.get(language, None)
if l is None:
l = choices.get(self.DEFAULT_LANGUAGE)
return l
def get_default_language(self):
"""
The django code of the default language to use in every
:class:`dd.LanguageField`.
"""
return self.DEFAULT_LANGUAGE.django_code
def str2kw(self, name, txt, **kw):
"""
Return a dictionary which maps the internal field names for
babelfield `name` to their respective translation of the given
lazy translatable string `txt`.
>>> from django.utils.translation import ugettext_lazy as _
>>> from lino.core.site import TestSite as Site
>>> site = Site(languages='de fr es')
>>> site.str2kw('name', _("January")) == {'name_fr': 'janvier', 'name': 'Januar', 'name_es': 'Enero'}
True
>>> site = Site(languages='fr de es')
>>> site.str2kw('name', _("January")) == {'name_de': 'Januar', 'name': 'janvier', 'name_es': 'Enero'}
True
"""
from django.utils import translation
for simple, info in self.language_dict.items():
with translation.override(simple):
kw[name + info.suffix] = six.text_type(txt)
return kw
def babelkw(self, name, **kw):
"""
Return a dict with appropriate resolved field names for a
BabelField `name` and a set of hard-coded values.
You have some hard-coded multilingual content in a fixture:
>>> from lino.core.site import TestSite as Site
>>> kw = dict(de="Hallo", en="Hello", fr="Salut")
The field names where this info gets stored depend on the
Site's `languages` distribution.
>>> Site(languages="de-be en").babelkw('name',**kw) == {'name_en': 'Hello', 'name': 'Hallo'}
True
>>> Site(languages="en de-be").babelkw('name',**kw) == {'name_de_BE': 'Hallo', 'name': 'Hello'}
True
>>> Site(languages="en-gb de").babelkw('name',**kw) == {'name_de': 'Hallo', 'name': 'Hello'}
True
>>> Site(languages="en").babelkw('name',**kw) == {'name': 'Hello'}
True
>>> Site(languages="de-be en").babelkw('name',de="Hallo",en="Hello") == {'name_en': 'Hello', 'name': 'Hallo'}
True
In the following example `babelkw` assigns the
keyword `de` to the *first* language variant:
>>> Site(languages="de-ch de-be").babelkw('name',**kw) == {'name': 'Hallo'}
True
"""
d = dict()
for simple, info in self.language_dict.items():
v = kw.get(simple, None)
if v is not None:
d[name + info.suffix] = six.text_type(v)
return d
def args2kw(self, name, *args):
"""
Takes the basename of a BabelField and the values for each language.
Returns a `dict` mapping the actual fieldnames to their values.
"""
assert len(args) == len(self.languages)
kw = {name: args[0]}
for i, lang in enumerate(self.BABEL_LANGS):
kw[name + lang.suffix] = args[i + 1]  # lang is a LanguageInfo; use its suffix
return kw
def field2kw(self, obj, name, **known_values):
"""Return a `dict` with all values of the BabelField `name` in the
given object `obj`. The dict will have one key for each
:attr:`languages`.
Examples:
>>> from lino.core.site import TestSite as Site
>>> from atelier.utils import AttrDict
>>> def testit(site_languages):
... site = Site(languages=site_languages)
... obj = AttrDict(site.babelkw(
... 'name', de="Hallo", en="Hello", fr="Salut"))
... return site,obj
>>> site, obj = testit('de en')
>>> site.field2kw(obj, 'name') == {'de': 'Hallo', 'en': 'Hello'}
True
>>> site, obj = testit('fr et')
>>> site.field2kw(obj, 'name') == {'fr': 'Salut'}
True
"""
# d = { self.DEFAULT_LANGUAGE.name : getattr(obj,name) }
for lng in self.languages:
v = getattr(obj, name + lng.suffix, None)
if v:
known_values[lng.name] = v
return known_values
def field2args(self, obj, name):
"""
Return a list of the babel values of this field in the order of
this Site's :attr:`Site.languages` attribute.
"""
return [str(getattr(obj, name + li.suffix)) for li in self.languages]
#~ l = [ getattr(obj,name) ]
#~ for lang in self.BABEL_LANGS:
#~ l.append(getattr(obj,name+'_'+lang))
#~ return l
def babelitem(self, *args, **values):
"""
Given a dictionary with babel values, return the
value corresponding to the current language.
This is available in templates as a function `tr`.
>>> kw = dict(de="Hallo", en="Hello", fr="Salut")
>>> from lino.core.site import TestSite as Site
>>> from django.utils import translation
A Site with default language "de":
>>> site = Site(languages="de en")
>>> tr = site.babelitem
>>> with translation.override('de'):
... print(tr(**kw))
Hallo
>>> with translation.override('en'):
... print(tr(**kw))
Hello
If the current language is not found in the specified `values`,
then it returns the value for the site's default language:
>>> with translation.override('jp'):
... print(tr(en="Hello", de="Hallo", fr="Salut"))
Hallo
Testing detail: default language should be "de" in our example, but
we are playing here with more than one Site instance while Django
knows only one "default language" which is the one specified in
`lino.projects.docs.settings`.
Another way is to specify an explicit default value using a
positional argument. In that case the site's default language
doesn't matter:
>>> with translation.override('jp'):
... print(tr("Tere", de="Hallo", fr="Salut"))
Tere
>>> with translation.override('de'):
... print(tr("Tere", de="Hallo", fr="Salut"))
Hallo
You may not specify more than one default value:
>>> tr("Hello", "Hallo")
Traceback (most recent call last):
...
ValueError: ('Hello', 'Hallo') is more than 1 default value.
"""
if len(args) == 0:
info = self.language_dict.get(
get_language(), self.DEFAULT_LANGUAGE)
default_value = None
if info == self.DEFAULT_LANGUAGE:
return values.get(info.name)
x = values.get(info.name, None)
if x is None:
return values.get(self.DEFAULT_LANGUAGE.name)
return x
elif len(args) == 1:
info = self.language_dict.get(get_language(), None)
if info is None:
return args[0]
default_value = args[0]
return values.get(info.name, default_value)
args = tuple_py2(args)
# print(type(args))
raise ValueError("%(values)s is more than 1 default value." %
dict(values=args))
# babel_get(v) = babelitem(**v)
def babeldict_getitem(self, d, k):
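# Look up `k` in `d`; if present, the value must itself be a dict of
# babel values and is resolved for the current language.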
v = d.get(k, None)
if v is not None:
assert type(v) is dict
return self.babelitem(**v)
def babelattr(self, obj, attrname, default=NOT_PROVIDED, language=None):
"""
Return the value of the specified babel field `attrname` of `obj`
in the current language.
This is to be used in multilingual document templates. For
example in a document template of a Contract you may use the
following expression::
babelattr(self.type, 'name')
This will return the correct value for the current language.
Examples:
>>> from __future__ import unicode_literals
>>> from django.utils import translation
>>> from lino.core.site import TestSite as Site
>>> from atelier.utils import AttrDict
>>> def testit(site_languages):
... site = Site(languages=site_languages)
... obj = AttrDict(site.babelkw(
... 'name', de="Hallo", en="Hello", fr="Salut"))
... return site, obj
>>> site,obj = testit('de en')
>>> with translation.override('de'):
... print(site.babelattr(obj,'name'))
Hallo
>>> with translation.override('en'):
... print(site.babelattr(obj,'name'))
Hello
If the object has no translation for the given language, return
the value for the site's default language. Two possible cases:
The language exists on the site, but the object has no
translation for it:
>>> site,obj = testit('en es')
>>> with translation.override('es'):
... print(site.babelattr(obj, 'name'))
Hello
Or a language has been activated which doesn't exist on the site:
>>> with translation.override('fr'):
... print(site.babelattr(obj, 'name'))
Hello
"""
if language is None:
language = get_language()
info = self.language_dict.get(language, self.DEFAULT_LANGUAGE)
if info.index != 0:
v = getattr(obj, attrname + info.suffix, None)
if v:
return v
if default is NOT_PROVIDED:
return getattr(obj, attrname)
else:
return getattr(obj, attrname, default)
#~ if lang is not None and lang != self.DEFAULT_LANGUAGE:
#~ v = getattr(obj,attrname+"_"+lang,None)
#~ if v:
#~ return v
#~ return getattr(obj,attrname,*args)
def diagnostic_report_rst(self, *args):
"""Returns a string with a diagnostic report about this
site. :manage:`diag` is a command-line shortcut to this.
"""
s = ''
s += rstgen.header(1, "Plugins")
for n, kp in enumerate(self.plugins.items()):
s += "%d. " % (n + 1)
s += "%s : %s\n" % kp
# s += "config_dirs: %s\n" % repr(self.confdirs.config_dirs)
s += "\n"
s += rstgen.header(1, "Config directories")
for n, cd in enumerate(self.confdirs.config_dirs):
s += "%d. " % (n + 1)
ln = relpath(cd.name)
if cd.writeable:
ln += " [writeable]"
s += ln + '\n'
# for arg in args:
# p = self.plugins[arg]
return s
# def get_db_overview_rst(self):
# from lino.utils.diag import analyzer
# analyzer.show_db_overview()
def override_defaults(self, **kwargs):
self.override_settings(**kwargs)
self.install_settings()
def is_imported_partner(self, obj):
"""
Return whether the specified
:class:`Partner <ml.contacts.Partner>` instance
`obj` is to be considered as imported from some legacy database.
"""
#~ return obj.id is not None and (obj.id < 200000 or obj.id > 299999)
return False
#~ return obj.id is not None and (obj.id > 10 and obj.id < 21)
def site_header(self):
"""Used in footnote or header of certain printed documents.
The convention is to call it as follows from an appy.pod template
(use the `html` function, not `xhtml`)
::
do text
from html(settings.SITE.site_header())
Note that this is expected to return a unicode string possibly
containing valid HTML (not XHTML) tags for formatting.
"""
if self.is_installed('contacts'):
if self.site_config.site_company:
return self.site_config.site_company.get_address('<br/>')
#~ s = unicode(self.site_config.site_company) + " / " + s
#~ return ''
# def setup_main_menu(self):
# """
# To be implemented by applications.
# """
# pass
def get_dashboard_items(self, user):
"""Expected to yield a sequence of items to be rendered on the
dashboard (:xfile:`admin_main.html`).
The default implementation calls :meth:`get_dashboard_items
<lino.core.plugin.Plugin.get_dashboard_items>` on every
installed plugin and yields all items.
The items will be rendered in that order, except if
:mod:`lino.modlib.dashboard` is installed to enable per-user
customized dashboard.
"""
if user:
for p in self.installed_plugins:
for i in p.get_dashboard_items(user):
yield i
@property
def site_config(self):
"""
This property holds a cached version of the one and only
:class:`SiteConfig <lino.modlib.system.models.SiteConfig>` row
that holds site-wide database-stored and web-editable Site
configuration parameters.
If no instance exists (which happens in a virgin database), we
create it using default values from
:attr:`site_config_defaults`.
This is always `None` when :mod:`lino.modlib.system` is not
installed.
"""
if 'system' not in self.models:
return None
if not self._startup_done:
return None
if self._site_config is None:
#~ raise Exception(20130301)
#~ print '20130320 create _site_config'
#~ from lino.core.utils import resolve_model
from lino.core.utils import obj2str
SiteConfig = self.models.system.SiteConfig
#~ from django.db.utils import DatabaseError
try:
self._site_config = SiteConfig.real_objects.get(
id=self.config_id)
# print("20180502 loaded SiteConfig {}",
# obj2str(self._site_config, True))
#~ except (SiteConfig.DoesNotExist,DatabaseError):
except SiteConfig.DoesNotExist:
#~ except Exception,e:
kw = dict(id=self.config_id)
#~ kw.update(settings.SITE.site_config_defaults)
kw.update(self.site_config_defaults)
self._site_config = SiteConfig(**kw)
# print("20180502 Created SiteConfig {}".format(
# obj2str(self._site_config, True)))
# 20120725
# polls_tutorial menu selection `Config --> Site Parameters`
# said "SiteConfig 1 does not exist"
# cannot save the instance here because the db table possibly doesn't yet exit.
#~ self._site_config.save()
return self._site_config
#~ site_config = property(get_site_config)
#~ def shutdown(self):
#~ self.clear_site_config()
#~ return super(Site,self).shutdown()
def clear_site_config(self):
"""
Clear the cached SiteConfig instance.
This is needed e.g. when the test runner has created a new
test database.
"""
from lino.core.utils import obj2str
# print("20180502 clear_site_config {}".format(
# obj2str(self._site_config, True)))
self._site_config = None
def get_quicklinks(self, user):
from lino.core import menus
m = menus.Toolbar(user.user_type, 'quicklinks')
self.setup_quicklinks(user, m)
return m
def setup_quicklinks(self, user, m):
"""Override this in application-specific (or even local)
:xfile:`settings.py` files to define a series of *quick links*
to appear below the main menu bar.
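A hypothetical override (assuming a ``contacts`` plugin)::

    def setup_quicklinks(self, user, tb):
        super(Site, self).setup_quicklinks(user, tb)
        tb.add_action('contacts.Persons')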
"""
self.on_each_app('setup_quicklinks', user, m)
def get_site_menu(self, ui, user_type):
"""
Return this site's main menu for the given UserType.
Must be a :class:`lino.core.menus.Toolbar` instance.
Applications usually should not need to override this.
"""
from lino.core import menus
main = menus.Toolbar(user_type, 'main')
self.setup_menu(user_type, main)
main.compress()
return main
def setup_menu(self, user_type, main):
"""Set up the application's menu structure.
The default implementation uses a system of *predefined
top-level items* that are filled by the different installed
plugins.
- `setup_master_menu`
- `setup_main_menu`
- `setup_reports_menu`
- `setup_config_menu`
- `setup_explorer_menu`
- `setup_site_menu`
These predefined top-level items ("Master", "Main", "Reports",
"Configuration", ...) are themselves configurable in
:attr:`top_level_menus`.
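Each installed plugin can implement any of these hooks. A
hypothetical plugin-level example (the hook receives the site, the
user type and the menu to fill)::

    def setup_main_menu(self, site, user_type, m):
        m = m.add_menu(self.app_label, self.verbose_name)
        m.add_action('contacts.Persons')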
"""
from django.apps import apps
apps = [a.models_module for a in apps.get_app_configs()]
for k, label in self.top_level_menus:
methname = "setup_{0}_menu".format(k)
for mod in apps:
if hasattr(mod, methname):
msg = "{0} still has a function {1}(). \
Please convert to Plugin method".format(mod, methname)
raise ChangedAPI(msg)
if label is None:
menu = main
else:
menu = main.add_menu(k, label)
for p in self.installed_plugins:
meth = getattr(p, methname, None)
if meth is not None:
meth(self, user_type, menu)
def get_middleware_classes(self):
"""Yields the strings to be stored in
the :setting:`MIDDLEWARE_CLASSES` setting.
In case you don't want to use this method for defining
:setting:`MIDDLEWARE_CLASSES`, you can simply set
:setting:`MIDDLEWARE_CLASSES` in your :xfile:`settings.py`
after the :class:`Site` has been instantiated.
`Django and standard HTTP authentication
<http://stackoverflow.com/questions/152248/can-i-use-http-basic-authentication-with-django>`_
"""
yield 'django.middleware.common.CommonMiddleware'
if self.languages and len(self.languages) > 1:
yield 'django.middleware.locale.LocaleMiddleware'
if self.user_model:
yield 'django.contrib.sessions.middleware.SessionMiddleware'
# yield 'django.contrib.auth.middleware.AuthenticationMiddleware'
yield 'lino.core.auth.middleware.AuthenticationMiddleware'
yield 'lino.core.auth.middleware.WithUserMiddleware'
yield 'lino.core.auth.middleware.DeviceTypeMiddleware'
else:
yield 'lino.core.auth.middleware.NoUserMiddleware'
if self.get_auth_method() == 'remote':
# yield 'django.contrib.auth.middleware.RemoteUserMiddleware'
yield 'lino.core.auth.middleware.RemoteUserMiddleware'
if self.use_ipdict:
yield 'lino.modlib.ipdict.middleware.Middleware'
if self.social_auth_backends:
yield 'social_django.middleware.SocialAuthExceptionMiddleware'
if True:
yield 'lino.utils.ajax.AjaxExceptionResponse'
if self.use_security_features:
yield 'django.middleware.security.SecurityMiddleware'
yield 'django.middleware.clickjacking.XFrameOptionsMiddleware'
# yield 'django.middleware.csrf.CsrfViewMiddleware'
if False:
#~ yield 'lino.utils.sqllog.ShortSQLLogToConsoleMiddleware'
yield 'lino.utils.sqllog.SQLLogToConsoleMiddleware'
#~ yield 'lino.utils.sqllog.SQLLogMiddleware'
# def get_main_action(self, user_type):
# """No longer used.
# Return the action to show as top-level "index.html".
# The default implementation returns `None`, which means
# that Lino will call :meth:`get_main_html`.
# """
# return None
def __deepcopy__(self):
raise Exception("Who is copying me?!")
def __copy__(self):
raise Exception("Who is copying me?!")
def get_main_html(self, request, **context):
"""Return a chunk of html to be displayed in the main area of the
admin index. This is being called only if
:meth:`get_main_action` returns `None`. The default
implementation renders the :xfile:`admin_main.html` template.
"""
return self.plugins.jinja.render_from_request(
request, 'admin_main.html', **context)
def get_welcome_messages(self, ar):
"""
Yields a list of "welcome messages" (see
:meth:`lino.core.actors.Actor.get_welcome_messages`) of all
actors. This is being called from :xfile:`admin_main.html`.
"""
for h in self._welcome_handlers:
for msg in h(ar):
yield msg
# for a in self._welcome_actors:
# for msg in a.get_welcome_messages(ar):
# yield msg
def add_welcome_handler(self, func, actor=None, msg=None):
"""Add the given callable as a "welcome handler". Lino will call
every welcome handler for every incoming request, passing them
a :class:`BaseRequest <lino.core.requests.BaseRequest>`
instance representing this request as positional argument.
The callable is expected to yield a series of messages
(usually either 0 or 1). Each message must be either a string
or a :class:`E.span <etgen.html.E>` element.
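A minimal sketch of such a handler::

    def my_handler(ar):
        yield "Don't forget to check the backups!"

    settings.SITE.add_welcome_handler(my_handler)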
"""
# print(
# "20161219 add_welcome_handler {} {} {}".format(
# actor, msg, func))
self._welcome_handlers.append(func)
def get_installed_apps(self):
"""Yield the list of apps to be installed on this site. Each item
must be either a string (unicode being converted to str) or a
*generator* which will be iterated recursively (again
expecting either strings or generators of strings).
Lino will call this method exactly once when the :class:`Site`
instantiates. The resulting list of names may then be
altered by the :meth:`get_apps_modifiers` method before being
assigned to the :setting:`INSTALLED_APPS` setting.
"""
if self.django_admin_prefix:
yield 'django.contrib.admin' # not tested
yield 'django.contrib.staticfiles'
yield 'lino.modlib.about'
if self.use_ipdict:
yield 'lino.modlib.ipdict'
if self.social_auth_backends:
yield 'social_django'
yield self.default_ui
if self.admin_ui is not None:
if self.admin_ui == self.default_ui:
raise Exception(
"admin_ui (if specified) must be different "
"from default_ui")
yield self.admin_ui
# if self.default_ui == "extjs":
# yield 'lino.modlib.extjs'
# yield 'lino.modlib.bootstrap3'
# elif self.default_ui == "bootstrap3":
# yield 'lino.modlib.bootstrap3'
# yield "lino.modlib.lino_startup"
site_prefix = '/'
"""The string to prefix to every URL of the Lino web interface.
This must start and end with a *slash*. The default value is
``'/'``.
This must be set if your project is not being served at the "root"
URL of your server.
If this is different from the default value, Lino also sets
:setting:`SESSION_COOKIE_PATH`.
When this Site is running under something else than a development
server, this setting must correspond to your web server's
configuration. For example if you have::
WSGIScriptAlias /foo /home/luc/mypy/lino_sites/foo/wsgi.py
Then your :xfile:`settings.py` should specify::
site_prefix = '/foo/'
See also :ref:`mass_hosting`.
"""
def buildurl(self, *args, **kw):
url = self.site_prefix + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
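# Illustrative results (assuming the default site_prefix '/'):
#   self.buildurl('api', 'users')                -> '/api/users'
#   self.buildurl('api', 'users', format='json') -> '/api/users?format=json'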
def build_media_url(self, *args, **kw):
from django.conf import settings
url = settings.MEDIA_URL + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
def build_static_url(self, *args, **kw):
from django.conf import settings
url = settings.STATIC_URL + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
def send_email(self, subject, sender, body, recipients):
"""Send an email message with the specified arguments (the same
signature as `django.core.mail.EmailMessage`).
`recipients` is an iterator over a list of strings with email
addresses. Any address containing '@example.com' will be
removed. Does nothing if the resulting list of recipients is
empty.
If `body` starts with "<", then it is considered to be HTML.
"""
if '@example.com' in sender:
self.logger.debug(
"Ignoring email '%s' because sender is %s", subject, sender)
print(PRINT_EMAIL.format(
subject=subject, sender=sender, body=body,
recipients=u', '.join(recipients)).encode(
'ascii', 'replace').decode())
return
recipients = [a for a in recipients if '@example.com' not in a]
if not len(recipients):
self.logger.info(
"Ignoring email '%s' because there is no recipient", subject)
return
self.logger.info(
"Send email '%s' from %s to %s", subject, sender, recipients)
from django.core.mail import send_mail
kw = {}
if body.startswith('<'):
kw['html_message'] = body
body = html2text(body)
# self.logger.info("20161008b %r %r %r %r", subject, sender, recipients, body)
try:
send_mail(subject, body, sender, recipients, **kw)
except Exception as e:
self.logger.warning("send_mail() failed : %s", e)
# from django.core.mail import EmailMessage
# msg = EmailMessage(subject=subject,
# from_email=sender, body=body, to=recipients)
# self.logger.info(
# "Send email '%s' from %s to %s", subject, sender, recipients)
# msg.send()
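# Illustrative call (sketch; the addresses are made up and deliberately avoid
# '@example.com', which this method filters out):
#   self.send_email("Hello", "noreply@mysite.org", "<p>Hi there</p>",
#                   ["jane@mysite.org"])
# Because the body starts with '<', it is passed as html_message and a plain
# text fallback is produced with html2text().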
def welcome_html(self, ui=None):
"""
Return an HTML version of the "This is APPLICATION
version VERSION using ..." text, to be displayed in the
About dialog, in the plain html footer, and maybe at other
places.
"""
from django.utils.translation import ugettext as _
p = []
sep = ''
if self.verbose_name:
p.append(_("This is "))
if self.url:
p.append(
E.a(str(self.verbose_name), href=self.url, target='_blank'))
else:
p.append(E.b(str(self.verbose_name)))
if self.version:
p.append(' ')
p.append(self.version)
sep = _(' using ')
for name, version, url in self.get_used_libs(html=E):
p.append(sep)
p.append(E.a(name, href=url, target='_blank'))
p.append(' ')
p.append(version)
sep = ', '
return E.span(*p)
def login(self, username=None, **kw):
"""Open a session as the user with the given `username`.
For usage from a shell or a tested document. It does not require
a password because anybody with command-line access is trusted
to have already authenticated.
It returns a
:class:`BaseRequest <lino.core.requests.BaseRequest>` object.
"""
from lino.core import requests
self.startup()
User = self.user_model
if User and username:
try:
kw.update(user=User.objects.get(username=username))
except User.DoesNotExist:
raise Exception("'{0}' : no such user".format(username))
# if not 'renderer' in kw:
# kw.update(renderer=self.ui.text_renderer)
# import lino.core.urls # hack: trigger ui instantiation
return requests.BaseRequest(**kw)
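# Illustrative usage (sketch; 'robin' is a hypothetical username and the Site
# instance is assumed to be available as settings.SITE, as in a typical Lino
# settings.py):
#   ses = settings.SITE.login('robin')
#   # ses is a BaseRequest that can be used like an incoming request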
def get_letter_date_text(self, today=None):
"""
Returns a string like "Eupen, den 26. August 2013".
"""
sc = self.site_config.site_company
if today is None:
today = self.today()
from lino.utils.format_date import fdl
if sc and sc.city:
return _("%(place)s, %(date)s") % dict(
place=str(sc.city.name), date=fdl(today))
return fdl(today)
def decfmt(self, v, places=2, **kw):
"""
Format a Decimal value using :func:`lino.utils.moneyfmt`, but
applying the site settings
:attr:`lino.Lino.decimal_group_separator` and
:attr:`lino.Lino.decimal_separator`.
>>> from lino.core.site import TestSite as Site
>>> from decimal import Decimal
>>> self = Site()
>>> print(self.decimal_group_separator)
\xa0
>>> print(self.decimal_separator)
,
>>> x = Decimal(1234)
>>> print(self.decfmt(x))
1\xa0234,00
>>> print(self.decfmt(x, sep="."))
1.234,00
>>> self.decimal_group_separator = '.'
>>> print(self.decfmt(x))
1.234,00
>>> self.decimal_group_separator = "oops"
>>> print(self.decfmt(x))
1oops234,00
"""
kw.setdefault('sep', self.decimal_group_separator)
kw.setdefault('dp', self.decimal_separator)
from lino.utils import moneyfmt
return moneyfmt(v, places=places, **kw)
LOOKUP_OP = '__iexact'
def lookup_filter(self, fieldname, value, **kw):
"""
Return a `models.Q` to be used if you want to search for a given
string in any of the languages for the given babel field.
"""
from django.db.models import Q
kw[fieldname + self.LOOKUP_OP] = value
#~ kw[fieldname] = value
flt = Q(**kw)
del kw[fieldname + self.LOOKUP_OP]
for lng in self.BABEL_LANGS:
kw[fieldname + lng.suffix + self.LOOKUP_OP] = value
flt = flt | Q(**kw)
del kw[fieldname + lng.suffix + self.LOOKUP_OP]
return flt
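# Illustrative result (sketch; assumes BABEL_LANGS contains one language with
# suffix '_de'): lookup_filter('name', 'foo') returns the equivalent of
#   Q(name__iexact='foo') | Q(name_de__iexact='foo')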
# def relpath(self, p):
# """Used by :class:`lino.mixins.printable.EditTemplate` in order to
# write a testable message...
# """
# if p.startswith(self.project_dir):
# p = "$(PRJ)" + p[len(self.project_dir):]
# return p
class TestSite(Site):
"""Used to simplify doctest strings because it inserts default values
for the two first arguments that are mandatory but not used in our
examples.
Example::
>> from lino.core.site import Site
>> Site(globals(), ...)
>> from lino.core.site import TestSite as Site
>> Site(...)
"""
def __init__(self, *args, **kwargs):
# kwargs.update(no_local=True)
g = dict(__file__=__file__)
g.update(SECRET_KEY="20227") # see :djangoticket:`20227`
super(TestSite, self).__init__(g, *args, **kwargs)
# 20140913 Hack needed for doctests in :mod:`ad`.
from django.utils import translation
translation._default = None
def _test():
# we want to raise an Exception if there is a failure, but
# doctest's raise_on_error=True option is not useful because it
# does not report the traceback if some test fails.
import doctest
res = doctest.testmod()
if res.failed > 0:
raise Exception("{0} (see earlier output)".format(res))
if __name__ == "__main__":
_test()
|
[
"lino.core.requests.BaseRequest",
"django.core.mail.send_mail",
"lino.core.merge.MergeAction",
"logging.getLogger",
"unipath.Path",
"etgen.html.E.a",
"datetime.time",
"etgen.html.E.span",
"six.moves.urllib.parse.urlencode",
"os.path.join",
"doctest.testmod",
"lino.core.menus.Toolbar",
"os.path.dirname",
"os.path.exists",
"django.utils.translation.override",
"lino.core.utils.resolve_model",
"atelier.utils.date_offset",
"lino.utils.dpy.install_migrations",
"django.utils.translation.ugettext",
"inspect.isbuiltin",
"django.utils.translation.get_language",
"datetime.datetime.now",
"django.get_version",
"importlib.import_module",
"atelier.utils.AttrDict",
"django.apps.apps.get_app_configs",
"lino.utils.moneyfmt",
"lino.api.dd.inject_field",
"datetime.date.today",
"lino.utils.format_date.fdl",
"datetime.datetime",
"six.text_type",
"inspect.getfile",
"atelier.utils.tuple_py2",
"builtins.str",
"social_core.backends.utils.load_backends",
"atelier.rstgen.header",
"lino.core.kernel.site_startup",
"lino.core.utils.get_models",
"html2text.HTML2Text",
"lino.core.exceptions.ChangedAPI",
"os.makedirs",
"os.path.isdir",
"django.db.models.Q",
"lino.core.utils.simplify_name",
"os.environ.get",
"collections.namedtuple",
"os.path.relpath",
"warnings.warn",
"lino.assert_django_code",
"lino.utils.config.ConfigDirCache"
] |
[((1576, 1662), 'collections.namedtuple', 'collections.namedtuple', (['"""LanguageInfo"""', "('django_code', 'name', 'index', 'suffix')"], {}), "('LanguageInfo', ('django_code', 'name', 'index',\n 'suffix'))\n", (1598, 1662), False, 'import collections\n'), ((1383, 1394), 'html2text.HTML2Text', 'HTML2Text', ([], {}), '()\n', (1392, 1394), False, 'from html2text import HTML2Text\n'), ((131089, 131106), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (131104, 131106), False, 'import doctest\n'), ((40264, 40274), 'atelier.utils.AttrDict', 'AttrDict', ([], {}), '()\n', (40272, 40274), False, 'from atelier.utils import AttrDict, date_offset, tuple_py2\n'), ((40297, 40307), 'atelier.utils.AttrDict', 'AttrDict', ([], {}), '()\n', (40305, 40307), False, 'from atelier.utils import AttrDict, date_offset, tuple_py2\n'), ((41201, 41221), 'lino.utils.config.ConfigDirCache', 'ConfigDirCache', (['self'], {}), '(self)\n', (41215, 41221), False, 'from lino.utils.config import ConfigDirCache\n'), ((43154, 43193), 'os.environ.get', 'os.environ.get', (['"""LINO_CACHE_ROOT"""', 'None'], {}), "('LINO_CACHE_ROOT', None)\n", (43168, 43193), False, 'import os\n'), ((43653, 43676), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (43674, 43676), False, 'import datetime\n'), ((48546, 48592), 'os.environ.get', 'os.environ.get', (['"""LINO_SQL_LOGLEVEL"""', '"""WARNING"""'], {}), "('LINO_SQL_LOGLEVEL', 'WARNING')\n", (48560, 48592), False, 'import os\n'), ((50286, 50326), 'os.environ.get', 'os.environ.get', (['"""LINO_SITE_MODULE"""', 'None'], {}), "('LINO_SITE_MODULE', None)\n", (50300, 50326), False, 'import os\n'), ((75058, 75076), 'lino.core.kernel.site_startup', 'site_startup', (['self'], {}), '(self)\n', (75070, 75076), False, 'from lino.core.kernel import site_startup\n'), ((82148, 82182), 'atelier.utils.date_offset', 'date_offset', (['base', '*args'], {}), '(base, *args, **kwargs)\n', (82159, 82182), False, 'from atelier.utils import AttrDict, date_offset, tuple_py2\n'), ((82918, 82952), 'atelier.utils.date_offset', 'date_offset', (['base', '*args'], {}), '(base, *args, **kwargs)\n', (82929, 82952), False, 'from atelier.utils import AttrDict, date_offset, tuple_py2\n'), ((83962, 83993), 'lino.utils.dpy.install_migrations', 'install_migrations', (['self', '*args'], {}), '(self, *args)\n', (83980, 83993), False, 'from lino.utils.dpy import install_migrations\n'), ((84642, 84661), 'datetime.time', 'datetime.time', (['*hms'], {}), '(*hms)\n', (84655, 84661), False, 'import datetime\n'), ((85179, 85210), 'datetime.datetime', 'datetime.datetime', (['*(ymd + hms)'], {}), '(*(ymd + hms))\n', (85196, 85210), False, 'import datetime\n'), ((86076, 86088), 'lino.core.utils.get_models', 'get_models', ([], {}), '()\n', (86086, 86088), False, 'from lino.core.utils import simplify_name, get_models\n'), ((106139, 106154), 'atelier.utils.tuple_py2', 'tuple_py2', (['args'], {}), '(args)\n', (106148, 106154), False, 'from atelier.utils import AttrDict, date_offset, tuple_py2\n'), ((109103, 109130), 'atelier.rstgen.header', 'rstgen.header', (['(1)', '"""Plugins"""'], {}), "(1, 'Plugins')\n", (109116, 109130), False, 'from atelier import rstgen\n'), ((109353, 109391), 'atelier.rstgen.header', 'rstgen.header', (['(1)', '"""Config directories"""'], {}), "(1, 'Config directories')\n", (109366, 109391), False, 'from atelier import rstgen\n'), ((114497, 114540), 'lino.core.menus.Toolbar', 'menus.Toolbar', (['user.user_type', '"""quicklinks"""'], {}), "(user.user_type, 'quicklinks')\n", (114510, 114540), False, 
'from lino.core import menus\n'), ((115190, 115222), 'lino.core.menus.Toolbar', 'menus.Toolbar', (['user_type', '"""main"""'], {}), "(user_type, 'main')\n", (115203, 115222), False, 'from lino.core import menus\n'), ((126751, 126761), 'etgen.html.E.span', 'E.span', (['*p'], {}), '(*p)\n', (126757, 126761), False, 'from etgen.html import E\n'), ((127668, 127694), 'lino.core.requests.BaseRequest', 'requests.BaseRequest', ([], {}), '(**kw)\n', (127688, 127694), False, 'from lino.core import requests\n'), ((128129, 128139), 'lino.utils.format_date.fdl', 'fdl', (['today'], {}), '(today)\n', (128132, 128139), False, 'from lino.utils.format_date import fdl\n'), ((129170, 129202), 'lino.utils.moneyfmt', 'moneyfmt', (['v'], {'places': 'places'}), '(v, places=places, **kw)\n', (129178, 129202), False, 'from lino.utils import moneyfmt\n'), ((129582, 129589), 'django.db.models.Q', 'Q', ([], {}), '(**kw)\n', (129583, 129589), False, 'from django.db.models import Q\n'), ((10904, 10915), 'django.utils.translation.ugettext', '_', (['"""Master"""'], {}), "('Master')\n", (10905, 10915), True, 'from django.utils.translation import ugettext as _\n'), ((10962, 10974), 'django.utils.translation.ugettext', '_', (['"""Reports"""'], {}), "('Reports')\n", (10963, 10974), True, 'from django.utils.translation import ugettext as _\n'), ((10996, 11010), 'django.utils.translation.ugettext', '_', (['"""Configure"""'], {}), "('Configure')\n", (10997, 11010), True, 'from django.utils.translation import ugettext as _\n'), ((11034, 11047), 'django.utils.translation.ugettext', '_', (['"""Explorer"""'], {}), "('Explorer')\n", (11035, 11047), True, 'from django.utils.translation import ugettext as _\n'), ((11067, 11076), 'django.utils.translation.ugettext', '_', (['"""Site"""'], {}), "('Site')\n", (11068, 11076), True, 'from django.utils.translation import ugettext as _\n'), ((39511, 39565), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['"""setup_choicelists is no longer supported"""'], {}), "('setup_choicelists is no longer supported')\n", (39521, 39565), False, 'from lino.core.exceptions import ChangedAPI\n'), ((39629, 39681), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['"""setup_workflows is no longer supported"""'], {}), "('setup_workflows is no longer supported')\n", (39639, 39681), False, 'from lino.core.exceptions import ChangedAPI\n'), ((40113, 40169), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['"""The no_local argument is no longer needed."""'], {}), "('The no_local argument is no longer needed.')\n", (40123, 40169), False, 'from lino.core.exceptions import ChangedAPI\n'), ((46478, 46509), 'os.environ.get', 'os.environ.get', (['"""LINO_LOGLEVEL"""'], {}), "('LINO_LOGLEVEL')\n", (46492, 46509), False, 'import os\n'), ((46541, 46577), 'os.environ.get', 'os.environ.get', (['"""LINO_FILE_LOGLEVEL"""'], {}), "('LINO_FILE_LOGLEVEL')\n", (46555, 46577), False, 'import os\n'), ((50369, 50395), 'importlib.import_module', 'import_module', (['site_module'], {}), '(site_module)\n', (50382, 50395), False, 'from importlib import import_module\n'), ((51534, 51591), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['"""Replace hidden_apps by get_apps_modifiers()"""'], {}), "('Replace hidden_apps by get_apps_modifiers()')\n", (51544, 51591), False, 'from lino.core.exceptions import ChangedAPI\n'), ((52446, 52469), 'six.text_type', 'six.text_type', (['app_name'], {}), '(app_name)\n', (52459, 52469), False, 'import six\n'), ((52554, 52577), 'importlib.import_module', 'import_module', (['app_name'], {}), 
'(app_name)\n', (52567, 52577), False, 'from importlib import import_module\n'), ((54595, 54606), 'builtins.str', 'str', (['"""lino"""'], {}), "('lino')\n", (54598, 54606), False, 'from builtins import str\n'), ((55126, 55156), 'builtins.str', 'str', (['"""django.contrib.sessions"""'], {}), "('django.contrib.sessions')\n", (55129, 55156), False, 'from builtins import str\n'), ((55716, 55770), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['"""override_modlib_models no longer allowed"""'], {}), "('override_modlib_models no longer allowed')\n", (55726, 55770), False, 'from lino.core.exceptions import ChangedAPI\n'), ((59292, 59302), 'os.path.exists', 'exists', (['fn'], {}), '(fn)\n', (59298, 59302), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((61263, 61279), 'lino.core.utils.simplify_name', 'simplify_name', (['k'], {}), '(k)\n', (61276, 61279), False, 'from lino.core.utils import simplify_name, get_models\n'), ((63180, 63219), 'os.environ.get', 'os.environ.get', (['"""LINO_CACHE_ROOT"""', 'None'], {}), "('LINO_CACHE_ROOT', None)\n", (63194, 63219), False, 'import os\n'), ((75870, 75897), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (75887, 75897), False, 'import logging\n'), ((76306, 76341), 'os.path.join', 'join', (['self.project_dir', 'subdir_name'], {}), '(self.project_dir, subdir_name)\n', (76310, 76341), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((76357, 76367), 'os.path.isdir', 'isdir', (['pth'], {}), '(pth)\n', (76362, 76367), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((78662, 78751), 'warnings.warn', 'warnings.warn', (['"""is_installed_model_spec is deprecated."""'], {'category': 'DeprecationWarning'}), "('is_installed_model_spec is deprecated.', category=\n DeprecationWarning)\n", (78675, 78751), False, 'import warnings\n'), ((87029, 87072), 'lino.api.dd.inject_field', 'dd.inject_field', (['self.user_model', 'name', 'fld'], {}), '(self.user_model, name, fld)\n', (87044, 87072), False, 'from lino.api import dd\n'), ((92561, 92592), 'lino.assert_django_code', 'assert_django_code', (['django_code'], {}), '(django_code)\n', (92579, 92592), False, 'from lino import assert_django_code, DJANGO_DEFAULT_LANGUAGE\n'), ((108321, 108335), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (108333, 108335), False, 'from django.utils.translation import get_language\n'), ((109502, 109518), 'os.path.relpath', 'relpath', (['cd.name'], {}), '(cd.name)\n', (109509, 109518), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((125179, 125229), 'django.core.mail.send_mail', 'send_mail', (['subject', 'body', 'sender', 'recipients'], {}), '(subject, body, sender, recipients, **kw)\n', (125188, 125229), False, 'from django.core.mail import send_mail\n'), ((126496, 126508), 'django.utils.translation.ugettext', '_', (['""" using """'], {}), "(' using ')\n", (126497, 126508), True, 'from django.utils.translation import ugettext as _\n'), ((41505, 41520), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['msg'], {}), '(msg)\n', (41515, 41520), False, 'from lino.core.exceptions import ChangedAPI\n'), ((58520, 58537), 'importlib.import_module', 'import_module', (['mn'], {}), '(mn)\n', (58533, 58537), False, 'from importlib import import_module\n'), ((59219, 59249), 'os.path.dirname', 'dirname', (['p.app_module.__file__'], {}), '(p.app_module.__file__)\n', (59226, 59249), False, 'from os.path import normpath, 
dirname, join, isdir, relpath, exists\n'), ((59387, 59404), 'importlib.import_module', 'import_module', (['mn'], {}), '(mn)\n', (59400, 59404), False, 'from importlib import import_module\n'), ((62840, 62879), 'os.path.join', 'join', (['self.cache_dir', '"""media"""', '"""webdav"""'], {}), "(self.cache_dir, 'media', 'webdav')\n", (62844, 62879), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((76652, 76662), 'os.path.isdir', 'isdir', (['pth'], {}), '(pth)\n', (76657, 76662), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((76887, 76901), 'os.path.isdir', 'isdir', (['dirname'], {}), '(dirname)\n', (76892, 76901), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((76958, 76978), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (76969, 76978), False, 'import os\n'), ((80052, 80079), 'builtins.str', 'str', (['self.installed_plugins'], {}), '(self.installed_plugins)\n', (80055, 80079), False, 'from builtins import str\n'), ((80112, 80143), 'lino.core.utils.resolve_model', 'resolve_model', (['spec'], {'strict': 'msg'}), '(spec, strict=msg)\n', (80125, 80143), False, 'from lino.core.utils import resolve_model\n'), ((80574, 80596), 'django.apps.apps.get_app_configs', 'apps.get_app_configs', ([], {}), '()\n', (80594, 80596), False, 'from django.apps import apps\n'), ((82804, 82825), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (82823, 82825), False, 'import datetime\n'), ((82881, 82902), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (82900, 82902), False, 'import datetime\n'), ((87697, 87717), 'django.get_version', 'django.get_version', ([], {}), '()\n', (87715, 87717), False, 'import django\n'), ((90508, 90548), 'social_core.backends.utils.load_backends', 'load_backends', (['self.social_auth_backends'], {}), '(self.social_auth_backends)\n', (90521, 90548), False, 'from social_core.backends.utils import load_backends\n'), ((90590, 90632), 'etgen.html.E.a', 'E.a', (['b.name'], {'href': "('/oauth/login/' + b.name)"}), "(b.name, href='/oauth/login/' + b.name)\n", (90593, 90632), False, 'from etgen.html import E\n'), ((92948, 92964), 'builtins.str', 'str', (['django_code'], {}), '(django_code)\n', (92951, 92964), False, 'from builtins import str\n'), ((92966, 92975), 'builtins.str', 'str', (['name'], {}), '(name)\n', (92969, 92975), False, 'from builtins import str\n'), ((92980, 92991), 'builtins.str', 'str', (['suffix'], {}), '(suffix)\n', (92983, 92991), False, 'from builtins import str\n'), ((95142, 95154), 'django.utils.translation.ugettext', '_', (['"""English"""'], {}), "('English')\n", (95143, 95154), True, 'from django.utils.translation import ugettext as _\n'), ((96071, 96078), 'django.utils.translation.ugettext', '_', (['text'], {}), '(text)\n', (96072, 96078), True, 'from django.utils.translation import ugettext as _\n'), ((100112, 100140), 'django.utils.translation.override', 'translation.override', (['simple'], {}), '(simple)\n', (100132, 100140), False, 'from django.utils import translation\n'), ((100183, 100201), 'six.text_type', 'six.text_type', (['txt'], {}), '(txt)\n', (100196, 100201), False, 'import six\n'), ((101725, 101741), 'six.text_type', 'six.text_type', (['v'], {}), '(v)\n', (101738, 101741), False, 'import six\n'), ((105563, 105577), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (105575, 105577), False, 'from django.utils.translation import get_language\n'), ((115988, 116010), 
'django.apps.apps.get_app_configs', 'apps.get_app_configs', ([], {}), '()\n', (116008, 116010), False, 'from django.apps import apps\n'), ((123226, 123245), 'six.moves.urllib.parse.urlencode', 'urlencode', (['kw', '(True)'], {}), '(kw, True)\n', (123235, 123245), False, 'from six.moves.urllib.parse import urlencode\n'), ((123448, 123467), 'six.moves.urllib.parse.urlencode', 'urlencode', (['kw', '(True)'], {}), '(kw, True)\n', (123457, 123467), False, 'from six.moves.urllib.parse import urlencode\n'), ((123672, 123691), 'six.moves.urllib.parse.urlencode', 'urlencode', (['kw', '(True)'], {}), '(kw, True)\n', (123681, 123691), False, 'from six.moves.urllib.parse import urlencode\n'), ((126161, 126174), 'django.utils.translation.ugettext', '_', (['"""This is """'], {}), "('This is ')\n", (126162, 126174), True, 'from django.utils.translation import ugettext as _\n'), ((126619, 126655), 'etgen.html.E.a', 'E.a', (['name'], {'href': 'url', 'target': '"""_blank"""'}), "(name, href=url, target='_blank')\n", (126622, 126655), False, 'from etgen.html import E\n'), ((128023, 128047), 'django.utils.translation.ugettext', '_', (['"""%(place)s, %(date)s"""'], {}), "('%(place)s, %(date)s')\n", (128024, 128047), True, 'from django.utils.translation import ugettext as _\n'), ((129758, 129765), 'django.db.models.Q', 'Q', ([], {}), '(**kw)\n', (129759, 129765), False, 'from django.db.models import Q\n'), ((43234, 43250), 'unipath.Path', 'Path', (['cache_root'], {}), '(cache_root)\n', (43238, 43250), False, 'from unipath import Path\n'), ((43555, 43577), 'unipath.Path', 'Path', (['self.project_dir'], {}), '(self.project_dir)\n', (43559, 43577), False, 'from unipath import Path\n'), ((62948, 62977), 'os.path.join', 'join', (['self.cache_dir', '"""media"""'], {}), "(self.cache_dir, 'media')\n", (62952, 62977), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((66925, 66951), 'os.path.join', 'join', (['p', '""".."""', '"""models.py"""'], {}), "(p, '..', 'models.py')\n", (66929, 66951), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((76253, 76284), 'inspect.getfile', 'inspect.getfile', (['self.__class__'], {}), '(self.__class__)\n', (76268, 76284), False, 'import inspect\n'), ((76540, 76561), 'inspect.isbuiltin', 'inspect.isbuiltin', (['cl'], {}), '(cl)\n', (76557, 76561), False, 'import inspect\n'), ((98371, 98385), 'builtins.str', 'str', (['languages'], {}), '(languages)\n', (98374, 98385), False, 'from builtins import str\n'), ((105950, 105964), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (105962, 105964), False, 'from django.utils.translation import get_language\n'), ((116324, 116339), 'lino.core.exceptions.ChangedAPI', 'ChangedAPI', (['msg'], {}), '(msg)\n', (116334, 116339), False, 'from lino.core.exceptions import ChangedAPI\n'), ((76598, 76617), 'inspect.getfile', 'inspect.getfile', (['cl'], {}), '(cl)\n', (76613, 76617), False, 'import inspect\n'), ((81558, 81585), 'importlib.import_module', 'import_module', (['b.__module__'], {}), '(b.__module__)\n', (81571, 81585), False, 'from importlib import import_module\n'), ((86169, 86183), 'lino.core.merge.MergeAction', 'MergeAction', (['m'], {}), '(m)\n', (86180, 86183), False, 'from lino.core.merge import MergeAction\n'), ((92020, 92039), 'builtins.str', 'str', (['self.languages'], {}), '(self.languages)\n', (92023, 92039), False, 'from builtins import str\n'), ((126251, 126273), 'builtins.str', 'str', (['self.verbose_name'], {}), '(self.verbose_name)\n', (126254, 
126273), False, 'from builtins import str\n'), ((126355, 126377), 'builtins.str', 'str', (['self.verbose_name'], {}), '(self.verbose_name)\n', (126358, 126377), False, 'from builtins import str\n'), ((128078, 128095), 'builtins.str', 'str', (['sc.city.name'], {}), '(sc.city.name)\n', (128081, 128095), False, 'from builtins import str\n'), ((128102, 128112), 'lino.utils.format_date.fdl', 'fdl', (['today'], {}), '(today)\n', (128105, 128112), False, 'from lino.utils.format_date import fdl\n'), ((42702, 42723), 'os.path.dirname', 'dirname', (['project_file'], {}), '(project_file)\n', (42709, 42723), False, 'from os.path import normpath, dirname, join, isdir, relpath, exists\n'), ((63324, 63340), 'unipath.Path', 'Path', (['cache_root'], {}), '(cache_root)\n', (63328, 63340), False, 'from unipath import Path\n'), ((98688, 98694), 'builtins.str', 'str', (['k'], {}), '(k)\n', (98691, 98694), False, 'from builtins import str\n')]
|
__author__ = '[<NAME>](https://github.com/adamsqi)'
__date__ = '2020.06.21'
"""
This is a script for automatic generation of README.md content. It parses all .py script files within the repository and creates a README.md file.
Inspired by: [<NAME>](https://github.com/bamos/python-scripts/blob/master/README.md)
"""
import ast
import os
import re
from typing import List, Set
UNPARSABLE_FILES = ['.git', '.gitignore', 'README.md', 'generate_readme.py', 'LICENSE']
README_TEMPLATE = """<h1 align="center">Python scripts</h1>
<div align="center">

[](https://opensource.org/licenses/MIT)
</div>
This is a collection of short Python scripts I use as utility tools or just for testing of various features.
{content}
"""
def main():
generator = ReadmeGenerator()
generator.generate()
class ReadmeGenerator():
def __init__(self):
pass
def generate(self):
content = self._prepare_content()
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir, 'README.md'), mode='w') as f:
ready_content = README_TEMPLATE.format(content=content)
f.write(ready_content)
def _prepare_content(self) -> str:
scripts_finder = ValidScriptsFinder()
valid_script_names = scripts_finder.find()
content = self._get_all_content_from_scripts(script_names=valid_script_names)
return content
def _get_all_content_from_scripts(self, script_names: List[str]) -> str:
content = ''
script_names = sorted(script_names)
for name in script_names:
script_link = self._generate_script_link(script_name=name)
meta_text = self._parse_single_file(name=name)
content += '### ' + script_link + '\n\n' + meta_text + '\n\n\n'
return content
def _generate_script_link(self, script_name: str) -> str:
url_base = 'https://github.com/adamsqi/python-scripts/blob/master/scripts/'
url = url_base + script_name
return f'[{script_name}]({url})'
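# Illustrative output: _generate_script_link('foo.py') returns
# '[foo.py](https://github.com/adamsqi/python-scripts/blob/master/scripts/foo.py)'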
def _parse_single_file(self, name: str) -> str:
content = self._read_file(file_path=name)
meta_text = self._extract_doc_string(content)
return meta_text
def _read_file(self, file_path: str) -> str:
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir, 'scripts', file_path), mode='r') as f:
return f.read()
def _get_parent_dir_path(self) -> str:
dir_path = os.path.dirname(os.getcwd())
return dir_path
def _extract_doc_string(self, content: str) -> str:
ast_module = ast.parse(content)
# Assumes every script starts with __author__, __date__ and a module
# docstring, in that order, as its first three top-level statements.
author, date, doc_string = [ast.literal_eval(m.value) for m in ast_module.body[0:3]]
return f"""
+ Author: {author}
+ Created at: {date}
#### Description: {doc_string}
"""
class ValidScriptsFinder():
def __init__(self):
pass
def find(self) -> List[str]:
script_names = self._get_valid_script_names_within_cwd()
return script_names
def _get_valid_script_names_within_cwd(self) -> List[str]:
all_file_names = self._get_all_files_within_parent_dir()
file_names = set(all_file_names) - set(UNPARSABLE_FILES)
valid_file_names = self._exclude_files_with_ignored_extensions(file_names)
return valid_file_names
def _get_all_files_within_parent_dir(self) -> List[str]:
files = [file for file in os.listdir(os.path.join(self._get_parent_dir_path(), 'scripts'))]
return files
def _get_parent_dir_path(self) -> str:
dir_path = os.path.dirname(os.getcwd())
return dir_path
def _exclude_files_with_ignored_extensions(self, file_names: Set[str]) -> List[str]:
ignored_extensions = self._read_git_ignore()
result = [name for name in file_names if not any(sub in name for sub in ignored_extensions)] # only files without ignored extensions
return result
def _read_git_ignore(self) -> List[str]:
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir,'.gitignore'), mode='r') as f:
data = f.read()
data = data.split('\n')
data = [el.replace('*', '') for el in data]
return data
if __name__ == '__main__':
main()
|
[
"os.getcwd",
"ast.parse",
"os.path.join"
] |
[((2818, 2836), 'ast.parse', 'ast.parse', (['content'], {}), '(content)\n', (2827, 2836), False, 'import ast\n'), ((2691, 2702), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2700, 2702), False, 'import os\n'), ((3865, 3876), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3874, 3876), False, 'import os\n'), ((1142, 1179), 'os.path.join', 'os.path.join', (['parent_dir', '"""README.md"""'], {}), "(parent_dir, 'README.md')\n", (1154, 1179), False, 'import os\n'), ((2508, 2554), 'os.path.join', 'os.path.join', (['parent_dir', '"""scripts"""', 'file_path'], {}), "(parent_dir, 'scripts', file_path)\n", (2520, 2554), False, 'import os\n'), ((4338, 4376), 'os.path.join', 'os.path.join', (['parent_dir', '""".gitignore"""'], {}), "(parent_dir, '.gitignore')\n", (4350, 4376), False, 'import os\n')]
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0602_recurrent_neural_network.py
@Version : v0.1
@Time : 2019-11-24 16:00
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec0602, P162
@Desc : Deep learning for text and sequences: understanding recurrent neural networks (not really suited to this sentiment-analysis task; see 0603 for a further look at RNNs)
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17"; versions below 1.17 do not raise the error
import winsound
from keras.activations import relu, sigmoid
from keras.datasets import imdb
from keras.layers import Dense
from keras.layers import Embedding, LSTM, SimpleRNN
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
# Suppress the warning: "Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA"
from tools import plot_classes_results
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the printing precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# ----------------------------------------------------------------------
# Listing 6-21: NumPy implementation of a simple RNN
def simple_rnn_use_numpy():
timesteps = 100  # number of timesteps in the input sequence
input_features = 32  # dimensionality of the input feature space
output_features = 64  # dimensionality of the output feature space
# Input data: random noise, purely as an example
inputs = np.random.random((timesteps, input_features))
state_t = np.zeros((output_features,))  # initial state: an all-zero vector
# Create random weight matrices
W = np.random.random((output_features, input_features)) / 10
U = np.random.random((output_features, output_features)) / 10
b = np.random.random((output_features,)) / 10
successive_outputs = []
for input_t in inputs:
# current output combines the current input with the previous output (state)
output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)
successive_outputs.append(output_t)  # store this output in a list
# Update the state of the network for the next timestep
state_t = output_t
pass
# The final output is a 2D tensor of shape (timesteps, output_features)
# np.stack() turns the list of arrays into a single 2D array
final_output_sequence = np.stack(successive_outputs, axis = 0)
return final_output_sequence
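# Illustrative check (not in the original listing): the returned array has
# shape (timesteps, output_features), i.e. (100, 64) with the values above:
#   assert simple_rnn_use_numpy().shape == (100, 64)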
# ----------------------------------------------------------------------
# Keras implementation of a simple RNN
def keras_simplernn():
model = Sequential(name = "完整的状态序列")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.summary()
model = Sequential(name = "最后一个时间步的输出")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
model = Sequential(name = "多个循环层的逐个堆叠")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32))
model.summary()
pass
# Apply RNN and LSTM models to the IMDB movie-review classification problem
max_features = 10000
max_len = 500
batch_size = 128
epochs = 10
# See ch0304 for a detailed description of the dataset
print("Listing 6.22:加载数据集...")
(train_data, y_train), (test_data, y_test) = imdb.load_data(num_words = max_features)
x_train = pad_sequences(train_data, maxlen = max_len)
x_test = pad_sequences(test_data, maxlen = max_len)
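# x_train and x_test are now integer matrices of shape (25000, 500): reviews
# longer than max_len are truncated and shorter ones are zero-padded.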
def train_model(model, data, labels):
return model.fit(data, labels, epochs = epochs, batch_size = batch_size,
validation_split = 0.2, verbose = 2, use_multiprocessing = True)
# ----------------------------------------------------------------------
def definite_rnn():
title = "将 SimpleRNN 应用于 IMDB "
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(SimpleRNN(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
def definite_lstm():
title = "将 LSTM 应用于 IMDB"
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(LSTM(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
# Revisit the binary classification problem from ch0304
def vectorize_sequences(sequences, dimension = 10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
pass
return results
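# Illustrative example (not in the original script):
#   vectorize_sequences([[3, 5]], dimension=10)
# returns a (1, 10) float array with 1. at columns 3 and 5 and 0. elsewhere.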
vector_train_data = vectorize_sequences(train_data, max_features)
vector_test_data = vectorize_sequences(test_data, max_features)
vector_train_labels = np.asarray(y_train)
vector_test_labels = np.asarray(y_test)
# With one-hot encoded data, accuracy is even better than with RNN and LSTM (ch0304 confirmed that dense layers work very well here)
def definite_dense_for_one_hot():
title = "将 Dense+One-Hot 应用于 IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (10000,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, vector_train_data, vector_train_labels)
plot_classes_results(history, title, epochs)
print(title + "评估测试集",
model.evaluate(vector_test_data, vector_test_labels, verbose = 2, use_multiprocessing = True))
pass
# Without one-hot encoding the data, accuracy drops sharply
def definite_dense():
title = "将 Dense 应用于 IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (500,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
definite_rnn()
definite_lstm()
definite_dense_for_one_hot()
definite_dense()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
[
"keras.optimizers.rmsprop",
"numpy.random.seed",
"keras.preprocessing.sequence.pad_sequences",
"numpy.set_printoptions",
"tools.plot_classes_results",
"matplotlib.pyplot.get_fignums",
"keras.datasets.imdb.load_data",
"numpy.stack",
"keras.layers.SimpleRNN",
"matplotlib.pyplot.show",
"numpy.asarray",
"winsound.Beep",
"numpy.dot",
"keras.layers.LSTM",
"numpy.zeros",
"numpy.random.random",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((1218, 1303), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=3, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1237, 1303), True, 'import numpy as np\n'), ((1369, 1389), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1383, 1389), True, 'import numpy as np\n'), ((3470, 3508), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (3484, 3508), False, 'from keras.datasets import imdb\n'), ((3521, 3562), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_data'], {'maxlen': 'max_len'}), '(train_data, maxlen=max_len)\n', (3534, 3562), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3574, 3614), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_data'], {'maxlen': 'max_len'}), '(test_data, maxlen=max_len)\n', (3587, 3614), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5554, 5573), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (5564, 5573), True, 'import numpy as np\n'), ((5595, 5613), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (5605, 5613), True, 'import numpy as np\n'), ((7117, 7140), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (7130, 7140), False, 'import winsound\n'), ((1803, 1848), 'numpy.random.random', 'np.random.random', (['(timesteps, input_features)'], {}), '((timesteps, input_features))\n', (1819, 1848), True, 'import numpy as np\n'), ((1863, 1891), 'numpy.zeros', 'np.zeros', (['(output_features,)'], {}), '((output_features,))\n', (1871, 1891), True, 'import numpy as np\n'), ((2500, 2536), 'numpy.stack', 'np.stack', (['successive_outputs'], {'axis': '(0)'}), '(successive_outputs, axis=0)\n', (2508, 2536), True, 'import numpy as np\n'), ((2702, 2728), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""完整的状态序列"""'}), "(name='完整的状态序列')\n", (2712, 2728), False, 'from keras.models import Sequential\n'), ((2854, 2883), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""最后一个时间步的输出"""'}), "(name='最后一个时间步的输出')\n", (2864, 2883), False, 'from keras.models import Sequential\n'), ((2984, 3013), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""多个循环层的逐个堆叠"""'}), "(name='多个循环层的逐个堆叠')\n", (2994, 3013), False, 'from keras.models import Sequential\n'), ((3963, 3985), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (3973, 3985), False, 'from keras.models import Sequential\n'), ((4289, 4333), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (4309, 4333), False, 'from tools import plot_classes_results\n'), ((4581, 4603), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (4591, 4603), False, 'from keras.models import Sequential\n'), ((4934, 4978), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (4954, 4978), False, 'from tools import plot_classes_results\n'), ((5764, 5786), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (5774, 5786), False, 'from keras.models import Sequential\n'), ((6152, 6196), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (6172, 6196), False, 'from tools import 
plot_classes_results\n'), ((6437, 6459), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (6447, 6459), False, 'from keras.models import Sequential\n'), ((6801, 6845), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (6821, 6845), False, 'from tools import plot_classes_results\n'), ((7177, 7187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7185, 7187), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1981), 'numpy.random.random', 'np.random.random', (['(output_features, input_features)'], {}), '((output_features, input_features))\n', (1946, 1981), True, 'import numpy as np\n'), ((1995, 2047), 'numpy.random.random', 'np.random.random', (['(output_features, output_features)'], {}), '((output_features, output_features))\n', (2011, 2047), True, 'import numpy as np\n'), ((2061, 2097), 'numpy.random.random', 'np.random.random', (['(output_features,)'], {}), '((output_features,))\n', (2077, 2097), True, 'import numpy as np\n'), ((2745, 2765), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (2754, 2765), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2781, 2817), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (2790, 2817), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2900, 2920), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (2909, 2920), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2936, 2949), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {}), '(32)\n', (2945, 2949), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3030, 3050), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (3039, 3050), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3066, 3102), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3075, 3102), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3120, 3156), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3129, 3156), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3174, 3210), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3183, 3210), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3228, 3241), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {}), '(32)\n', (3237, 3241), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4002, 4029), 'keras.layers.Embedding', 'Embedding', (['max_features', '(64)'], {}), '(max_features, 64)\n', (4011, 4029), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4045, 4058), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(64)'], {}), '(64)\n', (4054, 4058), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4074, 4102), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (4079, 4102), False, 'from keras.layers import Dense\n'), ((4620, 4647), 'keras.layers.Embedding', 'Embedding', (['max_features', '(64)'], {}), '(max_features, 64)\n', (4629, 4647), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4663, 4671), 'keras.layers.LSTM', 'LSTM', (['(64)'], {}), '(64)\n', (4667, 4671), False, 'from 
keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4687, 4715), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (4692, 4715), False, 'from keras.layers import Dense\n'), ((5803, 5851), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu', 'input_shape': '(10000,)'}), '(16, activation=relu, input_shape=(10000,))\n', (5808, 5851), False, 'from keras.layers import Dense\n'), ((5871, 5897), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu'}), '(16, activation=relu)\n', (5876, 5897), False, 'from keras.layers import Dense\n'), ((5915, 5943), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (5920, 5943), False, 'from keras.layers import Dense\n'), ((6476, 6522), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu', 'input_shape': '(500,)'}), '(16, activation=relu, input_shape=(500,))\n', (6481, 6522), False, 'from keras.layers import Dense\n'), ((6542, 6568), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu'}), '(16, activation=relu)\n', (6547, 6568), False, 'from keras.layers import Dense\n'), ((6586, 6614), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (6591, 6614), False, 'from keras.layers import Dense\n'), ((7148, 7165), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (7163, 7165), True, 'import matplotlib.pyplot as plt\n'), ((4156, 4173), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4163, 4173), False, 'from keras.optimizers import rmsprop\n'), ((4769, 4786), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4776, 4786), False, 'from keras.optimizers import rmsprop\n'), ((5997, 6014), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6004, 6014), False, 'from keras.optimizers import rmsprop\n'), ((6668, 6685), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6675, 6685), False, 'from keras.optimizers import rmsprop\n'), ((2216, 2234), 'numpy.dot', 'np.dot', (['W', 'input_t'], {}), '(W, input_t)\n', (2222, 2234), True, 'import numpy as np\n'), ((2237, 2255), 'numpy.dot', 'np.dot', (['U', 'state_t'], {}), '(U, state_t)\n', (2243, 2255), True, 'import numpy as np\n')]
|
import numpy as np
from Classes.Uncertainty import Uncertainty
from Classes.QComp import QComp
class QAData(object):
"""Evaluates and stores quality assurance characteristics and messages.
Attributes
----------
q_run_threshold_caution: int
Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_run_threshold_warning: int
Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_total_threshold_caution: int
Caution threshold for total interpolated discharge for invalid ensembles, in percent.
q_total_threshold_warning: int
Warning threshold for total interpolated discharge for invalid ensembles, in percent.
transects: dict
Dictionary of quality assurance checks for transects
system_tst: dict
Dictionary of quality assurance checks on the system test(s)
compass: dict
Dictionary of quality assurance checks on compass calibration and evaluations
temperature: dict
Dictionary of quality assurance checks on temperature comparisons and variation
moving_bed: dict
Dictionary of quality assurance checks on moving-bed tests
user: dict
Dictionary of quality assurance checks on user input data
bt_vel: dict
Dictionary of quality assurance checks on bottom track velocities
gga_vel: dict
Dictionary of quality assurance checks on gga boat velocities
vtg_vel: dict
Dictionary of quality assurance checks on vtg boat velocities
w_vel: dict
Dictionary of quality assurance checks on water track velocities
extrapolation: dict
Dictionary of quality assurance checks on extrapolations
edges: dict
Dictionary of quality assurance checks on edges
"""
def __init__(self, meas):
"""Checks the measurement for all quality assurance issues.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Set default thresholds
self.q_run_threshold_caution = 3
self.q_run_threshold_warning = 5
self.q_total_threshold_caution = 10
self.q_total_threshold_warning = 25
# Initialize instance variables
self.transects = dict()
self.system_tst = dict()
self.compass = dict()
self.temperature = dict()
self.moving_bed = dict()
self.user = dict()
self.depths = dict()
self.bt_vel = dict()
self.gga_vel = dict()
self.vtg_vel = dict()
self.w_vel = dict()
self.extrapolation = dict()
self.edges = dict()
# Apply QA checks
self.transects_qa(meas)
self.system_tst_qa(meas)
self.compass_qa(meas)
self.temperature_qa(meas)
self.moving_bed_qa(meas)
self.user_qa(meas)
self.depths_qa(meas)
self.boat_qa(meas)
self.water_qa(meas)
self.extrapolation_qa(meas)
self.edges_qa(meas)
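# Typical use (sketch; assumes `meas` is a fully populated Measurement object):
#   qa = QAData(meas)
#   print(qa.transects['status'], qa.transects['messages'])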
def transects_qa(self, meas):
"""Apply quality checks to transects
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Assume good results
self.transects['status'] = 'good'
# Initialize keys
self.transects['messages'] = []
self.transects['recip'] = 0
self.transects['sign'] = 0
self.transects['duration'] = 0
self.transects['number'] = 0
self.transects['uncertainty'] = 0
checked = []
discharges = []
start_edge = []
for n in range(len(meas.transects)):
checked.append(meas.transects[n].checked)
if meas.transects[n].checked:
discharges.append(meas.discharge[n])
start_edge.append(meas.transects[n].start_edge)
num_checked = np.nansum(np.asarray(checked))
# Check duration
total_duration = 0
if num_checked >= 1:
for transect in meas.transects:
if transect.checked:
total_duration += transect.date_time.transect_duration_sec
# Check duration against USGS policy
if total_duration < 720:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Duration of selected transects is less than 720 seconds;', 2, 0])
self.transects['duration'] = 1
# Check transects for missing ensembles
for transect in meas.transects:
if transect.checked:
# Determine number of missing ensembles
if transect.adcp.manufacturer == 'SonTek':
# Determine number of missing ensembles for SonTek data
idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
if len(idx_missing) > 0:
average_ensemble_duration = ((np.nansum(transect.date_time.ens_duration_sec)
- np.nansum(transect.date_time.ens_duration_sec[idx_missing]))
/ (len(transect.date_time.ens_duration_sec) - len(idx_missing)))
num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
/ average_ensemble_duration) - len(idx_missing)
else:
num_missing = 0
else:
# Determine number of lost ensembles for TRDI data
idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec))[0]
num_missing = len(idx_missing) - 1
# Save caution message
if num_missing > 0:
self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+ str(int(num_missing)) + ' ensembles;', 2, 0])
self.transects['status'] = 'caution'
# Check number of transects checked
if num_checked == 0:
# No transects selected
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
self.transects['number'] = 2
elif num_checked == 1:
# Only one transect selected
self.transects['status'] = 'caution'
self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
self.transects['number'] = 2
else:
self.transects['number'] = num_checked
if num_checked == 2:
# Only 2 transects selected
cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
# Check uncertainty
if cov > 2:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
# Check for consistent sign
q_positive = []
for q in discharges:
if q.total >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1:
self.transects['status'] = 'warning'
self.transects['messages'].append(
['Transects: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
# Check for reciprocal transects
num_left = start_edge.count('Left')
num_right = start_edge.count('Right')
if not num_left == num_right:
self.transects['status'] = 'warning'
self.transects['messages'].append(['Transects: Transects selected are not reciprocal transects;', 1, 0])
# Check for zero discharge transects
q_zero = False
for q in discharges:
if q.total == 0:
q_zero = True
if q_zero:
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
def system_tst_qa(self, meas):
"""Apply QA checks to system test.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.system_tst['messages'] = []
self.system_tst['status'] = 'good'
# Determine if a system test was recorded
if not meas.system_test:
# No system test data recorded
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
else:
pt3_fail = False
num_tests_with_failure = 0
for test in meas.system_test:
if hasattr(test, 'result'):
if 'pt3' in test.result and test.result['pt3'] is not None:
# Check hard_limit, high gain, wide bandwidth
if 'hard_limit' in test.result['pt3']:
if 'high_wide' in test.result['pt3']['hard_limit']:
corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
if len(corr_table) > 0:
# All lags past lag 2 should be less than 50% of lag 0
qa_threshold = corr_table[0, :] * 0.5
all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
# Lag 7 should be less than 25% of lag 0
lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
# If either condition is met for any beam the test fails
if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
pt3_fail = True
if test.result['n_failed'] is not None and test.result['n_failed'] > 0:
num_tests_with_failure += 1
if pt3_fail:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
# Check for failed tests
if num_tests_with_failure == len(meas.system_test):
# All tests had a failure
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(
['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
elif num_tests_with_failure > 0:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more system test sets have at least one test that failed;', 2, 3])
def compass_qa(self, meas):
"""Apply QA checks to compass calibration and evaluation.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.compass['messages'] = []
checked = []
for transect in meas.transects:
checked.append(transect.checked)
if np.any(checked):
heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data)
else:
heading = np.array([0])
        # Initialize variable as if ADCP has no compass
self.compass['status'] = 'inactive'
self.compass['status1'] = 'good'
self.compass['status2'] = 'good'
self.compass['magvar'] = 0
self.compass['magvar_idx'] = 0
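        # status1 tracks the calibration/evaluation checks; status2 tracks the magvar, pitch, and roll checks.
        # The two are combined into the overall compass status at the end of this method.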
if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
# ADCP has a compass
            # A compass calibration is required if a loop test or GPS is used
# Check for loop test
loop = False
for test in meas.mb_tests:
if test.type == 'Loop':
loop = True
# Check for GPS data
gps = False
if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
gps = True
if gps or loop:
# Compass calibration is required
# Determine the ADCP manufacturer
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# SonTek ADCP
if not meas.compass_cal:
# No compass calibration
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
# If the error cannot be decoded from the calibration assume the calibration is good
self.compass['status1'] = 'good'
else:
if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['COMPASS: Calibration result > 0.2 deg;', 2, 4])
elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
# TRDI ADCP
if not meas.compass_cal:
# No compass calibration
if not meas.compass_eval:
# No calibration or evaluation
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
else:
# No calibration but an evaluation was completed
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
else:
# Compass was calibrated
if not meas.compass_eval:
# No compass evaluation
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
else:
# Check results of evaluation
try:
if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
except ValueError:
self.compass['status1'] = 'good'
else:
# Compass not required
if (not meas.compass_cal) and (not meas.compass_eval):
# No compass calibration or evaluation
self.compass['status1'] = 'default'
else:
# Compass was calibrated and evaluated
self.compass['status1'] = 'good'
# Check for consistent magvar
magvar = []
mag_error_exceeded = []
pitch_mean = []
pitch_std = []
pitch_exceeded = []
roll_mean = []
roll_std = []
roll_exceeded = []
for transect in meas.transects:
if transect.checked:
heading_source_selected = getattr(
transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
magvar.append(heading_source_selected.mag_var_deg)
pitch_mean.append(np.nanmean(pitch_source_selected.data))
pitch_std.append(np.nanstd(pitch_source_selected.data))
roll_mean.append(np.nanmean(roll_source_selected.data))
roll_std.append(np.nanstd(roll_source_selected.data))
# SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
if heading_source_selected.pitch_limit is not None:
                        # Work around a bug in SonTek data where pitch and roll are n x 3; use the first column (n x 1)
if len(pitch_source_selected.data.shape) == 1:
pitch_data = pitch_source_selected.data
else:
pitch_data = pitch_source_selected.data[:, 0]
idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
pitch_exceeded.append(True)
else:
pitch_exceeded.append(False)
if heading_source_selected.roll_limit is not None:
if len(roll_source_selected.data.shape) == 1:
roll_data = roll_source_selected.data
else:
roll_data = roll_source_selected.data[:, 0]
                        idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
                        idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
roll_exceeded.append(True)
else:
roll_exceeded.append(False)
if heading_source_selected.mag_error is not None:
idx_max = np.where(heading_source_selected.mag_error > 2)[0]
if len(idx_max) > 0:
mag_error_exceeded.append(True)
else:
mag_error_exceeded.append(False)
if len(np.unique(magvar)) > 1:
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: Magnetic variation is not consistent among transects;', 2, 4])
self.compass['magvar'] = 1
# Check that magvar was set if GPS data are available
if gps:
if 0 in magvar:
self.compass['status2'] = 'warning'
self.compass['messages'].append(
['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
self.compass['magvar'] = 2
self.compass['magvar_idx'] = magvar.index(0)
# Check pitch mean
if np.any(np.asarray(pitch_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
elif np.any(np.asarray(pitch_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
# Check roll mean
if np.any(np.asarray(roll_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
elif np.any(np.asarray(roll_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4])
# Check pitch standard deviation
if np.any(np.asarray(pitch_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4])
# Check roll standard deviation
if np.any(np.asarray(roll_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4])
# Additional checks for SonTek G3 compass
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# Check if pitch limits were exceeded
if any(pitch_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4])
# Check if roll limits were exceeded
if any(roll_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have roll exceeding calibration limits;', 2, 4])
# Check if magnetic error was exceeded
if any(mag_error_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4])
if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning':
self.compass['status'] = 'warning'
elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution':
self.compass['status'] = 'caution'
else:
self.compass['status'] = 'good'
def temperature_qa(self, meas):
"""Apply QA checks to temperature.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.temperature['messages'] = []
check = [0, 0]
# Create array of all temperatures
temp = np.array([])
checked = []
for transect in meas.transects:
if transect.checked:
checked.append(transect.checked)
temp_selected = getattr(transect.sensors.temperature_deg_c, transect.sensors.temperature_deg_c.selected)
if len(temp) == 0:
temp = temp_selected.data
else:
temp = np.hstack((temp, temp_selected.data))
# Check temperature range
if np.any(checked):
temp_range = np.nanmax(temp) - np.nanmin(temp)
else:
temp_range = 0
if temp_range > 2:
check[0] = 3
            self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
                                                 + '%3.1f' % temp_range
                                                 + ' degrees C which is greater than 2 degrees;', 1, 5])
elif temp_range > 1:
check[0] = 2
            self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
                                                 + '%3.1f' % temp_range
                                                 + ' degrees C which is greater than 1 degree;', 2, 5])
else:
check[0] = 1
# Check for independent temperature reading
if 'user' in meas.ext_temp_chk:
try:
user = float(meas.ext_temp_chk['user'])
except (ValueError, TypeError) as e:
user = None
if user is None:
# No independent temperature reading
check[1] = 2
self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5])
elif meas.ext_temp_chk['adcp']:
# Compare user to manually entered ADCP temperature
diff = np.abs(user - meas.ext_temp_chk['adcp'])
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
                        ['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
else:
# Compare user to mean of all temperature data
diff = np.abs(user - np.nanmean(temp))
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
                        ['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
# Assign temperature status
max_check = max(check)
if max_check == 1:
self.temperature['status'] = 'good'
elif max_check == 2:
self.temperature['status'] = 'caution'
elif max_check == 3:
self.temperature['status'] = 'warning'
def moving_bed_qa(self, meas):
"""Applies quality checks to moving-bed tests.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.moving_bed['messages'] = []
self.moving_bed['code'] = 0
# Are there moving-bed tests?
if len(meas.mb_tests) < 1:
# No moving-bed test
self.moving_bed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Moving-bed tests available
mb_data = meas.mb_tests
# Are tests valid according to the user
user_valid_test = []
file_names = []
idx_selected = []
test_quality = []
mb_tests = []
mb = []
mb_test_type = []
loop = []
for n, test in enumerate(mb_data):
if test.user_valid:
user_valid_test.append(True)
file_names.append(test.transect.file_name)
if test.type == 'Loop' and not test.test_quality == 'Errors':
loop.append(test.moving_bed)
# Selected test
if test.selected:
idx_selected.append(n)
test_quality.append(test.test_quality)
mb_tests.append(test)
mb.append(test.moving_bed)
mb_test_type.append(test.type)
else:
user_valid_test.append(False)
if not any(user_valid_test):
# No valid test according to user
self.moving_bed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;',
1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Check for duplicate valid moving-bed tests
if len(np.unique(file_names)) < len(file_names):
self.moving_bed['messages'].append([
'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
if self.moving_bed['code'] == 0:
# Check test quality
if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0:
self.moving_bed['status'] = 'good'
self.moving_bed['code'] = 1
# Check if there is a moving-bed
if any(mb):
# Moving-bed present
self.moving_bed['messages'].append(
['Moving-Bed Test: A moving-bed is present, use GPS or moving-bed correction;', 2, 6])
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
# Check for test type
if sum(np.array(mb_test_type) == 'Stationary'):
# Check for GPS or 3 stationary tests
if len(mb_tests) < 3:
gps = []
for transect in meas.transects:
if transect.checked:
if transect.gps is None:
gps.append(False)
else:
gps.append(True)
if not all(gps):
# GPS not available for all selected transects
self.moving_bed['messages'].append([
'Moving-Bed Test: '
+ 'Less than 3 stationary tests available for moving-bed correction;',
2, 6])
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0:
# Quality check has warnings
self.moving_bed['messages'].append(['Moving-Bed Test: The moving-bed test(s) has warnings, '
+ 'please review tests to determine validity;', 2, 6])
self.moving_bed['status'] = 'caution'
self.moving_bed['code'] = 2
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0:
# Manual override used
self.moving_bed['messages'].append(['MOVING-BED TEST: '
+ 'The user has manually forced the use of some tests;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Test has critical errors
self.moving_bed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors '
+ 'and will not be used;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
# Check multiple loops for consistency
if len(np.unique(loop)) > 1:
self.moving_bed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, '
+ 'review moving-bed tests;', 2, 6])
if self.moving_bed['code'] < 3:
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
def user_qa(self, meas):
"""Apply quality checks to user input data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.user['messages'] = []
self.user['status'] = 'good'
# Check for Station Name
self.user['sta_name'] = False
if meas.station_name is None:
self.user['messages'].append(['Site Info: Station name not entered;', 2, 2])
self.user['status'] = 'caution'
self.user['sta_name'] = True
# Check for Station Number
self.user['sta_number'] = False
if meas.station_number is None:
self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
self.user['status'] = 'caution'
            self.user['sta_number'] = True
def depths_qa(self, meas):
"""Apply quality checks to depth data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
self.depths['q_total'] = np.tile(np.nan, n_transects)
self.depths['q_max_run'] = np.tile(np.nan, n_transects)
self.depths['q_total_caution'] = np.tile(False, n_transects)
self.depths['q_run_caution'] = np.tile(False, n_transects)
self.depths['q_total_warning'] = np.tile(False, n_transects)
self.depths['q_run_warning'] = np.tile(False, n_transects)
self.depths['all_invalid'] = np.tile(False, n_transects)
self.depths['messages'] = []
self.depths['status'] = 'good'
self.depths['draft'] = 0
checked = []
drafts = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
in_transect_idx = transect.in_transect_idx
depths_selected = getattr(transect.depths, transect.depths.selected)
drafts.append(depths_selected.draft_use_m)
# Determine valid measured depths
if transect.depths.composite:
depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA'
depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN'
depth_valid = np.all(np.vstack((depth_na, depth_in)), 0)
else:
depth_valid_temp = depths_selected.valid_data[in_transect_idx]
                    depth_nan = np.logical_not(np.isnan(depths_selected.depth_processed_m[in_transect_idx]))
depth_valid = np.all(np.vstack((depth_nan, depth_valid_temp)), 0)
if not np.any(depth_valid):
self.depths['all_invalid'][n] = True
# Compute QA characteristics
q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n])
self.depths['q_total'][n] = q_total
self.depths['q_max_run'][n] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
self.depths['q_total_warning'][n] = True
elif q_total_percent > self.q_total_threshold_caution:
self.depths['q_total_caution'][n] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.depths['q_run_warning'][n] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.depths['q_run_caution'][n] = True
if checked:
# Create array of all unique draft values
draft_check = np.unique(np.round(drafts, 3))
# Check draft consistency
if len(draft_check) > 1:
self.depths['status'] = 'caution'
self.depths['draft'] = 1
self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10])
# Check for zero draft
if np.any(np.less(draft_check, 0.01)):
self.depths['status'] = 'warning'
self.depths['draft'] = 2
self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10])
# Check consecutive interpolated discharge criteria
if np.any(self.depths['q_run_warning']):
                self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds '
                                                + '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_run_caution']):
                self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds '
                                                + '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check total interpolated discharge criteria
if np.any(self.depths['q_total_warning']):
                self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds '
                                                + '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_total_caution']):
                self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds '
                                                + '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check if all depths are invalid
if np.any(self.depths['all_invalid']):
                self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 1, 10])
self.depths['status'] = 'warning'
else:
self.depths['status'] = 'inactive'
def boat_qa(self, meas):
"""Apply quality checks to boat data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-',
'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2),
('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]},
'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-',
'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2),
('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]},
'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-',
'filter': [('All: ', 0), ('Original: ', 1), ('HDOP: ', 5)]}}
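        # Each filter tuple pairs a message label with the row index of that filter in the valid_data array.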
for dt_key, dt_value in data_type.items():
boat = getattr(self, dt_value['class'])
# Initialize dictionaries for each data type
boat['q_total_caution'] = np.tile(False, (n_transects, 6))
boat['q_max_run_caution'] = np.tile(False, (n_transects, 6))
boat['q_total_warning'] = np.tile(False, (n_transects, 6))
boat['q_max_run_warning'] = np.tile(False, (n_transects, 6))
boat['all_invalid'] = np.tile(False, n_transects)
boat['q_total'] = np.tile(np.nan, (n_transects, 6))
boat['q_max_run'] = np.tile(np.nan, (n_transects, 6))
boat['messages'] = []
status_switch = 0
avg_speed_check = 0
# Check the results of each filter
for dt_filter in dt_value['filter']:
boat['status'] = 'inactive'
# Quality check each transect
for n, transect in enumerate(meas.transects):
# Evaluate on transects used in the discharge computation
if transect.checked:
in_transect_idx = transect.in_transect_idx
# Check to see if data are available for the data_type
if getattr(transect.boat_vel, dt_value['class']) is not None:
boat['status'] = 'good'
# Compute quality characteristics
valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
in_transect_idx]
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
boat['q_total'][n, dt_filter[1]] = q_total
boat['q_max_run'][n, dt_filter[1]] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check if all invalid
if dt_filter[1] == 0 and not np.any(valid):
boat['all_invalid'][n] = True
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
boat['q_total_warning'][n, dt_filter[1]] = True
elif q_total_percent > self.q_total_threshold_caution:
boat['q_total_caution'][n, dt_filter[1]] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
boat['q_max_run_warning'][n, dt_filter[1]] = True
elif q_max_run_percent > self.q_run_threshold_caution:
boat['q_max_run_caution'][n, dt_filter[1]] = True
# Check boat velocity for vtg data
                            if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps**2
+ transect.boat_vel.vtg_vel.v_mps**2)**0.5)
if avg_speed < 0.24:
boat['q_total_caution'][n, dt_filter[1]] = True
boat['messages'].append(
['vtg-AvgSpeed: VTG data may not be accurate for average boat speed less than'
+ '0.24 m/s (0.8 ft/s);', 2, 8])
avg_speed_check = 1
# Create message for consecutive invalid discharge
if boat['q_max_run_warning'][:, dt_filter[1]].any():
                    if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code])
status_switch = 2
elif boat['q_max_run_caution'][:, dt_filter[1]].any():
                    if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for total invalid discharge
if boat['q_total_warning'][:, dt_filter[1]].any():
                    if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code])
status_switch = 2
                elif boat['q_total_caution'][:, dt_filter[1]].any():
                    if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for all data invalid
if boat['all_invalid'].any():
boat['status'] = 'warning'
                if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_value['filter'][0][0] +
'There are no valid data for one or more transects.;', 1, module_code])
# Set status
if status_switch == 2:
boat['status'] = 'warning'
elif status_switch == 1:
boat['status'] = 'caution'
setattr(self, dt_value['class'], boat)
def water_qa(self, meas):
"""Apply quality checks to water data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize filter labels and indices
prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR:']
        if meas.transects[0].adcp.manufacturer == 'TRDI':
filter_index = [0, 1, 2, 3, 4, 5]
else:
filter_index = [0, 1, 2, 3, 4, 5, 7]
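        # SonTek data adds an SNR filter, stored in row 7 of valid_data and labeled by the last prefix entry.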
n_transects = len(meas.transects)
n_filters = len(filter_index) + 1
# Initialize dictionaries for each data type
self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['all_invalid'] = np.tile(False, n_transects)
self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['messages'] = []
status_switch = 0
# TODO if meas had a property checked as list it would save creating that list multiple times
checked = []
for transect in meas.transects:
checked.append(transect.checked)
# At least one transect is being used to compute discharge
if any(checked):
# Loop through filters
for prefix_idx, filter_idx in enumerate(filter_index):
# Loop through transects
for n, transect in enumerate(meas.transects):
if transect.checked:
valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)
# Determine what data each filter have marked invalid. Original invalid data are excluded
valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)
if filter_idx > 1:
valid_int = valid.astype(int) - valid_original.astype(int)
valid = valid_int != -1
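                            # valid_int is -1 only where this filter invalidated data that were originally valid,
                            # so ensembles already invalid in the original data are not counted against the filter.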
# Check if all data are invalid
if filter_idx == 0:
if np.nansum(valid.astype(int)) < 1:
self.w_vel['all_invalid'][n] = True
# TODO seems like the rest of this should be under else of all invalid or multiple messages
# generated.
# Compute characteristics
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
self.w_vel['q_total'][n, filter_idx] = q_total
self.w_vel['q_max_run'][n, filter_idx] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check total invalid discharge in ensembles for warning
if q_total_percent > self.q_total_threshold_warning:
self.w_vel['q_total_warning'][n, filter_idx] = True
# Apply run or cluster thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.w_vel['q_max_run_warning'][n, filter_idx] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.w_vel['q_max_run_caution'][n, filter_idx] = True
# Compute percent discharge interpolated for both cells and ensembles
# This approach doesn't exclude original data
valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T
q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \
+ np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \
+ np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)])
q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100
if q_invalid_total_percent > self.q_total_threshold_caution:
self.w_vel['q_total_caution'][n, filter_idx] = True
# Generate messages for ensemble run or clusters
if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for total_invalid Q
if np.any(self.w_vel['q_total_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for invalid ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_total_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for invalid cells and ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for all invalid
if np.any(self.w_vel['all_invalid']):
                self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects.',
                                               1, 11])
status_switch = 2
# Set status
self.w_vel['status'] = 'good'
if status_switch == 2:
self.w_vel['status'] = 'warning'
elif status_switch == 1:
self.w_vel['status'] = 'caution'
else:
self.w_vel['status'] = 'inactive'
def extrapolation_qa(self, meas):
"""Apply quality checks to extrapolation methods
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.extrapolation['messages'] = []
checked = []
discharges = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
discharges.append(meas.discharge[n])
if any(checked):
self.extrapolation['status'] = 'good'
extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges)
if np.abs(extrap_uncertainty) > 2:
self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than '
+ '2 percent;', 2, 12])
self.extrapolation['messages'].append([' Carefully review the extrapolation;', 2, 12])
self.extrapolation['status'] = 'caution'
else:
self.extrapolation['status'] = 'inactive'
def edges_qa(self, meas):
"""Apply quality checks to edge estimates
Parameters
----------
meas: Measurement
Object of class Measurement
"""
        # Initialize variables
self.edges['messages'] = []
checked = []
left_q = []
right_q = []
total_q = []
edge_dist_left = []
edge_dist_right = []
dist_moved_left = []
dist_moved_right = []
dist_made_good = []
left_type = []
right_type = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
left_q.append(meas.discharge[n].left)
right_q.append(meas.discharge[n].right)
total_q.append(meas.discharge[n].total)
dmr, dml, dmg = QAData.edge_distance_moved(transect)
dist_moved_right.append(dmr)
dist_moved_left.append(dml)
dist_made_good.append(dmg)
edge_dist_left.append(transect.edges.left.distance_m)
edge_dist_right.append(transect.edges.right.distance_m)
left_type.append(transect.edges.left.type)
right_type.append(transect.edges.right.type)
if any(checked):
# Set default status to good
self.edges['status'] = 'good'
# Check left edge q > 5%
self.edges['left_q'] = 0
left_q_percent = (np.nanmean(left_q) / np.nanmean(total_q)) * 100
if np.abs(left_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 2, 13])
self.edges['left_q'] = 1
# Check right edge q > 5%
self.edges['right_q'] = 0
right_q_percent = (np.nanmean(right_q) / np.nanmean(total_q)) * 100
if np.abs(right_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 2, 13])
self.edges['right_q'] = 1
# Check for consistent sign
q_positive = []
self.edges['left_sign'] = 0
for q in left_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13])
self.edges['left_sign'] = 1
q_positive = []
self.edges['right_sign'] = 0
for q in right_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13])
self.edges['right_sign'] = 1
# Check distance moved
dmg_5_percent = 0.05 * np.nanmean(dist_made_good)
avg_right_edge_dist = np.nanmean(edge_dist_right)
right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist])
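            # Movement threshold is the smaller of 5% of the distance made good and the mean edge distance.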
self.edges['right_dist_moved_idx'] = np.where(dist_moved_right > right_threshold)[0]
if np.any(self.edges['right_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13])
avg_left_edge_dist = np.nanmean(edge_dist_left)
left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist])
self.edges['left_dist_moved_idx'] = np.where(dist_moved_left > left_threshold)[0]
if np.any(self.edges['left_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13])
# Check for edge ensembles marked invalid due to excluded distance
for transect in meas.transects:
if transect.checked:
ens_sum_excluded_data = np.nansum(transect.w_vel.valid_data[6, :, :], 0)
cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0)
ens_excluded_data = np.not_equal(ens_sum_excluded_data, cells_above_sl)
if any(ens_excluded_data):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: The excluded distance caused invalid ensembles '
+ 'in an edge, check edge distance;', 2, 13])
break
# Check edges for zero discharge
self.edges['left_zero'] = 0
            left_zero_idx = np.where(np.asarray(left_q) == 0)[0]
            if len(left_zero_idx) > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13])
self.edges['left_zero'] = 2
self.edges['right_zero'] = 0
            right_zero_idx = np.where(np.asarray(right_q) == 0)[0]
            if len(right_zero_idx) > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13])
self.edges['right_zero'] = 2
# Check consistent edge type
self.edges['left_type'] = 0
if len(np.unique(left_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13])
self.edges['left_type'] = 2
self.edges['right_type'] = 0
if len(np.unique(right_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13])
self.edges['right_type'] = 2
else:
self.edges['status'] = 'inactive'
@staticmethod
def invalid_qa(valid, discharge):
"""Computes the total invalid discharge in ensembles that have invalid data. The function also computes
the maximum run or cluster of ensembles with the maximum interpolated discharge.
Parameters
----------
valid: np.array(bool)
Array identifying valid and invalid ensembles.
discharge: QComp
Object of class QComp
Returns
-------
q_invalid_total: float
Total interpolated discharge in invalid ensembles
q_invalid_max_run: float
Maximum interpolated discharge in a run or cluster of invalid ensembles
ens_invalid: int
Total number of invalid ensembles
"""
# Create bool for invalid data
invalid = np.logical_not(valid)
q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \
+ np.nansum(discharge.bottom_ens[invalid])
# Compute total number of invalid ensembles
ens_invalid = np.sum(invalid)
# Compute the indices of where changes occur
valid_int = np.insert(valid.astype(int), 0, -1)
valid_int = np.append(valid_int, -1)
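        # Padding both ends with -1 forces run boundaries at the first and last ensembles.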
valid_run = np.where(np.diff(valid_int) != 0)[0]
run_length = np.diff(valid_run)
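        # run_length alternates between valid and invalid runs; the slice below keeps only the invalid runs.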
run_length0 = run_length[(valid[0] == 1)::2]
n_runs = len(run_length0)
        if valid[0]:
n_start = 1
else:
n_start = 0
n_end = len(valid_run)-1
if n_runs > 1:
m = 0
q_invalid_run = []
for n in range(n_start, n_end, 2):
m += 1
idx_start = valid_run[n]
idx_end = valid_run[n+1]
q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end])
+ np.nansum(discharge.top_ens[idx_start:idx_end])
+ np.nansum(discharge.bottom_ens[idx_start:idx_end]))
# Determine the maximum discharge in a single run
q_invalid_max_run = np.nanmax(np.abs(q_invalid_run))
else:
q_invalid_max_run = 0
return q_invalid_total, q_invalid_max_run, ens_invalid
@staticmethod
def edge_distance_moved(transect):
"""Computes the boat movement during edge ensemble collection.
Parameters
----------
transect: Transect
Object of class Transect
Returns
-------
right_dist_moved: float
Distance in m moved during collection of right edge samples
left_dist_moved: float
Distance in m moved during collection of left edge samples
dmg: float
Distance made good for the entire transect
"""
boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
ens_duration = transect.date_time.ens_duration_sec
# Get boat velocities
if boat_selected is not None:
u_processed = boat_selected.u_processed_mps
v_processed = boat_selected.v_processed_mps
else:
u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)
v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)
# Compute boat coordinates
x_processed = np.nancumsum(u_processed * ens_duration)
y_processed = np.nancumsum(v_processed * ens_duration)
dmg = (x_processed[-1]**2 + y_processed[-1]**2)**0.5
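        # dmg is the straight-line (made-good) distance between the start and end of the transect track.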
# Compute left distance moved
# TODO should be a dist moved function
left_edge_idx = QComp.edge_ensembles('left', transect)
if len(left_edge_idx) > 0:
boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]]
boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]]
left_dist_moved = (boat_x**2 + boat_y**2)**0.5
else:
left_dist_moved = np.nan
# Compute right distance moved
right_edge_idx = QComp.edge_ensembles('right', transect)
if len(right_edge_idx) > 0:
boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]]
boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]]
right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
else:
right_dist_moved = np.nan
return right_dist_moved, left_dist_moved, dmg
|
[
"numpy.sum",
"numpy.abs",
"numpy.greater",
"numpy.isnan",
"numpy.tile",
"numpy.round",
"numpy.unique",
"numpy.nanmean",
"Classes.Uncertainty.Uncertainty.uncertainty_extrapolation",
"numpy.logical_not",
"numpy.append",
"Classes.QComp.QComp.edge_ensembles",
"numpy.less",
"numpy.nansum",
"Classes.Uncertainty.Uncertainty.uncertainty_q_random",
"numpy.asarray",
"numpy.not_equal",
"numpy.hstack",
"numpy.nanmax",
"numpy.vstack",
"numpy.nanstd",
"numpy.nancumsum",
"numpy.nanmin",
"numpy.any",
"numpy.diff",
"numpy.array",
"numpy.where"
] |
[((11642, 11657), 'numpy.any', 'np.any', (['checked'], {}), '(checked)\n', (11648, 11657), True, 'import numpy as np\n'), ((23573, 23585), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (23581, 23585), True, 'import numpy as np\n'), ((24064, 24079), 'numpy.any', 'np.any', (['checked'], {}), '(checked)\n', (24070, 24079), True, 'import numpy as np\n'), ((33643, 33671), 'numpy.tile', 'np.tile', (['np.nan', 'n_transects'], {}), '(np.nan, n_transects)\n', (33650, 33671), True, 'import numpy as np\n'), ((33707, 33735), 'numpy.tile', 'np.tile', (['np.nan', 'n_transects'], {}), '(np.nan, n_transects)\n', (33714, 33735), True, 'import numpy as np\n'), ((33777, 33804), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33784, 33804), True, 'import numpy as np\n'), ((33844, 33871), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33851, 33871), True, 'import numpy as np\n'), ((33913, 33940), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33920, 33940), True, 'import numpy as np\n'), ((33980, 34007), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33987, 34007), True, 'import numpy as np\n'), ((34045, 34072), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (34052, 34072), True, 'import numpy as np\n'), ((47610, 47650), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47617, 47650), True, 'import numpy as np\n'), ((47693, 47733), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47700, 47733), True, 'import numpy as np\n'), ((47774, 47814), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47781, 47814), True, 'import numpy as np\n'), ((47857, 47897), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47864, 47897), True, 'import numpy as np\n'), ((47934, 47961), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (47941, 47961), True, 'import numpy as np\n'), ((47994, 48035), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, n_filters)'], {}), '(np.nan, (n_transects, n_filters))\n', (48001, 48035), True, 'import numpy as np\n'), ((48070, 48111), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, n_filters)'], {}), '(np.nan, (n_transects, n_filters))\n', (48077, 48111), True, 'import numpy as np\n'), ((62914, 62935), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (62928, 62935), True, 'import numpy as np\n'), ((63175, 63190), 'numpy.sum', 'np.sum', (['invalid'], {}), '(invalid)\n', (63181, 63190), True, 'import numpy as np\n'), ((63322, 63346), 'numpy.append', 'np.append', (['valid_int', '(-1)'], {}), '(valid_int, -1)\n', (63331, 63346), True, 'import numpy as np\n'), ((63425, 63443), 'numpy.diff', 'np.diff', (['valid_run'], {}), '(valid_run)\n', (63432, 63443), True, 'import numpy as np\n'), ((65519, 65559), 'numpy.nancumsum', 'np.nancumsum', (['(u_processed * ens_duration)'], {}), '(u_processed * ens_duration)\n', (65531, 65559), True, 'import numpy as np\n'), ((65582, 65622), 'numpy.nancumsum', 'np.nancumsum', (['(v_processed * ens_duration)'], {}), '(v_processed * ens_duration)\n', (65594, 65622), True, 'import numpy as np\n'), ((65794, 65832), 'Classes.QComp.QComp.edge_ensembles', 'QComp.edge_ensembles', 
(['"""left"""', 'transect'], {}), "('left', transect)\n", (65814, 65832), False, 'from Classes.QComp import QComp\n'), ((66211, 66250), 'Classes.QComp.QComp.edge_ensembles', 'QComp.edge_ensembles', (['"""right"""', 'transect'], {}), "('right', transect)\n", (66231, 66250), False, 'from Classes.QComp import QComp\n'), ((3937, 3956), 'numpy.asarray', 'np.asarray', (['checked'], {}), '(checked)\n', (3947, 3956), True, 'import numpy as np\n'), ((11795, 11808), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (11803, 11808), True, 'import numpy as np\n'), ((37224, 37260), 'numpy.any', 'np.any', (["self.depths['q_run_warning']"], {}), "(self.depths['q_run_warning'])\n", (37230, 37260), True, 'import numpy as np\n'), ((37911, 37949), 'numpy.any', 'np.any', (["self.depths['q_total_warning']"], {}), "(self.depths['q_total_warning'])\n", (37917, 37949), True, 'import numpy as np\n'), ((38602, 38636), 'numpy.any', 'np.any', (["self.depths['all_invalid']"], {}), "(self.depths['all_invalid'])\n", (38608, 38636), True, 'import numpy as np\n'), ((40045, 40077), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40052, 40077), True, 'import numpy as np\n'), ((40118, 40150), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40125, 40150), True, 'import numpy as np\n'), ((40189, 40221), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40196, 40221), True, 'import numpy as np\n'), ((40262, 40294), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40269, 40294), True, 'import numpy as np\n'), ((40329, 40356), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (40336, 40356), True, 'import numpy as np\n'), ((40387, 40420), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, 6)'], {}), '(np.nan, (n_transects, 6))\n', (40394, 40420), True, 'import numpy as np\n'), ((40453, 40486), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, 6)'], {}), '(np.nan, (n_transects, 6))\n', (40460, 40486), True, 'import numpy as np\n'), ((53759, 53792), 'numpy.any', 'np.any', (["self.w_vel['all_invalid']"], {}), "(self.w_vel['all_invalid'])\n", (53765, 53792), True, 'import numpy as np\n'), ((54898, 54953), 'Classes.Uncertainty.Uncertainty.uncertainty_extrapolation', 'Uncertainty.uncertainty_extrapolation', (['meas', 'discharges'], {}), '(meas, discharges)\n', (54935, 54953), False, 'from Classes.Uncertainty import Uncertainty\n'), ((63059, 63099), 'numpy.nansum', 'np.nansum', (['discharge.bottom_ens[invalid]'], {}), '(discharge.bottom_ens[invalid])\n', (63068, 63099), True, 'import numpy as np\n'), ((65307, 65370), 'numpy.tile', 'np.tile', (['np.nan', 'transect.boat_vel.bt_vel.u_processed_mps.shape'], {}), '(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)\n', (65314, 65370), True, 'import numpy as np\n'), ((65397, 65460), 'numpy.tile', 'np.tile', (['np.nan', 'transect.boat_vel.bt_vel.v_processed_mps.shape'], {}), '(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)\n', (65404, 65460), True, 'import numpy as np\n'), ((12105, 12129), 'numpy.not_equal', 'np.not_equal', (['heading', '(0)'], {}), '(heading, 0)\n', (12117, 12129), True, 'import numpy as np\n'), ((24106, 24121), 'numpy.nanmax', 'np.nanmax', (['temp'], {}), '(temp)\n', (24115, 24121), True, 'import numpy as np\n'), ((24124, 24139), 'numpy.nanmin', 'np.nanmin', (['temp'], {}), '(temp)\n', (24133, 24139), True, 'import numpy as np\n'), 
((35654, 35701), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (35660, 35701), True, 'import numpy as np\n'), ((35740, 35789), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (35746, 35789), True, 'import numpy as np\n'), ((36551, 36570), 'numpy.round', 'np.round', (['drafts', '(3)'], {}), '(drafts, 3)\n', (36559, 36570), True, 'import numpy as np\n'), ((36915, 36941), 'numpy.less', 'np.less', (['draft_check', '(0.01)'], {}), '(draft_check, 0.01)\n', (36922, 36941), True, 'import numpy as np\n'), ((37541, 37577), 'numpy.any', 'np.any', (["self.depths['q_run_caution']"], {}), "(self.depths['q_run_caution'])\n", (37547, 37577), True, 'import numpy as np\n'), ((38234, 38272), 'numpy.any', 'np.any', (["self.depths['q_total_caution']"], {}), "(self.depths['q_total_caution'])\n", (38240, 38272), True, 'import numpy as np\n'), ((51729, 51783), 'numpy.any', 'np.any', (["self.w_vel['q_max_run_warning'][:, filter_idx]"], {}), "(self.w_vel['q_max_run_warning'][:, filter_idx])\n", (51735, 51783), True, 'import numpy as np\n'), ((52743, 52795), 'numpy.any', 'np.any', (["self.w_vel['q_total_warning'][:, filter_idx]"], {}), "(self.w_vel['q_total_warning'][:, filter_idx])\n", (52749, 52795), True, 'import numpy as np\n'), ((54970, 54996), 'numpy.abs', 'np.abs', (['extrap_uncertainty'], {}), '(extrap_uncertainty)\n', (54976, 54996), True, 'import numpy as np\n'), ((57005, 57027), 'numpy.abs', 'np.abs', (['left_q_percent'], {}), '(left_q_percent)\n', (57011, 57027), True, 'import numpy as np\n'), ((58929, 58956), 'numpy.nanmean', 'np.nanmean', (['edge_dist_right'], {}), '(edge_dist_right)\n', (58939, 58956), True, 'import numpy as np\n'), ((58991, 59038), 'numpy.nanmin', 'np.nanmin', (['[dmg_5_percent, avg_right_edge_dist]'], {}), '([dmg_5_percent, avg_right_edge_dist])\n', (59000, 59038), True, 'import numpy as np\n'), ((59159, 59201), 'numpy.any', 'np.any', (["self.edges['right_dist_moved_idx']"], {}), "(self.edges['right_dist_moved_idx'])\n", (59165, 59201), True, 'import numpy as np\n'), ((59412, 59438), 'numpy.nanmean', 'np.nanmean', (['edge_dist_left'], {}), '(edge_dist_left)\n', (59422, 59438), True, 'import numpy as np\n'), ((59472, 59518), 'numpy.nanmin', 'np.nanmin', (['[dmg_5_percent, avg_left_edge_dist]'], {}), '([dmg_5_percent, avg_left_edge_dist])\n', (59481, 59518), True, 'import numpy as np\n'), ((59636, 59677), 'numpy.any', 'np.any', (["self.edges['left_dist_moved_idx']"], {}), "(self.edges['left_dist_moved_idx'])\n", (59642, 59677), True, 'import numpy as np\n'), ((62962, 63002), 'numpy.nansum', 'np.nansum', (['discharge.middle_ens[invalid]'], {}), '(discharge.middle_ens[invalid])\n', (62971, 63002), True, 'import numpy as np\n'), ((63005, 63042), 'numpy.nansum', 'np.nansum', (['discharge.top_ens[invalid]'], {}), '(discharge.top_ens[invalid])\n', (63014, 63042), True, 'import numpy as np\n'), ((64254, 64275), 'numpy.abs', 'np.abs', (['q_invalid_run'], {}), '(q_invalid_run)\n', (64260, 64275), True, 'import numpy as np\n'), ((6876, 6929), 'Classes.Uncertainty.Uncertainty.uncertainty_q_random', 'Uncertainty.uncertainty_q_random', (['discharges', '"""total"""'], {}), "(discharges, 'total')\n", (6908, 6929), False, 'from Classes.Uncertainty import Uncertainty\n'), ((19248, 19265), 'numpy.unique', 'np.unique', (['magvar'], {}), '(magvar)\n', (19257, 19265), True, 'import numpy as np\n'), ((19998, 20020), 'numpy.asarray', 'np.asarray', 
(['pitch_mean'], {}), '(pitch_mean)\n', (20008, 20020), True, 'import numpy as np\n'), ((20525, 20546), 'numpy.asarray', 'np.asarray', (['roll_mean'], {}), '(roll_mean)\n', (20535, 20546), True, 'import numpy as np\n'), ((21061, 21082), 'numpy.asarray', 'np.asarray', (['pitch_std'], {}), '(pitch_std)\n', (21071, 21082), True, 'import numpy as np\n'), ((21384, 21404), 'numpy.asarray', 'np.asarray', (['roll_std'], {}), '(roll_std)\n', (21394, 21404), True, 'import numpy as np\n'), ((23980, 24017), 'numpy.hstack', 'np.hstack', (['(temp, temp_selected.data)'], {}), '((temp, temp_selected.data))\n', (23989, 24017), True, 'import numpy as np\n'), ((25450, 25490), 'numpy.abs', 'np.abs', (["(user - meas.ext_temp_chk['adcp'])"], {}), "(user - meas.ext_temp_chk['adcp'])\n", (25456, 25490), True, 'import numpy as np\n'), ((35219, 35238), 'numpy.any', 'np.any', (['depth_valid'], {}), '(depth_valid)\n', (35225, 35238), True, 'import numpy as np\n'), ((52186, 52240), 'numpy.any', 'np.any', (["self.w_vel['q_max_run_caution'][:, filter_idx]"], {}), "(self.w_vel['q_max_run_caution'][:, filter_idx])\n", (52192, 52240), True, 'import numpy as np\n'), ((53202, 53254), 'numpy.any', 'np.any', (["self.w_vel['q_total_caution'][:, filter_idx]"], {}), "(self.w_vel['q_total_caution'][:, filter_idx])\n", (53208, 53254), True, 'import numpy as np\n'), ((56942, 56960), 'numpy.nanmean', 'np.nanmean', (['left_q'], {}), '(left_q)\n', (56952, 56960), True, 'import numpy as np\n'), ((56963, 56982), 'numpy.nanmean', 'np.nanmean', (['total_q'], {}), '(total_q)\n', (56973, 56982), True, 'import numpy as np\n'), ((57408, 57431), 'numpy.abs', 'np.abs', (['right_q_percent'], {}), '(right_q_percent)\n', (57414, 57431), True, 'import numpy as np\n'), ((58864, 58890), 'numpy.nanmean', 'np.nanmean', (['dist_made_good'], {}), '(dist_made_good)\n', (58874, 58890), True, 'import numpy as np\n'), ((59092, 59136), 'numpy.where', 'np.where', (['(dist_moved_right > right_threshold)'], {}), '(dist_moved_right > right_threshold)\n', (59100, 59136), True, 'import numpy as np\n'), ((59571, 59613), 'numpy.where', 'np.where', (['(dist_moved_left > left_threshold)'], {}), '(dist_moved_left > left_threshold)\n', (59579, 59613), True, 'import numpy as np\n'), ((60793, 60814), 'numpy.where', 'np.where', (['(left_q == 0)'], {}), '(left_q == 0)\n', (60801, 60814), True, 'import numpy as np\n'), ((61123, 61145), 'numpy.where', 'np.where', (['(right_q == 0)'], {}), '(right_q == 0)\n', (61131, 61145), True, 'import numpy as np\n'), ((63376, 63394), 'numpy.diff', 'np.diff', (['valid_int'], {}), '(valid_int)\n', (63383, 63394), True, 'import numpy as np\n'), ((4861, 4912), 'numpy.where', 'np.where', (['(transect.date_time.ens_duration_sec > 1.5)'], {}), '(transect.date_time.ens_duration_sec > 1.5)\n', (4869, 4912), True, 'import numpy as np\n'), ((7471, 7492), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (7480, 7492), True, 'import numpy as np\n'), ((16762, 16800), 'numpy.nanmean', 'np.nanmean', (['pitch_source_selected.data'], {}), '(pitch_source_selected.data)\n', (16772, 16800), True, 'import numpy as np\n'), ((16839, 16876), 'numpy.nanstd', 'np.nanstd', (['pitch_source_selected.data'], {}), '(pitch_source_selected.data)\n', (16848, 16876), True, 'import numpy as np\n'), ((16915, 16952), 'numpy.nanmean', 'np.nanmean', (['roll_source_selected.data'], {}), '(roll_source_selected.data)\n', (16925, 16952), True, 'import numpy as np\n'), ((16990, 17026), 'numpy.nanstd', 'np.nanstd', (['roll_source_selected.data'], {}), 
'(roll_source_selected.data)\n', (16999, 17026), True, 'import numpy as np\n'), ((20218, 20240), 'numpy.asarray', 'np.asarray', (['pitch_mean'], {}), '(pitch_mean)\n', (20228, 20240), True, 'import numpy as np\n'), ((20742, 20763), 'numpy.asarray', 'np.asarray', (['roll_mean'], {}), '(roll_mean)\n', (20752, 20763), True, 'import numpy as np\n'), ((28665, 28686), 'numpy.unique', 'np.unique', (['file_names'], {}), '(file_names)\n', (28674, 28686), True, 'import numpy as np\n'), ((32092, 32107), 'numpy.unique', 'np.unique', (['loop'], {}), '(loop)\n', (32101, 32107), True, 'import numpy as np\n'), ((34875, 34906), 'numpy.vstack', 'np.vstack', (['(depth_na, depth_in)'], {}), '((depth_na, depth_in))\n', (34884, 34906), True, 'import numpy as np\n'), ((35150, 35190), 'numpy.vstack', 'np.vstack', (['(depth_nan, depth_valid_temp)'], {}), '((depth_nan, depth_valid_temp))\n', (35159, 35190), True, 'import numpy as np\n'), ((48763, 48833), 'numpy.any', 'np.any', (['transect.w_vel.valid_data[1, :, transect.in_transect_idx].T', '(0)'], {}), '(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)\n', (48769, 48833), True, 'import numpy as np\n'), ((48981, 49060), 'numpy.any', 'np.any', (['transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T', '(0)'], {}), '(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)\n', (48987, 49060), True, 'import numpy as np\n'), ((50042, 50089), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (50048, 50089), True, 'import numpy as np\n'), ((50136, 50185), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (50142, 50185), True, 'import numpy as np\n'), ((57340, 57359), 'numpy.nanmean', 'np.nanmean', (['right_q'], {}), '(right_q)\n', (57350, 57359), True, 'import numpy as np\n'), ((57362, 57381), 'numpy.nanmean', 'np.nanmean', (['total_q'], {}), '(total_q)\n', (57372, 57381), True, 'import numpy as np\n'), ((60070, 60118), 'numpy.nansum', 'np.nansum', (['transect.w_vel.valid_data[6, :, :]', '(0)'], {}), '(transect.w_vel.valid_data[6, :, :], 0)\n', (60079, 60118), True, 'import numpy as np\n'), ((60160, 60203), 'numpy.nansum', 'np.nansum', (['transect.w_vel.cells_above_sl', '(0)'], {}), '(transect.w_vel.cells_above_sl, 0)\n', (60169, 60203), True, 'import numpy as np\n'), ((60248, 60299), 'numpy.not_equal', 'np.not_equal', (['ens_sum_excluded_data', 'cells_above_sl'], {}), '(ens_sum_excluded_data, cells_above_sl)\n', (60260, 60299), True, 'import numpy as np\n'), ((61491, 61511), 'numpy.unique', 'np.unique', (['left_type'], {}), '(left_type)\n', (61500, 61511), True, 'import numpy as np\n'), ((61791, 61812), 'numpy.unique', 'np.unique', (['right_type'], {}), '(right_type)\n', (61800, 61812), True, 'import numpy as np\n'), ((64097, 64147), 'numpy.nansum', 'np.nansum', (['discharge.bottom_ens[idx_start:idx_end]'], {}), '(discharge.bottom_ens[idx_start:idx_end])\n', (64106, 64147), True, 'import numpy as np\n'), ((5014, 5060), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec'], {}), '(transect.date_time.ens_duration_sec)\n', (5023, 5060), True, 'import numpy as np\n'), ((25902, 25918), 'numpy.nanmean', 'np.nanmean', (['temp'], {}), '(temp)\n', (25912, 25918), True, 'import numpy as np\n'), ((41893, 41940), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (41899, 41940), True, 'import numpy as 
np\n'), ((41991, 42040), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (41997, 42040), True, 'import numpy as np\n'), ((57969, 57990), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (57978, 57990), True, 'import numpy as np\n'), ((58520, 58541), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (58529, 58541), True, 'import numpy as np\n'), ((63920, 63970), 'numpy.nansum', 'np.nansum', (['discharge.middle_ens[idx_start:idx_end]'], {}), '(discharge.middle_ens[idx_start:idx_end])\n', (63929, 63970), True, 'import numpy as np\n'), ((64010, 64057), 'numpy.nansum', 'np.nansum', (['discharge.top_ens[idx_start:idx_end]'], {}), '(discharge.top_ens[idx_start:idx_end])\n', (64019, 64057), True, 'import numpy as np\n'), ((5116, 5175), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec[idx_missing]'], {}), '(transect.date_time.ens_duration_sec[idx_missing])\n', (5125, 5175), True, 'import numpy as np\n'), ((5698, 5743), 'numpy.isnan', 'np.isnan', (['transect.date_time.ens_duration_sec'], {}), '(transect.date_time.ens_duration_sec)\n', (5706, 5743), True, 'import numpy as np\n'), ((17704, 17765), 'numpy.where', 'np.where', (['(pitch_data > heading_source_selected.pitch_limit[0])'], {}), '(pitch_data > heading_source_selected.pitch_limit[0])\n', (17712, 17765), True, 'import numpy as np\n'), ((17807, 17868), 'numpy.where', 'np.where', (['(pitch_data < heading_source_selected.pitch_limit[1])'], {}), '(pitch_data < heading_source_selected.pitch_limit[1])\n', (17815, 17868), True, 'import numpy as np\n'), ((18464, 18524), 'numpy.where', 'np.where', (['(roll_data > heading_source_selected.pitch_limit[0])'], {}), '(roll_data > heading_source_selected.pitch_limit[0])\n', (18472, 18524), True, 'import numpy as np\n'), ((18566, 18626), 'numpy.where', 'np.where', (['(roll_data < heading_source_selected.pitch_limit[1])'], {}), '(roll_data < heading_source_selected.pitch_limit[1])\n', (18574, 18626), True, 'import numpy as np\n'), ((18965, 19012), 'numpy.where', 'np.where', (['(heading_source_selected.mag_error > 2)'], {}), '(heading_source_selected.mag_error > 2)\n', (18973, 19012), True, 'import numpy as np\n'), ((29099, 29121), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (29107, 29121), True, 'import numpy as np\n'), ((29739, 29761), 'numpy.array', 'np.array', (['mb_test_type'], {}), '(mb_test_type)\n', (29747, 29761), True, 'import numpy as np\n'), ((43257, 43358), 'numpy.nanmean', 'np.nanmean', (['((transect.boat_vel.vtg_vel.u_mps ** 2 + transect.boat_vel.vtg_vel.v_mps **\n 2) ** 0.5)'], {}), '((transect.boat_vel.vtg_vel.u_mps ** 2 + transect.boat_vel.\n vtg_vel.v_mps ** 2) ** 0.5)\n', (43267, 43358), True, 'import numpy as np\n'), ((5341, 5400), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec[idx_missing]'], {}), '(transect.date_time.ens_duration_sec[idx_missing])\n', (5350, 5400), True, 'import numpy as np\n'), ((9798, 9841), 'numpy.greater', 'np.greater', (['corr_table[3:, :]', 'qa_threshold'], {}), '(corr_table[3:, :], qa_threshold)\n', (9808, 9841), True, 'import numpy as np\n'), ((9971, 10024), 'numpy.greater', 'np.greater', (['corr_table[7, :]', '(corr_table[0, :] * 0.25)'], {}), '(corr_table[7, :], corr_table[0, :] * 0.25)\n', (9981, 10024), True, 'import numpy as np\n'), ((30767, 30789), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (30775, 30789), True, 'import numpy as np\n'), ((42152, 
42165), 'numpy.any', 'np.any', (['valid'], {}), '(valid)\n', (42158, 42165), True, 'import numpy as np\n'), ((51354, 51375), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (51368, 51375), True, 'import numpy as np\n'), ((31241, 31263), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (31249, 31263), True, 'import numpy as np\n'), ((51161, 51188), 'numpy.logical_not', 'np.logical_not', (['valid_cells'], {}), '(valid_cells)\n', (51175, 51188), True, 'import numpy as np\n'), ((51259, 51280), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (51273, 51280), True, 'import numpy as np\n'), ((10190, 10209), 'numpy.sum', 'np.sum', (['lag_7_check'], {}), '(lag_7_check)\n', (10196, 10209), True, 'import numpy as np\n'), ((10165, 10186), 'numpy.sum', 'np.sum', (['all_lag_check'], {}), '(all_lag_check)\n', (10171, 10186), True, 'import numpy as np\n')]
|
"""Add CheckConstraints for non-nullable string cols
Revision ID: b7fc1ab24c92
Revises: d5d740c76aa3
Create Date: 2020-07-10 13:24:56.007611
"""
from datetime import datetime
from uuid import uuid4
from alembic import op
from geoalchemy2 import Geometry
from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text
from sqlalchemy.dialects.sqlite import REAL, TIMESTAMP
from sqlalchemy.orm import declarative_base, deferred
from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint
from pepys_import.core.store import constants
from pepys_import.core.store.common_db import (
CommentMixin,
ElevationPropertyMixin,
LocationPropertyMixin,
LogMixin,
MediaMixin,
ReferenceRepr,
SynonymMixin,
)
from pepys_import.core.store.db_base import sqlite_naming_convention
from pepys_import.core.store.db_status import TableTypes
from pepys_import.utils.sqlalchemy_utils import UUIDType
# revision identifiers, used by Alembic.
revision = "b7fc1ab24c92"
down_revision = "d5d740c76aa3"
branch_labels = None
depends_on = None
def upgrade():
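    # SQLite cannot add a CHECK constraint to an existing table with a plain
    # ALTER TABLE, so Alembic's batch mode is used: each table is recreated
    # with the new constraint and its rows are copied across.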
with op.batch_alter_table("Tasks", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Tasks_name", condition="name <> ''")
with op.batch_alter_table("Synonyms", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Synonyms_table", condition="\"table\" <> ''")
batch_op.create_check_constraint("ck_Synonyms_synonym", condition="synonym <> ''")
with op.batch_alter_table("Changes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Changes_user", condition="user <> ''")
batch_op.create_check_constraint("ck_Changes_reason", condition="reason <> ''")
with op.batch_alter_table("Logs", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Logs_table", condition="\"table\" <> ''")
with op.batch_alter_table("PlatformTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_PlatformTypes_name", condition="name <> ''")
with op.batch_alter_table("Nationalities", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Nationalities_name", condition="name <> ''")
with op.batch_alter_table("GeometryTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_GeometryTypes_name", condition="name <> ''")
with op.batch_alter_table("GeometrySubTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_GeometrySubTypes_name", condition="name <> ''")
with op.batch_alter_table("Users", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Users_name", condition="name <> ''")
with op.batch_alter_table("UnitTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_UnitTypes_name", condition="name <> ''")
with op.batch_alter_table("ClassificationTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ClassificationTypes_name", condition="name <> ''")
with op.batch_alter_table("ContactTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ContactTypes_name", condition="name <> ''")
with op.batch_alter_table("SensorTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_SensorTypes_name", condition="name <> ''")
with op.batch_alter_table("Privacies", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Privacies_name", condition="name <> ''")
with op.batch_alter_table("DatafileTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_DatafileTypes_name", condition="name <> ''")
with op.batch_alter_table("MediaTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_MediaTypes_name", condition="name <> ''")
with op.batch_alter_table("CommentTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_CommentTypes_name", condition="name <> ''")
with op.batch_alter_table("CommodityTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_CommodityTypes_name", condition="name <> ''")
with op.batch_alter_table("ConfidenceLevels", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ConfidenceLevels_name", condition="name <> ''")
with op.batch_alter_table("Comments", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Comments_content", condition="content <> ''")
with op.batch_alter_table("Media", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Media_url", condition="url <> ''")
def downgrade():
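    # SQLite cannot reflect named CHECK constraints, so the table definitions
    # (including the constraints being dropped) are declared locally below and
    # passed to batch_alter_table via copy_from; batch mode then rebuilds each
    # table without those constraints.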
Metadata = MetaData(naming_convention=sqlite_naming_convention)
BaseSpatiaLite = declarative_base(metadata=Metadata)
class Task(BaseSpatiaLite):
__tablename__ = constants.TASK
table_type = TableTypes.METADATA
table_type_id = 4
task_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
parent_id = Column(
UUIDType, ForeignKey("Tasks.task_id", onupdate="cascade"), nullable=False
)
start = Column(TIMESTAMP, nullable=False)
end = Column(TIMESTAMP, nullable=False)
environment = deferred(Column(String(150)))
location = deferred(Column(String(150)))
privacy_id = Column(
UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Tasks_name"),)
class Synonym(BaseSpatiaLite, SynonymMixin):
__tablename__ = constants.SYNONYM
table_type = TableTypes.METADATA
table_type_id = 7
synonym_id = Column(UUIDType, primary_key=True, default=uuid4)
table = Column(String(150), nullable=False)
entity = Column(UUIDType, nullable=False)
synonym = Column(String(150), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
CheckConstraint("\"table\" <> ''", name="ck_Synonyms_table"),
CheckConstraint("synonym <> ''", name="ck_Synonyms_synonym"),
)
class Change(BaseSpatiaLite):
__tablename__ = constants.CHANGE
table_type = TableTypes.METADATA
table_type_id = 8
change_id = Column(UUIDType, primary_key=True, default=uuid4)
user = Column(String(150), nullable=False)
modified = Column(DATE, nullable=False)
reason = Column(String(500), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
CheckConstraint("user <> ''", name="ck_Changes_user"),
CheckConstraint("reason <> ''", name="ck_Changes_reason"),
)
class Log(BaseSpatiaLite, LogMixin):
__tablename__ = constants.LOG
table_type = TableTypes.METADATA
table_type_id = 9
log_id = Column(UUIDType, primary_key=True, default=uuid4)
table = Column(String(150), nullable=False)
id = Column(UUIDType, nullable=False)
field = Column(String(150))
new_value = Column(String(150))
change_id = Column(
UUIDType, ForeignKey("Changes.change_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("\"table\" <> ''", name="ck_Logs_table"),)
class PlatformType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.PLATFORM_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 13
platform_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_PlatformTypes_name"),)
class Nationality(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.NATIONALITY
table_type = TableTypes.REFERENCE
table_type_id = 14
nationality_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
priority = Column(Integer)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Nationalities_name"),)
class GeometryType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.GEOMETRY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 15
geo_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_GeometryTypes_name"),)
class GeometrySubType(BaseSpatiaLite):
__tablename__ = constants.GEOMETRY_SUBTYPE
table_type = TableTypes.REFERENCE
table_type_id = 16
geo_sub_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
parent = Column(
UUIDType, ForeignKey("GeometryTypes.geo_type_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
UniqueConstraint("name", "parent", name="uq_GeometrySubTypes_name_parent"),
CheckConstraint("name <> ''", name="ck_GeometrySubTypes_name"),
)
class User(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.USER
table_type = TableTypes.REFERENCE
table_type_id = 17
user_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Users_name"),)
class UnitType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.UNIT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 18
unit_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_UnitTypes_name"),)
class ClassificationType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CLASSIFICATION_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 19
class_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ClassificationTypes_name"),)
class ContactType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CONTACT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 20
contact_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ContactTypes_name"),)
class SensorType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.SENSOR_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 21
sensor_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_SensorTypes_name"),)
class Privacy(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.PRIVACY
table_type = TableTypes.REFERENCE
table_type_id = 22
privacy_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
level = Column(Integer, nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Privacies_name"),)
class DatafileType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.DATAFILE_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 23
datafile_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_DatafileTypes_name"),)
class MediaType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.MEDIA_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 24
media_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_MediaTypes_name"),)
class CommentType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.COMMENT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 25
comment_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_CommentTypes_name"),)
class CommodityType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.COMMODITY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 26
commodity_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_CommodityTypes_name"),)
class ConfidenceLevel(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CONFIDENCE_LEVEL
table_type = TableTypes.REFERENCE
table_type_id = 27
confidence_level_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ConfidenceLevels_name"),)
class Comment(BaseSpatiaLite, CommentMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sensor_name = "N/A"
__tablename__ = constants.COMMENT
table_type = TableTypes.MEASUREMENT
table_type_id = 32
comment_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
time = Column(TIMESTAMP, nullable=False)
comment_type_id = Column(
UUIDType, ForeignKey("CommentTypes.comment_type_id", onupdate="cascade"), nullable=False
)
content = Column(Text, nullable=False)
source_id = Column(
UUIDType, ForeignKey("Datafiles.datafile_id", onupdate="cascade"), nullable=False
)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"))
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("content <> ''", name="ck_Comments_content"),)
class Media(BaseSpatiaLite, MediaMixin, ElevationPropertyMixin, LocationPropertyMixin):
__tablename__ = constants.MEDIA
table_type = TableTypes.MEASUREMENT
table_type_id = 34
media_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
subject_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id", onupdate="cascade"))
_location = deferred(
Column(
"location",
Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=False),
)
)
_elevation = deferred(Column("elevation", REAL))
time = Column(TIMESTAMP)
media_type_id = Column(
UUIDType, ForeignKey("MediaTypes.media_type_id", onupdate="cascade"), nullable=False
)
url = deferred(Column(String(150), nullable=False))
source_id = Column(
UUIDType, ForeignKey("Datafiles.datafile_id", onupdate="cascade"), nullable=False
)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"))
remarks = Column(Text)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("url <> ''", name="ck_Media_url"),)
with op.batch_alter_table("Tasks", schema=None, copy_from=Task.__table__) as batch_op:
batch_op.drop_constraint("ck_Tasks_name", type_="check")
with op.batch_alter_table("Synonyms", schema=None, copy_from=Synonym.__table__) as batch_op:
batch_op.drop_constraint("ck_Synonyms_table", type_="check")
batch_op.drop_constraint("ck_Synonyms_synonym", type_="check")
with op.batch_alter_table("Changes", schema=None, copy_from=Change.__table__) as batch_op:
batch_op.drop_constraint("ck_Changes_user", type_="check")
batch_op.drop_constraint("ck_Changes_reason", type_="check")
with op.batch_alter_table("Logs", schema=None, copy_from=Log.__table__) as batch_op:
batch_op.drop_constraint("ck_Logs_table", type_="check")
with op.batch_alter_table(
"PlatformTypes", schema=None, copy_from=PlatformType.__table__
) as batch_op:
batch_op.drop_constraint("ck_PlatformTypes_name", type_="check")
with op.batch_alter_table(
"Nationalities", schema=None, copy_from=Nationality.__table__
) as batch_op:
batch_op.drop_constraint("ck_Nationalities_name", type_="check")
with op.batch_alter_table(
"GeometryTypes", schema=None, copy_from=GeometryType.__table__
) as batch_op:
batch_op.drop_constraint("ck_GeometryTypes_name", type_="check")
with op.batch_alter_table(
"GeometrySubTypes", schema=None, copy_from=GeometrySubType.__table__
) as batch_op:
batch_op.drop_constraint("ck_GeometrySubTypes_name", type_="check")
with op.batch_alter_table("Users", schema=None, copy_from=User.__table__) as batch_op:
batch_op.drop_constraint("ck_Users_name", type_="check")
with op.batch_alter_table("UnitTypes", schema=None, copy_from=UnitType.__table__) as batch_op:
batch_op.drop_constraint("ck_UnitTypes_name", type_="check")
with op.batch_alter_table(
"ClassificationTypes", schema=None, copy_from=ClassificationType.__table__
) as batch_op:
batch_op.drop_constraint("ck_ClassificationTypes_name", type_="check")
with op.batch_alter_table(
"ContactTypes", schema=None, copy_from=ContactType.__table__
) as batch_op:
batch_op.drop_constraint("ck_ContactTypes_name", type_="check")
with op.batch_alter_table(
"SensorTypes", schema=None, copy_from=SensorType.__table__
) as batch_op:
batch_op.drop_constraint("ck_SensorTypes_name", type_="check")
with op.batch_alter_table("Privacies", schema=None, copy_from=Privacy.__table__) as batch_op:
batch_op.drop_constraint("ck_Privacies_name", type_="check")
with op.batch_alter_table(
"DatafileTypes", schema=None, copy_from=DatafileType.__table__
) as batch_op:
batch_op.drop_constraint("ck_DatafileTypes_name", type_="check")
with op.batch_alter_table("MediaTypes", schema=None, copy_from=MediaType.__table__) as batch_op:
batch_op.drop_constraint("ck_MediaTypes_name", type_="check")
with op.batch_alter_table(
"CommentTypes", schema=None, copy_from=CommentType.__table__
) as batch_op:
batch_op.drop_constraint("ck_CommentTypes_name", type_="check")
with op.batch_alter_table(
"CommodityTypes", schema=None, copy_from=CommodityType.__table__
) as batch_op:
batch_op.drop_constraint("ck_CommodityTypes_name", type_="check")
with op.batch_alter_table(
"ConfidenceLevels", schema=None, copy_from=ConfidenceLevel.__table__
) as batch_op:
batch_op.drop_constraint("ck_ConfidenceLevels_name", type_="check")
with op.batch_alter_table("Comments", schema=None, copy_from=Comment.__table__) as batch_op:
batch_op.drop_constraint("ck_Comments_content", type_="check")
with op.batch_alter_table("Media", schema=None, copy_from=Media.__table__) as batch_op:
batch_op.drop_constraint("ck_Media_url", type_="check")
|
[
"sqlalchemy.MetaData",
"sqlalchemy.orm.declarative_base",
"sqlalchemy.sql.schema.UniqueConstraint",
"sqlalchemy.ForeignKey",
"geoalchemy2.Geometry",
"sqlalchemy.Column",
"sqlalchemy.String",
"sqlalchemy.sql.schema.CheckConstraint",
"alembic.op.batch_alter_table"
] |
[((4668, 4720), 'sqlalchemy.MetaData', 'MetaData', ([], {'naming_convention': 'sqlite_naming_convention'}), '(naming_convention=sqlite_naming_convention)\n', (4676, 4720), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((4742, 4777), 'sqlalchemy.orm.declarative_base', 'declarative_base', ([], {'metadata': 'Metadata'}), '(metadata=Metadata)\n', (4758, 4777), False, 'from sqlalchemy.orm import declarative_base, deferred\n'), ((1107, 1149), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Tasks"""'], {'schema': 'None'}), "('Tasks', schema=None)\n", (1127, 1149), False, 'from alembic import op\n'), ((1255, 1300), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Synonyms"""'], {'schema': 'None'}), "('Synonyms', schema=None)\n", (1275, 1300), False, 'from alembic import op\n'), ((1506, 1550), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Changes"""'], {'schema': 'None'}), "('Changes', schema=None)\n", (1526, 1550), False, 'from alembic import op\n'), ((1746, 1787), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Logs"""'], {'schema': 'None'}), "('Logs', schema=None)\n", (1766, 1787), False, 'from alembic import op\n'), ((1898, 1948), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""PlatformTypes"""'], {'schema': 'None'}), "('PlatformTypes', schema=None)\n", (1918, 1948), False, 'from alembic import op\n'), ((2062, 2112), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Nationalities"""'], {'schema': 'None'}), "('Nationalities', schema=None)\n", (2082, 2112), False, 'from alembic import op\n'), ((2226, 2276), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""GeometryTypes"""'], {'schema': 'None'}), "('GeometryTypes', schema=None)\n", (2246, 2276), False, 'from alembic import op\n'), ((2390, 2443), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""GeometrySubTypes"""'], {'schema': 'None'}), "('GeometrySubTypes', schema=None)\n", (2410, 2443), False, 'from alembic import op\n'), ((2560, 2602), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Users"""'], {'schema': 'None'}), "('Users', schema=None)\n", (2580, 2602), False, 'from alembic import op\n'), ((2708, 2754), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""UnitTypes"""'], {'schema': 'None'}), "('UnitTypes', schema=None)\n", (2728, 2754), False, 'from alembic import op\n'), ((2864, 2920), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ClassificationTypes"""'], {'schema': 'None'}), "('ClassificationTypes', schema=None)\n", (2884, 2920), False, 'from alembic import op\n'), ((3040, 3089), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ContactTypes"""'], {'schema': 'None'}), "('ContactTypes', schema=None)\n", (3060, 3089), False, 'from alembic import op\n'), ((3202, 3250), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""SensorTypes"""'], {'schema': 'None'}), "('SensorTypes', schema=None)\n", (3222, 3250), False, 'from alembic import op\n'), ((3362, 3408), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Privacies"""'], {'schema': 'None'}), "('Privacies', schema=None)\n", (3382, 3408), False, 'from alembic import op\n'), ((3518, 3568), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""DatafileTypes"""'], {'schema': 'None'}), "('DatafileTypes', schema=None)\n", (3538, 3568), False, 'from alembic import op\n'), ((3682, 3729), 'alembic.op.batch_alter_table', 'op.batch_alter_table', 
(['"""MediaTypes"""'], {'schema': 'None'}), "('MediaTypes', schema=None)\n", (3702, 3729), False, 'from alembic import op\n'), ((3840, 3889), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""CommentTypes"""'], {'schema': 'None'}), "('CommentTypes', schema=None)\n", (3860, 3889), False, 'from alembic import op\n'), ((4002, 4053), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""CommodityTypes"""'], {'schema': 'None'}), "('CommodityTypes', schema=None)\n", (4022, 4053), False, 'from alembic import op\n'), ((4168, 4221), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ConfidenceLevels"""'], {'schema': 'None'}), "('ConfidenceLevels', schema=None)\n", (4188, 4221), False, 'from alembic import op\n'), ((4338, 4383), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Comments"""'], {'schema': 'None'}), "('Comments', schema=None)\n", (4358, 4383), False, 'from alembic import op\n'), ((4498, 4540), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Media"""'], {'schema': 'None'}), "('Media', schema=None)\n", (4518, 4540), False, 'from alembic import op\n'), ((4936, 4985), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (4942, 4985), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5177, 5210), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {'nullable': '(False)'}), '(TIMESTAMP, nullable=False)\n', (5183, 5210), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5225, 5258), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {'nullable': '(False)'}), '(TIMESTAMP, nullable=False)\n', (5231, 5258), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5515, 5556), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (5521, 5556), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5819, 5868), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (5825, 5868), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5938, 5970), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'nullable': '(False)'}), '(UUIDType, nullable=False)\n', (5944, 5970), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6048, 6089), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (6054, 6089), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6440, 6489), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (6446, 6489), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6560, 6588), 'sqlalchemy.Column', 'Column', (['DATE'], {'nullable': '(False)'}), '(DATE, nullable=False)\n', (6566, 6588), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6665, 6706), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (6671, 6706), 
False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7048, 7097), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (7054, 7097), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7163, 7195), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'nullable': '(False)'}), '(UUIDType, nullable=False)\n', (7169, 7195), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7423, 7464), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (7429, 7464), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7752, 7801), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (7758, 7801), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7936, 7977), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (7942, 7977), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8263, 8312), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (8269, 8312), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8443, 8458), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (8449, 8458), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8482, 8523), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (8488, 8523), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8809, 8858), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (8815, 8858), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8993, 9034), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (8999, 9034), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9315, 9364), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (9321, 9364), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9572, 9613), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (9578, 9613), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9991, 10040), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (9997, 10040), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((10175, 10216), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 
'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (10181, 10216), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((10487, 10536), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (10493, 10536), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((10671, 10712), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (10677, 10712), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11008, 11057), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (11014, 11057), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11192, 11233), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (11198, 11233), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11527, 11576), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (11533, 11576), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11711, 11752), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (11717, 11752), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12036, 12085), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (12042, 12085), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12220, 12261), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (12226, 12261), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12533, 12582), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (12539, 12582), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12710, 12741), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (12716, 12741), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12765, 12806), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (12771, 12806), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13093, 13142), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (13099, 13142), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13277, 13318), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (13283, 13318), False, 'from 
sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13600, 13649), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (13606, 13649), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13784, 13825), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (13790, 13825), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14110, 14159), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (14116, 14159), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14294, 14335), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (14300, 14335), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14628, 14677), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (14634, 14677), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14812, 14853), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (14818, 14853), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((15154, 15203), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (15160, 15203), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((15338, 15379), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (15344, 15379), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((15786, 15835), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (15792, 15835), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((15947, 15980), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {'nullable': '(False)'}), '(TIMESTAMP, nullable=False)\n', (15953, 15980), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16144, 16172), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (16150, 16172), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16422, 16463), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (16428, 16463), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16778, 16827), 'sqlalchemy.Column', 'Column', (['UUIDType'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUIDType, primary_key=True, default=uuid4)\n', (16784, 16827), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17381, 17398), 
'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {}), '(TIMESTAMP)\n', (17387, 17398), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17842, 17854), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (17848, 17854), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17878, 17919), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (17884, 17919), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((18009, 18077), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Tasks"""'], {'schema': 'None', 'copy_from': 'Task.__table__'}), "('Tasks', schema=None, copy_from=Task.__table__)\n", (18029, 18077), False, 'from alembic import op\n'), ((18166, 18240), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Synonyms"""'], {'schema': 'None', 'copy_from': 'Synonym.__table__'}), "('Synonyms', schema=None, copy_from=Synonym.__table__)\n", (18186, 18240), False, 'from alembic import op\n'), ((18404, 18476), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Changes"""'], {'schema': 'None', 'copy_from': 'Change.__table__'}), "('Changes', schema=None, copy_from=Change.__table__)\n", (18424, 18476), False, 'from alembic import op\n'), ((18636, 18702), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Logs"""'], {'schema': 'None', 'copy_from': 'Log.__table__'}), "('Logs', schema=None, copy_from=Log.__table__)\n", (18656, 18702), False, 'from alembic import op\n'), ((18791, 18880), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""PlatformTypes"""'], {'schema': 'None', 'copy_from': 'PlatformType.__table__'}), "('PlatformTypes', schema=None, copy_from=PlatformType.\n __table__)\n", (18811, 18880), False, 'from alembic import op\n'), ((18986, 19074), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Nationalities"""'], {'schema': 'None', 'copy_from': 'Nationality.__table__'}), "('Nationalities', schema=None, copy_from=Nationality.\n __table__)\n", (19006, 19074), False, 'from alembic import op\n'), ((19180, 19269), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""GeometryTypes"""'], {'schema': 'None', 'copy_from': 'GeometryType.__table__'}), "('GeometryTypes', schema=None, copy_from=GeometryType.\n __table__)\n", (19200, 19269), False, 'from alembic import op\n'), ((19375, 19470), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""GeometrySubTypes"""'], {'schema': 'None', 'copy_from': 'GeometrySubType.__table__'}), "('GeometrySubTypes', schema=None, copy_from=\n GeometrySubType.__table__)\n", (19395, 19470), False, 'from alembic import op\n'), ((19579, 19647), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Users"""'], {'schema': 'None', 'copy_from': 'User.__table__'}), "('Users', schema=None, copy_from=User.__table__)\n", (19599, 19647), False, 'from alembic import op\n'), ((19736, 19812), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""UnitTypes"""'], {'schema': 'None', 'copy_from': 'UnitType.__table__'}), "('UnitTypes', schema=None, copy_from=UnitType.__table__)\n", (19756, 19812), False, 'from alembic import op\n'), ((19905, 20006), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ClassificationTypes"""'], {'schema': 'None', 'copy_from': 'ClassificationType.__table__'}), "('ClassificationTypes', schema=None, copy_from=\n 
ClassificationType.__table__)\n", (19925, 20006), False, 'from alembic import op\n'), ((20118, 20205), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ContactTypes"""'], {'schema': 'None', 'copy_from': 'ContactType.__table__'}), "('ContactTypes', schema=None, copy_from=ContactType.\n __table__)\n", (20138, 20205), False, 'from alembic import op\n'), ((20310, 20395), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""SensorTypes"""'], {'schema': 'None', 'copy_from': 'SensorType.__table__'}), "('SensorTypes', schema=None, copy_from=SensorType.__table__\n )\n", (20330, 20395), False, 'from alembic import op\n'), ((20499, 20574), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Privacies"""'], {'schema': 'None', 'copy_from': 'Privacy.__table__'}), "('Privacies', schema=None, copy_from=Privacy.__table__)\n", (20519, 20574), False, 'from alembic import op\n'), ((20667, 20756), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""DatafileTypes"""'], {'schema': 'None', 'copy_from': 'DatafileType.__table__'}), "('DatafileTypes', schema=None, copy_from=DatafileType.\n __table__)\n", (20687, 20756), False, 'from alembic import op\n'), ((20862, 20940), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""MediaTypes"""'], {'schema': 'None', 'copy_from': 'MediaType.__table__'}), "('MediaTypes', schema=None, copy_from=MediaType.__table__)\n", (20882, 20940), False, 'from alembic import op\n'), ((21034, 21121), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""CommentTypes"""'], {'schema': 'None', 'copy_from': 'CommentType.__table__'}), "('CommentTypes', schema=None, copy_from=CommentType.\n __table__)\n", (21054, 21121), False, 'from alembic import op\n'), ((21226, 21317), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""CommodityTypes"""'], {'schema': 'None', 'copy_from': 'CommodityType.__table__'}), "('CommodityTypes', schema=None, copy_from=CommodityType\n .__table__)\n", (21246, 21317), False, 'from alembic import op\n'), ((21424, 21519), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""ConfidenceLevels"""'], {'schema': 'None', 'copy_from': 'ConfidenceLevel.__table__'}), "('ConfidenceLevels', schema=None, copy_from=\n ConfidenceLevel.__table__)\n", (21444, 21519), False, 'from alembic import op\n'), ((21628, 21702), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Comments"""'], {'schema': 'None', 'copy_from': 'Comment.__table__'}), "('Comments', schema=None, copy_from=Comment.__table__)\n", (21648, 21702), False, 'from alembic import op\n'), ((21797, 21866), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""Media"""'], {'schema': 'None', 'copy_from': 'Media.__table__'}), "('Media', schema=None, copy_from=Media.__table__)\n", (21817, 21866), False, 'from alembic import op\n'), ((5008, 5019), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (5014, 5019), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5087, 5134), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Tasks.task_id"""'], {'onupdate': '"""cascade"""'}), "('Tasks.task_id', onupdate='cascade')\n", (5097, 5134), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5411, 5465), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Privacies.privacy_id"""'], {'onupdate': '"""cascade"""'}), "('Privacies.privacy_id', onupdate='cascade')\n", (5421, 5465), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, 
Integer, MetaData, String, Text\n'), ((5584, 5635), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_Tasks_name"""'}), '("name <> \'\'", name=\'ck_Tasks_name\')\n', (5599, 5635), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((5892, 5903), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (5898, 5903), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5996, 6007), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (6002, 6007), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6130, 6190), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['""""table" <> \'\'"""'], {'name': '"""ck_Synonyms_table"""'}), '(\'"table" <> \\\'\\\'\', name=\'ck_Synonyms_table\')\n', (6145, 6190), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((6204, 6264), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""synonym <> \'\'"""'], {'name': '"""ck_Synonyms_synonym"""'}), '("synonym <> \'\'", name=\'ck_Synonyms_synonym\')\n', (6219, 6264), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((6512, 6523), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (6518, 6523), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6613, 6624), 'sqlalchemy.String', 'String', (['(500)'], {}), '(500)\n', (6619, 6624), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((6747, 6800), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""user <> \'\'"""'], {'name': '"""ck_Changes_user"""'}), '("user <> \'\'", name=\'ck_Changes_user\')\n', (6762, 6800), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((6814, 6871), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""reason <> \'\'"""'], {'name': '"""ck_Changes_reason"""'}), '("reason <> \'\'", name=\'ck_Changes_reason\')\n', (6829, 6871), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((7121, 7132), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (7127, 7132), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7219, 7230), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (7225, 7230), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7259, 7270), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (7265, 7270), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7322, 7373), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Changes.change_id"""'], {'onupdate': '"""cascade"""'}), "('Changes.change_id', onupdate='cascade')\n", (7332, 7373), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((7492, 7548), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['""""table" <> \'\'"""'], {'name': '"""ck_Logs_table"""'}), '(\'"table" <> \\\'\\\'\', name=\'ck_Logs_table\')\n', (7507, 7548), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((7837, 7848), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (7843, 7848), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, 
String, Text\n'), ((8005, 8064), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_PlatformTypes_name"""'}), '("name <> \'\'", name=\'ck_PlatformTypes_name\')\n', (8020, 8064), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((8348, 8359), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (8354, 8359), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((8551, 8610), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_Nationalities_name"""'}), '("name <> \'\'", name=\'ck_Nationalities_name\')\n', (8566, 8610), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((8894, 8905), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (8900, 8905), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9062, 9121), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_GeometryTypes_name"""'}), '("name <> \'\'", name=\'ck_GeometryTypes_name\')\n', (9077, 9121), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((9387, 9398), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (9393, 9398), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9463, 9522), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""GeometryTypes.geo_type_id"""'], {'onupdate': '"""cascade"""'}), "('GeometryTypes.geo_type_id', onupdate='cascade')\n", (9473, 9522), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((9654, 9728), 'sqlalchemy.sql.schema.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""parent"""'], {'name': '"""uq_GeometrySubTypes_name_parent"""'}), "('name', 'parent', name='uq_GeometrySubTypes_name_parent')\n", (9670, 9728), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((9742, 9804), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_GeometrySubTypes_name"""'}), '("name <> \'\'", name=\'ck_GeometrySubTypes_name\')\n', (9757, 9804), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((10076, 10087), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (10082, 10087), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((10244, 10295), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_Users_name"""'}), '("name <> \'\'", name=\'ck_Users_name\')\n', (10259, 10295), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((10572, 10583), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (10578, 10583), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((10740, 10795), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_UnitTypes_name"""'}), '("name <> \'\'", name=\'ck_UnitTypes_name\')\n', (10755, 10795), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((11093, 11104), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (11099, 11104), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11261, 11326), 
'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_ClassificationTypes_name"""'}), '("name <> \'\'", name=\'ck_ClassificationTypes_name\')\n', (11276, 11326), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((11612, 11623), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (11618, 11623), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((11780, 11838), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_ContactTypes_name"""'}), '("name <> \'\'", name=\'ck_ContactTypes_name\')\n', (11795, 11838), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((12121, 12132), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (12127, 12132), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12289, 12346), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_SensorTypes_name"""'}), '("name <> \'\'", name=\'ck_SensorTypes_name\')\n', (12304, 12346), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((12618, 12629), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (12624, 12629), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((12834, 12889), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_Privacies_name"""'}), '("name <> \'\'", name=\'ck_Privacies_name\')\n', (12849, 12889), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((13178, 13189), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (13184, 13189), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13346, 13405), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_DatafileTypes_name"""'}), '("name <> \'\'", name=\'ck_DatafileTypes_name\')\n', (13361, 13405), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((13685, 13696), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (13691, 13696), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((13853, 13909), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_MediaTypes_name"""'}), '("name <> \'\'", name=\'ck_MediaTypes_name\')\n', (13868, 13909), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((14195, 14206), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (14201, 14206), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14363, 14421), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_CommentTypes_name"""'}), '("name <> \'\'", name=\'ck_CommentTypes_name\')\n', (14378, 14421), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((14713, 14724), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (14719, 14724), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((14881, 14941), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_CommodityTypes_name"""'}), 
'("name <> \'\'", name=\'ck_CommodityTypes_name\')\n', (14896, 14941), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((15239, 15250), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (15245, 15250), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((15407, 15469), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""name <> \'\'"""'], {'name': '"""ck_ConfidenceLevels_name"""'}), '("name <> \'\'", name=\'ck_ConfidenceLevels_name\')\n', (15422, 15469), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((15875, 15930), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Platforms.platform_id"""'], {'onupdate': '"""cascade"""'}), "('Platforms.platform_id', onupdate='cascade')\n", (15885, 15930), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16037, 16099), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""CommentTypes.comment_type_id"""'], {'onupdate': '"""cascade"""'}), "('CommentTypes.comment_type_id', onupdate='cascade')\n", (16047, 16099), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16223, 16278), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Datafiles.datafile_id"""'], {'onupdate': '"""cascade"""'}), "('Datafiles.datafile_id', onupdate='cascade')\n", (16233, 16278), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16343, 16397), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Privacies.privacy_id"""'], {'onupdate': '"""cascade"""'}), "('Privacies.privacy_id', onupdate='cascade')\n", (16353, 16397), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16491, 16551), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""content <> \'\'"""'], {'name': '"""ck_Comments_content"""'}), '("content <> \'\'", name=\'ck_Comments_content\')\n', (16506, 16551), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((16867, 16922), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Platforms.platform_id"""'], {'onupdate': '"""cascade"""'}), "('Platforms.platform_id', onupdate='cascade')\n", (16877, 16922), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((16962, 17017), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Platforms.platform_id"""'], {'onupdate': '"""cascade"""'}), "('Platforms.platform_id', onupdate='cascade')\n", (16972, 17017), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17056, 17107), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Sensors.sensor_id"""'], {'onupdate': '"""cascade"""'}), "('Sensors.sensor_id', onupdate='cascade')\n", (17066, 17107), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17339, 17364), 'sqlalchemy.Column', 'Column', (['"""elevation"""', 'REAL'], {}), "('elevation', REAL)\n", (17345, 17364), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17453, 17511), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""MediaTypes.media_type_id"""'], {'onupdate': '"""cascade"""'}), "('MediaTypes.media_type_id', onupdate='cascade')\n", (17463, 17511), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17648, 17703), 
'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Datafiles.datafile_id"""'], {'onupdate': '"""cascade"""'}), "('Datafiles.datafile_id', onupdate='cascade')\n", (17658, 17703), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17768, 17822), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Privacies.privacy_id"""'], {'onupdate': '"""cascade"""'}), "('Privacies.privacy_id', onupdate='cascade')\n", (17778, 17822), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17947, 17996), 'sqlalchemy.sql.schema.CheckConstraint', 'CheckConstraint', (['"""url <> \'\'"""'], {'name': '"""ck_Media_url"""'}), '("url <> \'\'", name=\'ck_Media_url\')\n', (17962, 17996), False, 'from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint\n'), ((5297, 5308), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (5303, 5308), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((5346, 5357), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (5352, 5357), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n'), ((17203, 17288), 'geoalchemy2.Geometry', 'Geometry', ([], {'geometry_type': '"""POINT"""', 'srid': '(4326)', 'management': '(True)', 'spatial_index': '(False)'}), "(geometry_type='POINT', srid=4326, management=True, spatial_index=False\n )\n", (17211, 17288), False, 'from geoalchemy2 import Geometry\n'), ((17568, 17579), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (17574, 17579), False, 'from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text\n')]
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Key Class
This module defines the Key class. The Key class is the base class to
represent all encryption keys. The basis for this class was copied
from Java.
"""
from castellan.common.objects import managed_object
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Key(managed_object.ManagedObject):
"""Base class to represent all keys."""
@abc.abstractproperty
def algorithm(self):
"""Returns the key's algorithm.
Returns the key's algorithm. For example, "DSA" indicates that this key
is a DSA key and "AES" indicates that this key is an AES key.
"""
pass
@abc.abstractproperty
def bit_length(self):
"""Returns the key's bit length.
Returns the key's bit length. For example, for AES symmetric keys,
this refers to the length of the key, and for RSA keys, this refers to
the length of the modulus.
"""
pass
|
[
"six.add_metaclass"
] |
[((927, 957), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (944, 957), False, 'import six\n')]
|
from utils import *
from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame
from chinese_checkers.tensorflow.ResNet import NNetWrapper as nn
from chinese_checkers.Evaluator import Evaluator
from MCTS import MCTS
from chinese_checkers.InitializeAgent import InitializeAgent
from chinese_checkers.GreedyAgent import GreedyAgent
from chinese_checkers.TinyGUI import GUI
import numpy as np
args = dotdict({
'numMCTSSims': 2,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 41),
})
args2 = dotdict({
'numMCTSSims': 200,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 12),
})
game = ChineseCheckersGame()
gui = GUI(1)
nn1 = nn(game)
nn1.load_first_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
mcts1 = MCTS(game, nn1, args)
# nn2 = nn(game)
# nn2.load_first_checkpoint(args2.load_folder_file[0], args2.load_folder_file[1])
# mcts2 = MCTS(game, nn2, args2)
actor = InitializeAgent(game)
forward = GreedyAgent(game)
evaluator = Evaluator(None, mcts1, mcts1, game, gui, True)
scores_all = np.zeros((3, 3))
steps_all = 0
wrong_win_all = 0
for _ in range(20):
scores, steps, wrong_win = evaluator.play_game(1, 1)
for p in range(3):
if scores[p] == 3:
scores_all[p,0] += 1
elif scores[p] == 1:
scores_all[p,1] += 1
else:
scores_all[p,2] += 1
steps_all += steps
wrong_win_all += wrong_win
print(scores_all)
|
[
"chinese_checkers.Evaluator.Evaluator",
"chinese_checkers.GreedyAgent.GreedyAgent",
"chinese_checkers.InitializeAgent.InitializeAgent",
"chinese_checkers.TinyChineseCheckersGame.ChineseCheckersGame",
"numpy.zeros",
"MCTS.MCTS",
"chinese_checkers.tensorflow.ResNet.NNetWrapper",
"chinese_checkers.TinyGUI.GUI"
] |
[((667, 688), 'chinese_checkers.TinyChineseCheckersGame.ChineseCheckersGame', 'ChineseCheckersGame', ([], {}), '()\n', (686, 688), False, 'from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame\n'), ((695, 701), 'chinese_checkers.TinyGUI.GUI', 'GUI', (['(1)'], {}), '(1)\n', (698, 701), False, 'from chinese_checkers.TinyGUI import GUI\n'), ((708, 716), 'chinese_checkers.tensorflow.ResNet.NNetWrapper', 'nn', (['game'], {}), '(game)\n', (710, 716), True, 'from chinese_checkers.tensorflow.ResNet import NNetWrapper as nn\n'), ((803, 824), 'MCTS.MCTS', 'MCTS', (['game', 'nn1', 'args'], {}), '(game, nn1, args)\n', (807, 824), False, 'from MCTS import MCTS\n'), ((967, 988), 'chinese_checkers.InitializeAgent.InitializeAgent', 'InitializeAgent', (['game'], {}), '(game)\n', (982, 988), False, 'from chinese_checkers.InitializeAgent import InitializeAgent\n'), ((999, 1016), 'chinese_checkers.GreedyAgent.GreedyAgent', 'GreedyAgent', (['game'], {}), '(game)\n', (1010, 1016), False, 'from chinese_checkers.GreedyAgent import GreedyAgent\n'), ((1030, 1076), 'chinese_checkers.Evaluator.Evaluator', 'Evaluator', (['None', 'mcts1', 'mcts1', 'game', 'gui', '(True)'], {}), '(None, mcts1, mcts1, game, gui, True)\n', (1039, 1076), False, 'from chinese_checkers.Evaluator import Evaluator\n'), ((1090, 1106), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1098, 1106), True, 'import numpy as np\n')]
|
from tensorflow.keras.utils import Sequence
import os
import pandas as pd
import random
import numpy as np
class DataGenerator(Sequence):
def __init__(self,
path_args,
batch_size: int,
shuffle: bool,
mode: str):
self.x_img_path = './train/'
self.x_label_path = './label/'
self.mode = mode
# train
self.x_img = os.listdir(self.x_img_path)
self.x_label = os.listdir(self.x_label_path)
# TODO validation and test dataset
self.x_list = []
self.y_list = []
self.load_dataset()
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
    def load_dataset(self):
        # assumes paired .npy arrays in train/ and label/ listed in matching order
        for i, fname in enumerate(self.x_img):
            input_data = np.load(os.path.join(self.x_img_path, fname))
            result_data = np.load(os.path.join(self.x_label_path, self.x_label[i]))
            self.x_list.append(input_data)
            self.y_list.append(result_data.astype(np.float))
def get_data_len(self):
return len(self.x_list), len(self.y_list)
def __len__(self):
return int(np.floor(len(self.x_list) / self.batch_size))
def on_epoch_end(self):
self.indexes = np.arange(len(self.x_list))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def get_input(self, index):
return self.x_list[index * self.batch_size:(index + 1) * self.batch_size]
def get_target(self, index):
return self.y_list[index * self.batch_size:(index + 1) * self.batch_size]
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
y_data = []
for j in range(start, stop):
data.append(self.x_list[j])
y_data.append(self.y_list[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
y_batch = [np.stack(samples, axis=0) for samples in zip(*y_data)]
# newer version of tf/keras want batch to be in tuple rather than list
return tuple(batch), tuple(y_batch)
|
[
"numpy.stack",
"os.listdir",
"numpy.random.shuffle"
] |
[((425, 452), 'os.listdir', 'os.listdir', (['self.x_img_path'], {}), '(self.x_img_path)\n', (435, 452), False, 'import os\n'), ((476, 505), 'os.listdir', 'os.listdir', (['self.x_label_path'], {}), '(self.x_label_path)\n', (486, 505), False, 'import os\n'), ((1198, 1229), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (1215, 1229), True, 'import numpy as np\n'), ((1808, 1833), 'numpy.stack', 'np.stack', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (1816, 1833), True, 'import numpy as np\n'), ((1880, 1905), 'numpy.stack', 'np.stack', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (1888, 1905), True, 'import numpy as np\n')]
|
from evennia_extensions.object_extensions.storage_wrappers import StorageWrapper
class RosterEntryWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.roster
def create_new_storage(self, instance):
raise AttributeError("This object does not have a RosterEntry to store that.")
class CharacterSheetWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactersheet
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import CharacterSheet
return CharacterSheet.objects.create(objectdb=instance.obj)
class CombatSettingsWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactercombatsettings
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import (
CharacterCombatSettings,
)
return CharacterCombatSettings.objects.create(objectdb=instance.obj)
class MessengerSettingsWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactermessengersettings
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import (
CharacterMessengerSettings,
)
return CharacterMessengerSettings.objects.create(objectdb=instance.obj)
|
[
"evennia_extensions.character_extensions.models.CharacterMessengerSettings.objects.create",
"evennia_extensions.character_extensions.models.CharacterSheet.objects.create",
"evennia_extensions.character_extensions.models.CharacterCombatSettings.objects.create"
] |
[((599, 651), 'evennia_extensions.character_extensions.models.CharacterSheet.objects.create', 'CharacterSheet.objects.create', ([], {'objectdb': 'instance.obj'}), '(objectdb=instance.obj)\n', (628, 651), False, 'from evennia_extensions.character_extensions.models import CharacterSheet\n'), ((965, 1026), 'evennia_extensions.character_extensions.models.CharacterCombatSettings.objects.create', 'CharacterCombatSettings.objects.create', ([], {'objectdb': 'instance.obj'}), '(objectdb=instance.obj)\n', (1003, 1026), False, 'from evennia_extensions.character_extensions.models import CharacterCombatSettings\n'), ((1349, 1413), 'evennia_extensions.character_extensions.models.CharacterMessengerSettings.objects.create', 'CharacterMessengerSettings.objects.create', ([], {'objectdb': 'instance.obj'}), '(objectdb=instance.obj)\n', (1390, 1413), False, 'from evennia_extensions.character_extensions.models import CharacterMessengerSettings\n')]
|
# coding: utf-8
from flask import Flask,request,session,g,redirect,url_for,Blueprint
from flask import abort,render_template,flash
from helpers import getAvatar
import config
#from .base import BaseHandler
import base
config = config.rec()
user = Blueprint('user', __name__)
#class LoginHandler(BaseHandler):
@user.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if base.isAdmin():
return redirect("/")
else:
return render_template("login.html",getAvatar=getAvatar)
username = request.form['username']
password = request.form['password']
if base.userAuth(username, password):
base.currentUserSet(username)
return redirect("/")
else:
return redirect("/login")
#class LogoutHandler(BaseHandler):
@user.route('/logout')
def logout():
session.pop('user',None)
return redirect('/login')
|
[
"flask.session.pop",
"flask.Blueprint",
"flask.redirect",
"base.currentUserSet",
"config.rec",
"flask.render_template",
"base.userAuth",
"base.isAdmin"
] |
[((229, 241), 'config.rec', 'config.rec', ([], {}), '()\n', (239, 241), False, 'import config\n'), ((250, 277), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {}), "('user', __name__)\n", (259, 277), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((641, 674), 'base.userAuth', 'base.userAuth', (['username', 'password'], {}), '(username, password)\n', (654, 674), False, 'import base\n'), ((864, 889), 'flask.session.pop', 'session.pop', (['"""user"""', 'None'], {}), "('user', None)\n", (875, 889), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((900, 918), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (908, 918), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((417, 431), 'base.isAdmin', 'base.isAdmin', ([], {}), '()\n', (429, 431), False, 'import base\n'), ((684, 713), 'base.currentUserSet', 'base.currentUserSet', (['username'], {}), '(username)\n', (703, 713), False, 'import base\n'), ((729, 742), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (737, 742), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((768, 786), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (776, 786), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((452, 465), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (460, 465), False, 'from flask import Flask, request, session, g, redirect, url_for, Blueprint\n'), ((499, 549), 'flask.render_template', 'render_template', (['"""login.html"""'], {'getAvatar': 'getAvatar'}), "('login.html', getAvatar=getAvatar)\n", (514, 549), False, 'from flask import abort, render_template, flash\n')]
|
import os
default_cf_mapping_files = [os.path.dirname(__file__) + '/default-cloudformation-mapping.yaml']
|
[
"os.path.dirname"
] |
[((39, 64), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (54, 64), False, 'import os\n')]
|
#!/usr/bin/env python
from pwn import *
import os
# Exploiting vulnerable code narnia1.c:
#
# #include <stdio.h>
#
# int main(){
# int (*ret)();
#
# if(getenv("EGG")==NULL){
# printf("Give me something to execute at the env-variable EGG\n");
# exit(1);
# }
#
# printf("Trying to execute EGG!\n");
# ret = getenv("EGG");
# ret();
#
# return 0;
# }
# Define the context of the working machine
context(arch='amd64', os='linux')
# Compile the binary
log.info("Compiling the binary narnia1_local")
os.system('gcc narnia1.c -g -o narnia1_local -fno-stack-protector -z execstack')
# os.system('gcc narnia1.c -g -m32 -o narnia1_local -fno-stack-protector -z execstack')
# Get a simple shellcode
log.info("Putting together simple shellcode")
shellcode = asm(shellcraft.amd64.sh(), arch='amd64')
# print(shellcode)
log.info("Introduce shellcode in EGG env. variable")
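# narnia1 fetches EGG with getenv() and calls it as a function pointer,
# so the raw shellcode bytes stored in the variable are executed directly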
os.environ["EGG"] = shellcode
log.info("Launching narnia1_local")
sh = process('narnia1_local')
sh.interactive()
|
[
"os.system"
] |
[((507, 592), 'os.system', 'os.system', (['"""gcc narnia1.c -g -o narnia1_local -fno-stack-protector -z execstack"""'], {}), "('gcc narnia1.c -g -o narnia1_local -fno-stack-protector -z execstack'\n )\n", (516, 592), False, 'import os\n')]
|
from geopy.geocoders import ArcGIS
import pyproj
class Georeferencer:
def __init__(self, crs):
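        # the ArcGIS geocoder returns WGS84 (EPSG:4326) latitude/longitude;
        # the transformer projects those coordinates into the target CRS given by crs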
self.georeferencer = ArcGIS()
self.transformer = pyproj.Transformer.from_crs("EPSG:4326", f"EPSG:{crs}")
def georeference(self, addresses):
result = {}
for address in addresses:
location = self.georeferencer.geocode(address)
x, y = self.transformer.transform(location.latitude, location.longitude)
result[address] = {"x": x, "y": y}
return result
if __name__ == "__main__":
    refer = Georeferencer(4326)  # placeholder target CRS; pass the EPSG code you actually need
print(
refer.georeference(
[
"Špičkovina",
"Trg <NAME>",
]
)
)
|
[
"geopy.geocoders.ArcGIS",
"pyproj.Transformer.from_crs"
] |
[((130, 138), 'geopy.geocoders.ArcGIS', 'ArcGIS', ([], {}), '()\n', (136, 138), False, 'from geopy.geocoders import ArcGIS\n'), ((166, 221), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['"""EPSG:4326"""', 'f"""EPSG:{crs}"""'], {}), "('EPSG:4326', f'EPSG:{crs}')\n", (193, 221), False, 'import pyproj\n')]
|
"""Client is the primary interface for interacting with the Goodreads API. This integration test makes live API
calls and affirms that the correct objects are being returned. For a more comprehensive test that each of the interface
objects is created and functions properly when given the correct inputs, check the unit test suite."""
import os
import pytest
from betterreads.author import GoodreadsAuthor
from betterreads.book import GoodreadsBook
from betterreads.client import GoodreadsClient, GoodreadsClientException
from betterreads.comment import GoodreadsComment
from betterreads.event import GoodreadsEvent
from betterreads.group import GoodreadsGroup
from betterreads.review import GoodreadsReview
class TestClient:
@pytest.fixture
def test_client_fixture(self):
return GoodreadsClient(
os.environ.get("GOODREADS_KEY"), os.environ.get("GOODREADS_SECRET")
)
def test_auth_user_no_session(self, test_client_fixture):
with pytest.raises(GoodreadsClientException):
test_client_fixture.auth_user()
def test_author_by_id(self, test_client_fixture):
author = test_client_fixture.author(8566992)
assert isinstance(author, GoodreadsAuthor)
def test_author_by_name(self, test_client_fixture):
author = test_client_fixture.find_author("<NAME>")
assert isinstance(author, GoodreadsAuthor)
def test_book_by_id(self, test_client_fixture):
book = test_client_fixture.book(123455)
assert isinstance(book, GoodreadsBook)
def test_search_books(self, test_client_fixture):
books = test_client_fixture.search_books(
q="<NAME>", search_field="author"
)
assert len(books) > 1
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_book_no_options_given(self, test_client_fixture):
with pytest.raises(GoodreadsClientException):
test_client_fixture.book(None, None)
def test_search_books_with_one_book(self, test_client_fixture):
books = test_client_fixture.search_books(
"Childhood, Boyhood, Truth: From an African Youth to the Selfish Gene"
)
assert len(books) == 1
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_group_by_id(self, test_client_fixture):
group = test_client_fixture.group(8095)
assert isinstance(group, GoodreadsGroup)
def test_find_groups(self, test_client_fixture):
groups = test_client_fixture.find_groups("Goodreads Developers")
assert len(groups) > 1
assert all(isinstance(group, GoodreadsGroup) for group in groups)
def test_list_events(self, test_client_fixture):
events = test_client_fixture.list_events(80126)
assert len(events) > 0
assert all(isinstance(event, GoodreadsEvent) for event in events)
def test_search_books_total_pages(self, test_client_fixture):
num_pages = test_client_fixture.search_books_total_pages(
q="<NAME>", search_field="author"
)
assert isinstance(num_pages, int)
def test_search_books_all_pages(self, test_client_fixture):
books = test_client_fixture.search_books_all_pages(
q="<NAME>", search_field="author"
)
assert len(books) > 10
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_get_review(self, test_client_fixture):
review = test_client_fixture.review(12345)
assert isinstance(review, GoodreadsReview)
def test_list_comments_review(self, test_client_fixture):
comments = test_client_fixture.list_comments("review", 1618778364)
assert all(isinstance(comment, GoodreadsComment) for comment in comments)
def test_get_recent_reviews(self, test_client_fixture):
reviews = test_client_fixture.recent_reviews()
assert all(isinstance(review, GoodreadsReview) for review in reviews)
|
[
"os.environ.get",
"pytest.raises"
] |
[((829, 860), 'os.environ.get', 'os.environ.get', (['"""GOODREADS_KEY"""'], {}), "('GOODREADS_KEY')\n", (843, 860), False, 'import os\n'), ((862, 896), 'os.environ.get', 'os.environ.get', (['"""GOODREADS_SECRET"""'], {}), "('GOODREADS_SECRET')\n", (876, 896), False, 'import os\n'), ((983, 1022), 'pytest.raises', 'pytest.raises', (['GoodreadsClientException'], {}), '(GoodreadsClientException)\n', (996, 1022), False, 'import pytest\n'), ((1880, 1919), 'pytest.raises', 'pytest.raises', (['GoodreadsClientException'], {}), '(GoodreadsClientException)\n', (1893, 1919), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
# Time : 2021/12/22 16:15
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import ast
from datetime import timedelta, datetime
from typing import List, Optional, Union
from redis.exceptions import ConnectionError, ResponseError
from services.middleware.stream_io import RedisClient
from services.settings import TIME_ZONE_CN, POOL_CAP
class EntropyHeap(RedisClient):
def __init__(self):
super().__init__()
def update(self, local_entropy: List[dict]):
self.db.lpush(self.PREFIX_ENTROPY, str(local_entropy))
def sync(self) -> List[dict]:
try:
response = self.db.lrange(self.PREFIX_ENTROPY, 0, 1)
if response:
remote_entropy = ast.literal_eval(
self.db.lrange(self.PREFIX_ENTROPY, 0, 1)[0]
)
return remote_entropy
return []
except ConnectionError:
return []
def set_new_cap(self, new_cap: int):
"""
        Set a new unified queue capacity.
:param new_cap:
:return:
"""
self.db.set(name=self.PREFIX_CAPACITY, value=new_cap)
def get_unified_cap(self) -> int:
"""
        Return the unified queue capacity; fall back to POOL_CAP from the settings if none has been set.
:return:
"""
_unified_cap = self.db.get(self.PREFIX_CAPACITY)
return int(_unified_cap) if _unified_cap else POOL_CAP
def is_empty(self) -> bool:
return not bool(self.db.llen(self.PREFIX_ENTROPY))
class MessageQueue(RedisClient):
def __init__(self):
super().__init__()
self.group_name = "tasks_group"
self.consumer_name = "hexo"
self.max_queue_size = 5600
self.SYNERGY_PROTOCOL = "SYNERGY"
self.automated()
def is_exists_group(self, group_name: str) -> bool:
try:
groups = self.db.xinfo_groups(self.PREFIX_STREAM)
for group in groups:
if group.get("name", "") == group_name:
return True
return False
except ResponseError:
return False
def automated(self) -> None:
if not self.is_exists_group(self.group_name):
self.db.xgroup_create(
self.PREFIX_STREAM, self.group_name, id="0", mkstream=True
)
def ack(self, message_id: str) -> None:
self.db.xack(self.PREFIX_STREAM, self.group_name, message_id)
def broadcast_synergy_context(self, context: Union[dict, str]) -> None:
context = str(context) if isinstance(context, dict) else context
synergy_context = {self.SYNERGY_PROTOCOL: context}
self.db.xadd(
name=self.PREFIX_STREAM,
fields=synergy_context,
maxlen=self.max_queue_size,
approximate=True,
)
def listen(self, count: Optional[int] = None, block: Optional[int] = None):
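        # XREADGROUP with the ">" cursor returns only messages never delivered to
        # this consumer group; each batch is yielded so the caller can process and ack() it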
while True:
try:
task_queue = self.db.xreadgroup(
groupname=self.group_name,
consumername=self.consumer_name,
streams={self.PREFIX_STREAM: ">"},
count=count,
block=block,
)
except ConnectionError:
yield None
else:
if task_queue:
_, message = task_queue[0]
yield message
class AccessControl(RedisClient):
def __init__(self, token: Optional[str] = None):
super().__init__()
self.PREFIX_ACCESS_USER = "v2rss:access:user"
self.PREFIX_ACCESS_LIMIT = "v2rss:access:limit"
if token:
self.init_tracer(token)
def init_tracer(self, token: str) -> None:
self.PREFIX_ACCESS_USER += f":{token}"
self.PREFIX_ACCESS_LIMIT += f":{token}"
        # automatic registration
self._register()
def _register(self) -> None:
self.db.setnx(self.PREFIX_ACCESS_USER, 0)
def update(self) -> None:
self.db.setnx(self.PREFIX_ACCESS_LIMIT, 0)
self.db.incr(self.PREFIX_ACCESS_LIMIT)
self.db.incr(self.PREFIX_ACCESS_USER)
def _capture_access_trace(self):
_lifecycle = 10
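        # setex gives the rate-limit key a short TTL, so is_repeat() can detect
        # repeated requests made within _lifecycle seconds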
self.db.setex(
name=self.PREFIX_ACCESS_LIMIT,
time=timedelta(seconds=_lifecycle),
value=str(datetime.now(TIME_ZONE_CN) + timedelta(seconds=_lifecycle)),
)
def is_user(self) -> bool:
return bool(self.db.exists(self.PREFIX_ACCESS_USER))
def is_repeat(self) -> bool:
return bool(self.db.exists(self.PREFIX_ACCESS_LIMIT))
|
[
"datetime.timedelta",
"datetime.datetime.now"
] |
[((4276, 4305), 'datetime.timedelta', 'timedelta', ([], {'seconds': '_lifecycle'}), '(seconds=_lifecycle)\n', (4285, 4305), False, 'from datetime import timedelta, datetime\n'), ((4329, 4355), 'datetime.datetime.now', 'datetime.now', (['TIME_ZONE_CN'], {}), '(TIME_ZONE_CN)\n', (4341, 4355), False, 'from datetime import timedelta, datetime\n'), ((4358, 4387), 'datetime.timedelta', 'timedelta', ([], {'seconds': '_lifecycle'}), '(seconds=_lifecycle)\n', (4367, 4387), False, 'from datetime import timedelta, datetime\n')]
|
from Experiments import test_knapsack_SPO_unit, test_knapsack_SPO
"""
Example SPO-Relax experiments for knapsack benchmarks.
Dependencies:
gcc/8.3.0
openmpi/3.1.4
python/3.7.4
scikit-learn/0.23.1-python-3.7.4
gurobi/9.0.0
numpy/1.17.3-python-3.7.4
matplotlib/3.2.1-python-3.7.4
"""
capacities = [12,24,48,72,96,120,144,172,196,220]
kfolds = [0,1,2,3,4]
dest_folder = 'Tests/Knapsack/weighted/spo'
test_knapsack_SPO(capacities=capacities, is_shuffle=False, NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5,
dest_folder=dest_folder, noise_level=0)
dest_folder = 'Tests/Knapsack/unit/spo'
capacities = [5,10,15,20,25,30,35,40]
test_knapsack_SPO_unit(capacities=capacities, is_shuffle=False, NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5,
dest_folder=dest_folder, noise_level=0)
|
[
"Experiments.test_knapsack_SPO_unit",
"Experiments.test_knapsack_SPO"
] |
[((400, 558), 'Experiments.test_knapsack_SPO', 'test_knapsack_SPO', ([], {'capacities': 'capacities', 'is_shuffle': '(False)', 'NUMBER_OF_RANDOM_TESTS': '(1)', 'kfolds': 'kfolds', 'n_iter': '(5)', 'dest_folder': 'dest_folder', 'noise_level': '(0)'}), '(capacities=capacities, is_shuffle=False,\n NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5, dest_folder=\n dest_folder, noise_level=0)\n', (417, 558), False, 'from Experiments import test_knapsack_SPO_unit, test_knapsack_SPO\n'), ((646, 809), 'Experiments.test_knapsack_SPO_unit', 'test_knapsack_SPO_unit', ([], {'capacities': 'capacities', 'is_shuffle': '(False)', 'NUMBER_OF_RANDOM_TESTS': '(1)', 'kfolds': 'kfolds', 'n_iter': '(5)', 'dest_folder': 'dest_folder', 'noise_level': '(0)'}), '(capacities=capacities, is_shuffle=False,\n NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5, dest_folder=\n dest_folder, noise_level=0)\n', (668, 809), False, 'from Experiments import test_knapsack_SPO_unit, test_knapsack_SPO\n')]
|
root = 'data/'
import numpy as np
from ffjord.datasets.power import POWER
from ffjord.datasets.gas import GAS
from ffjord.datasets.hepmass import HEPMASS
from ffjord.datasets.miniboone import MINIBOONE
from ffjord.datasets.bsds300 import BSDS300
from .synthetic import EightGaussians
from .synthetic import Checkerboard
from .synthetic import TwoSpirals
from .mnist import MNIST_4x4, MNIST_7x7, MNIST_8x8, MNIST_16x16, MNIST_28x28
from utils import order_variables_partial_correlation
all_datasets = [
'power', 'gas', 'hepmass', 'miniboone', 'bsds300', '8gaussians',
'checkerboard', '2spirals', 'mnist_4x4', 'mnist_7x7', 'mnist_8x8',
'mnist_16x16', 'mnist_28x28']
def subsample_train_data(data, subsample_size):
rng = np.random.RandomState(seed=42)
rng.shuffle(data.trn.x)
data.trn.x = data.trn.x[:subsample_size]
def do_optimal_ordering(data, tr=False):
ordering = order_variables_partial_correlation(data.trn.x, tr=tr)
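    # apply the same column permutation to the train, validation, and test splits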
data.trn.x = data.trn.x[:, ordering]
data.val.x = data.val.x[:, ordering]
data.tst.x = data.tst.x[:, ordering]
def load_data(name, optimal_order=False, subsample_size=None, tr=False):
if name == 'power':
data = POWER()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'gas':
data = GAS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'hepmass':
data = HEPMASS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'miniboone':
data = MINIBOONE()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'bsds300':
data = BSDS300()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == '8gaussians':
return EightGaussians()
elif name == 'checkerboard':
return Checkerboard()
elif name == '2spirals':
return TwoSpirals()
elif name == 'mnist_4x4':
return MNIST_4x4(optimal_order)
elif name == 'mnist_7x7':
return MNIST_7x7(optimal_order)
elif name == 'mnist_8x8':
return MNIST_8x8(optimal_order)
elif name == 'mnist_16x16':
return MNIST_16x16(optimal_order)
elif name == 'mnist_28x28':
return MNIST_28x28(optimal_order)
|
[
"ffjord.datasets.gas.GAS",
"ffjord.datasets.miniboone.MINIBOONE",
"numpy.random.RandomState",
"ffjord.datasets.hepmass.HEPMASS",
"ffjord.datasets.power.POWER",
"ffjord.datasets.bsds300.BSDS300",
"utils.order_variables_partial_correlation"
] |
[((738, 768), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (759, 768), True, 'import numpy as np\n'), ((899, 953), 'utils.order_variables_partial_correlation', 'order_variables_partial_correlation', (['data.trn.x'], {'tr': 'tr'}), '(data.trn.x, tr=tr)\n', (934, 953), False, 'from utils import order_variables_partial_correlation\n'), ((1191, 1198), 'ffjord.datasets.power.POWER', 'POWER', ([], {}), '()\n', (1196, 1198), False, 'from ffjord.datasets.power import POWER\n'), ((1443, 1448), 'ffjord.datasets.gas.GAS', 'GAS', ([], {}), '()\n', (1446, 1448), False, 'from ffjord.datasets.gas import GAS\n'), ((1697, 1706), 'ffjord.datasets.hepmass.HEPMASS', 'HEPMASS', ([], {}), '()\n', (1704, 1706), False, 'from ffjord.datasets.hepmass import HEPMASS\n'), ((1957, 1968), 'ffjord.datasets.miniboone.MINIBOONE', 'MINIBOONE', ([], {}), '()\n', (1966, 1968), False, 'from ffjord.datasets.miniboone import MINIBOONE\n'), ((2217, 2226), 'ffjord.datasets.bsds300.BSDS300', 'BSDS300', ([], {}), '()\n', (2224, 2226), False, 'from ffjord.datasets.bsds300 import BSDS300\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comentario',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('comentario', models.TextField()),
('fecha', models.DateTimeField(default=datetime.datetime(2018, 4, 30, 15, 35, 58, 398297, tzinfo=utc))),
],
),
migrations.CreateModel(
name='Museo',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=32)),
('direccion', models.CharField(max_length=64)),
('descripcion', models.TextField()),
('barrio', models.CharField(max_length=32)),
('distrito', models.CharField(max_length=32)),
('accesibilidad', models.IntegerField()),
('telefono', models.BigIntegerField()),
('fax', models.BigIntegerField()),
('email', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Seleccion',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('museo', models.ForeignKey(to='webapp.Museo')),
],
),
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('contraseña', models.CharField(max_length=16)),
('color', models.CharField(max_length=16)),
('tamaño', models.IntegerField()),
('fondo', models.CharField(max_length=16)),
('titulo', models.CharField(max_length=16)),
('nombre', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='seleccion',
name='usuario',
field=models.ForeignKey(to='webapp.Usuario'),
),
migrations.AddField(
model_name='comentario',
name='museo',
field=models.ForeignKey(to='webapp.Museo'),
),
migrations.AddField(
model_name='comentario',
name='usuario',
field=models.ForeignKey(to='webapp.Usuario'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.BigIntegerField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.AutoField",
"datetime.datetime",
"django.db.models.IntegerField"
] |
[((264, 321), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (295, 321), False, 'from django.db import migrations, models\n'), ((2492, 2530), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""webapp.Usuario"""'}), "(to='webapp.Usuario')\n", (2509, 2530), False, 'from django.db import migrations, models\n'), ((2653, 2689), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""webapp.Museo"""'}), "(to='webapp.Museo')\n", (2670, 2689), False, 'from django.db import migrations, models\n'), ((2814, 2852), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""webapp.Usuario"""'}), "(to='webapp.Usuario')\n", (2831, 2852), False, 'from django.db import migrations, models\n'), ((456, 549), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (472, 549), False, 'from django.db import migrations, models\n'), ((579, 597), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (595, 597), False, 'from django.db import migrations, models\n'), ((849, 942), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (865, 942), False, 'from django.db import migrations, models\n'), ((968, 999), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (984, 999), False, 'from django.db import migrations, models\n'), ((1032, 1063), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1048, 1063), False, 'from django.db import migrations, models\n'), ((1098, 1116), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1114, 1116), False, 'from django.db import migrations, models\n'), ((1146, 1177), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (1162, 1177), False, 'from django.db import migrations, models\n'), ((1209, 1240), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (1225, 1240), False, 'from django.db import migrations, models\n'), ((1277, 1298), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1296, 1298), False, 'from django.db import migrations, models\n'), ((1330, 1354), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (1352, 1354), False, 'from django.db import migrations, models\n'), ((1381, 1405), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (1403, 1405), False, 'from django.db import migrations, models\n'), ((1434, 1465), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1450, 1465), False, 'from django.db import migrations, models\n'), ((1600, 1693), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (1616, 1693), False, 'from django.db import migrations, models\n'), ((1718, 1754), 'django.db.models.ForeignKey', 'models.ForeignKey', 
([], {'to': '"""webapp.Museo"""'}), "(to='webapp.Museo')\n", (1735, 1754), False, 'from django.db import migrations, models\n'), ((1887, 1980), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (1903, 1980), False, 'from django.db import migrations, models\n'), ((2011, 2042), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2027, 2042), False, 'from django.db import migrations, models\n'), ((2070, 2101), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2086, 2101), False, 'from django.db import migrations, models\n'), ((2132, 2153), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2151, 2153), False, 'from django.db import migrations, models\n'), ((2181, 2212), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2197, 2212), False, 'from django.db import migrations, models\n'), ((2242, 2273), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2258, 2273), False, 'from django.db import migrations, models\n'), ((2303, 2352), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (2323, 2352), False, 'from django.db import migrations, models\n'), ((655, 717), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(4)', '(30)', '(15)', '(35)', '(58)', '(398297)'], {'tzinfo': 'utc'}), '(2018, 4, 30, 15, 35, 58, 398297, tzinfo=utc)\n', (672, 717), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-08-04 19:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0023_auto_20170801_1925'),
]
operations = [
migrations.AddField(
model_name='job',
name='vm_volume_name',
field=models.CharField(blank=True, help_text='Name of the volume attached to store data for this job.', max_length=255, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((401, 535), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Name of the volume attached to store data for this job."""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Name of the volume attached to store data for this job.', max_length=\n 255, null=True)\n", (417, 535), False, 'from django.db import migrations, models\n')]
|
from dataclasses import dataclass, field
from logging import getLogger
from typing import Any, Dict, List
import hydra
from hydra.core.config_store import ConfigStore
from hydra.utils import to_absolute_path
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline, node
from kedro.runner import SequentialRunner
from omegaconf import MISSING, DictConfig, OmegaConf
from arachne.config.base import BaseConfig
from arachne.data import Model
from arachne.tools import ToolConfigFactory, ToolFactory
from arachne.utils.model_utils import (
init_from_dir,
init_from_file,
load_model_spec,
save_model,
)
logger = getLogger(__name__)
@dataclass
class PipelineConfig(BaseConfig):
"""
This is a configuration class for pipeline.
Attributes:
tools (Any): Tool specific configurations. This will be overwritten by the value of the pipeline option.
pipeline (List[str]): Specifies tools to be applied in series.
"""
tools: Any = MISSING
pipeline: List[str] = MISSING
def get_default_tool_configs(tools: List[str]) -> Dict:
all_tools = ToolFactory.list()
config = {}
for t in tools:
if t not in all_tools:
assert False, f"Not supported tool ({t}) yet"
config[t] = ToolConfigFactory.get(t)
return config
def run(input: Model, cfg: PipelineConfig) -> Model:
"""
This function constructs and applies a pipeline defined by the config object to an input model.
Args:
input (Model): An input model.
cfg (PipelineConfig): A config object.
Returns:
Model: An output model.
"""
# Preprare DataCatalog
data_catalog = DataCatalog()
data_catalog.add("root_input", MemoryDataSet(data=input))
# setup catalogs for each tool configs and outputs
for idx, tool in enumerate(cfg.pipeline):
config = "tools." + tool + "." + str(idx) + ".config"
output = "tools." + tool + "." + str(idx) + ".output"
data_catalog.add(config, MemoryDataSet(data=cfg.tools[tool]))
data_catalog.add(output, MemoryDataSet())
# Construct pipeline
pipeline_tmp = []
prev_output = "root_input"
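    # chain the tools linearly: each node consumes the previous node's output,
    # starting from the root input model registered in the DataCatalog above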
for idx, tool in enumerate(cfg.pipeline):
t = ToolFactory.get(tool)
config = "tools." + tool + "." + str(idx) + ".config"
output = "tools." + tool + "." + str(idx) + ".output"
tool_inputs = {"input": prev_output, "cfg": config}
task = node(t.run, inputs=tool_inputs, outputs=output)
prev_output = output
pipeline_tmp.append(task)
pipeline = Pipeline(pipeline_tmp)
# Create a runner to run the pipeline
runner = SequentialRunner()
# Run the pipeline
runner.run(pipeline, data_catalog)
return data_catalog.load(prev_output)
@hydra.main(config_path="../config", config_name="config")
def main(cfg: DictConfig) -> None:
"""
This is a main function for `arachne.driver.pipeline`.
"""
try:
assert len(list(cfg.pipeline)) > 0
except AssertionError as err:
logger.exception("You must specify one tool at least")
raise err
logger.info(OmegaConf.to_yaml(cfg))
# Setup the input DNN model
if not cfg.model_file and not cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
if cfg.model_file and cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
input_model: Model
if cfg.model_file:
input_model = init_from_file(to_absolute_path(cfg.model_file))
else:
input_model = init_from_dir(to_absolute_path(cfg.model_dir))
if cfg.model_spec_file:
# if a YAML file describing the model specification is provided, overwrite input_model.spec
input_model.spec = load_model_spec(to_absolute_path(cfg.model_spec_file))
output_model = run(input_model, cfg) # type: ignore
save_model(
model=output_model, output_path=to_absolute_path(cfg.output_path), tvm_cfg=cfg.tools.tvm
)
if __name__ == "__main__":
defaults = [{"tools": ToolFactory.list()}, {"override hydra/job_logging": "custom"}, "_self_"]
@dataclass
class PipelineCLIConfig(PipelineConfig):
defaults: List[Any] = field(default_factory=lambda: defaults)
cs = ConfigStore.instance()
cs.store(name="config", node=PipelineCLIConfig)
main()
|
[
"omegaconf.OmegaConf.to_yaml",
"kedro.io.DataCatalog",
"hydra.utils.to_absolute_path",
"arachne.tools.ToolFactory.list",
"arachne.tools.ToolFactory.get",
"kedro.runner.SequentialRunner",
"dataclasses.field",
"kedro.pipeline.node",
"hydra.core.config_store.ConfigStore.instance",
"hydra.main",
"kedro.io.MemoryDataSet",
"kedro.pipeline.Pipeline",
"arachne.tools.ToolConfigFactory.get",
"logging.getLogger"
] |
[((654, 673), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (663, 673), False, 'from logging import getLogger\n'), ((2802, 2859), 'hydra.main', 'hydra.main', ([], {'config_path': '"""../config"""', 'config_name': '"""config"""'}), "(config_path='../config', config_name='config')\n", (2812, 2859), False, 'import hydra\n'), ((1120, 1138), 'arachne.tools.ToolFactory.list', 'ToolFactory.list', ([], {}), '()\n', (1136, 1138), False, 'from arachne.tools import ToolConfigFactory, ToolFactory\n'), ((1688, 1701), 'kedro.io.DataCatalog', 'DataCatalog', ([], {}), '()\n', (1699, 1701), False, 'from kedro.io import DataCatalog, MemoryDataSet\n'), ((2595, 2617), 'kedro.pipeline.Pipeline', 'Pipeline', (['pipeline_tmp'], {}), '(pipeline_tmp)\n', (2603, 2617), False, 'from kedro.pipeline import Pipeline, node\n'), ((2674, 2692), 'kedro.runner.SequentialRunner', 'SequentialRunner', ([], {}), '()\n', (2690, 2692), False, 'from kedro.runner import SequentialRunner\n'), ((4317, 4339), 'hydra.core.config_store.ConfigStore.instance', 'ConfigStore.instance', ([], {}), '()\n', (4337, 4339), False, 'from hydra.core.config_store import ConfigStore\n'), ((1284, 1308), 'arachne.tools.ToolConfigFactory.get', 'ToolConfigFactory.get', (['t'], {}), '(t)\n', (1305, 1308), False, 'from arachne.tools import ToolConfigFactory, ToolFactory\n'), ((1737, 1762), 'kedro.io.MemoryDataSet', 'MemoryDataSet', ([], {'data': 'input'}), '(data=input)\n', (1750, 1762), False, 'from kedro.io import DataCatalog, MemoryDataSet\n'), ((2247, 2268), 'arachne.tools.ToolFactory.get', 'ToolFactory.get', (['tool'], {}), '(tool)\n', (2262, 2268), False, 'from arachne.tools import ToolConfigFactory, ToolFactory\n'), ((2468, 2515), 'kedro.pipeline.node', 'node', (['t.run'], {'inputs': 'tool_inputs', 'outputs': 'output'}), '(t.run, inputs=tool_inputs, outputs=output)\n', (2472, 2515), False, 'from kedro.pipeline import Pipeline, node\n'), ((3155, 3177), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (3172, 3177), False, 'from omegaconf import MISSING, DictConfig, OmegaConf\n'), ((4267, 4307), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : defaults)'}), '(default_factory=lambda : defaults)\n', (4272, 4307), False, 'from dataclasses import dataclass, field\n'), ((2023, 2058), 'kedro.io.MemoryDataSet', 'MemoryDataSet', ([], {'data': 'cfg.tools[tool]'}), '(data=cfg.tools[tool])\n', (2036, 2058), False, 'from kedro.io import DataCatalog, MemoryDataSet\n'), ((2093, 2108), 'kedro.io.MemoryDataSet', 'MemoryDataSet', ([], {}), '()\n', (2106, 2108), False, 'from kedro.io import DataCatalog, MemoryDataSet\n'), ((3547, 3579), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['cfg.model_file'], {}), '(cfg.model_file)\n', (3563, 3579), False, 'from hydra.utils import to_absolute_path\n'), ((3627, 3658), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['cfg.model_dir'], {}), '(cfg.model_dir)\n', (3643, 3658), False, 'from hydra.utils import to_absolute_path\n'), ((3832, 3869), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['cfg.model_spec_file'], {}), '(cfg.model_spec_file)\n', (3848, 3869), False, 'from hydra.utils import to_absolute_path\n'), ((3985, 4018), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['cfg.output_path'], {}), '(cfg.output_path)\n', (4001, 4018), False, 'from hydra.utils import to_absolute_path\n'), ((4103, 4121), 'arachne.tools.ToolFactory.list', 'ToolFactory.list', ([], {}), '()\n', (4119, 4121), False, 'from arachne.tools import ToolConfigFactory, 
ToolFactory\n')]
|
import urllib.request as req
import subprocess
s = '侵入者あり'  # "Intruder detected" (kept in Japanese for the jtalk speech synthesizer)
print(s)
# read the message aloud
subprocess.check_output('./jtalk.sh "' +s + '"', shell=True)
|
[
"subprocess.check_output"
] |
[((77, 138), 'subprocess.check_output', 'subprocess.check_output', (['(\'./jtalk.sh "\' + s + \'"\')'], {'shell': '(True)'}), '(\'./jtalk.sh "\' + s + \'"\', shell=True)\n', (100, 138), False, 'import subprocess\n')]
|
import matplotlib.pyplot as plt
from numpy import *
import os
"""
filename1 = "data_processing/" + "GeneralQuantumFisher1photons"
filename2 = "data_processing/" + "GeneralQuantumFisher2photons"
filename3 = "data_processing/" + "GeneralQuantumFisher3photons"
with open(filename1) as f:
fisher1 = f.readlines()
with open(filename2) as f:
fisher2 = f.readlines()
with open(filename3) as f:
fisher3 = f.readlines()
fisher1 = [float(i) for i in fisher1]
fisher2 = [float(i) for i in fisher2]
fisher3 = [float(i) for i in fisher3]
times = linspace(0, 2*pi, len(fisher1))
filename_pure = "data_processing/" + "FisherInfoN20Homodyne"
filename_deco01 = "data_processing/" + "FisherHomodyneN25Deco0.1"
filename_deco05 = "data_processing/" + "FisherHomodyneN25Deco0.5"
with open(filename_pure) as f:
for line in f:
fisher_pure = line.split(",")
with open(filename_deco01) as f:
for line in f:
fisher_deco01 = line.split(",")
with open(filename_deco05) as f:
for line in f:
fisher_deco05 = line.split(",")
fisher_pure = [float(i) for i in fisher_pure]
fisher_deco01 = [float(i) for i in fisher_deco01]
fisher_deco05 = [float(i) for i in fisher_deco05]
times_pure = list(load("data_processing/" + "FisherInfoN20HomodyneTimes.npy"))
times_deco01 = list(load("data_processing/" + "times2016-08-21-08.05.18.npy"))
times_deco05 = list(load("data_processing/" + "times2016-08-20-01.16.02.npy"))
for i in range(0,1):
del fisher_pure[::2]
del fisher_deco01[::2]
del fisher_deco05[::2]
del times_pure[::2]
del times_deco01[::2]
del times_deco05[::2]
filename_pure = "data_processing/" + "fisher_mirror_N30"
filename_deco = "data_processing/" + "FisherN25MirrorDeco0.1"
with open(filename_pure) as f:
for line in f:
fisher_pure = line.split(",")
with open(filename_deco) as f:
for line in f:
fisher_deco = line.split(",")
fisher_pure = [float(i) for i in fisher_pure]
fisher_deco = [float(i) for i in fisher_deco]
del fisher_pure[::2]
del fisher_pure[::2]
del fisher_pure[::2]
times_pure = linspace(0, 2*pi, len(fisher_pure))
times_deco = list(load("data_processing/" + "times2016-08-21-08.05.18.npy"))
del fisher_deco[::2]
del fisher_deco[::2]
del fisher_deco[::2]
del times_deco[::2]
del times_deco[::2]
del times_deco[::2]
"""
filename00 = "data/simulation2017-02-07-13.45.35/fock_fisher"
filename_times00 = "data/simulation2017-02-07-13.45.35/times"
filename005 = "data/simulation2017-02-08-09.36.08/fock_fisher"
filename_times005 = "data/simulation2017-02-08-09.36.08/times"
fisher00 = load(filename00 + '.npy')
times00 = load(filename_times00 + '.npy')
fisher005 = load(filename005 + '.npy')
times005 = load(filename_times005 + '.npy')
# Mask the decoherence
fisher005 = ma.masked_where(fisher005 > 3.8, fisher005)
fisher005 = ma.masked_where(fisher005 < 0.0, fisher005)
#plt.show()
def plot_fisher(times, data, chirange, filename):
"""
function: plot_fisher
- data/array: Fisher information to be plotted vs time.
Output:
- Plot/file
"""
plt.figure(figsize=(13,11))
# Use Latex
params = {'backend': 'ps',
'font.size': 12,
'axes.labelsize': 12,
# 'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': True, #benutze latex fuer schrift encoding -> automatisch selbe schriftart wie in latex
'text.latex.unicode': True,
'font.family': 'serif',
'font.serif': 'cm',
#'figure.figsize': fig_size,
'text.latex.preamble': [r'\usepackage{physics}', r'\usepackage{amsmath}']}
plt.rcParams.update(params)
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
plt.xlabel('$t$', size = 40)
plt.ylabel('$I_F$', size = 40)
ax = plt.subplot(111)
ax.tick_params(axis='both', which='major', pad=10)
#plt.title('Fisher Information vs. time for ' + r'$\bar{g} = $' + str(self.args['gbar']) + ', $k = $' + str(self.k) + ', $N = $' + str(self.N) + ', $h = $' + str(self.h), size = 20, y=1.02)
#plt.gca().grid(True, linewidth = 2)
#plt.plot(times[0], data[0], '-o', color = 'k', label = '$1$ photon')
#plt.plot(times[1], data[1], '-o', color = 'b', label = '$4$ photons')
#plt.plot(times[2], data[2], '-o', color = 'r', label = '$9$ photons')
#plt.plot(times[0], data[0], color = 'b', label = 'Analytic')
plt.plot(times[0], data[0], '-o', color = 'k', markeredgewidth=0.0, label = '$\kappa = 0.0$')
plt.plot(times[1], data[1], '-o', color = 'b', markeredgewidth=0.0, label = '$\kappa = 0.05$')
plt.xticks([ 0, pi/2, pi, 3*pi/2, 2*pi], [r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'], size = 40)
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], [r'$0.0$', r'$0.5$', r'$1.0$', r'$1.5$', r'$2.0$', r'$2.5$', r'$3.0$', r'$3.5$'], size = 30)
#plt.yticks([0.0, 1000, 2000, 3000, 4000, 5000, 6000], [r'$0.0$', r'$1000$', r'$2000$', r'$3000$', r'$4000$', r'$5000$', r'$6000$'], size = 40)
#plt.yticks([0.0, 100, 200, 300, 400, 500, 600, 700, 800, 900], [r'$0.0$', r'$100$', r'$200$', r'$300$', r'$400$', r'$500$', r'$600$', r'$700$', r'$800$', r'$900$'], size = 40)
#plt.yticks([0.0, 200, 400, 600, 800], [r'$0.0$', r'$200$', r'$400$', r'$600$', r'$800$'], size = 30)
plt.subplots_adjust(bottom=0.15)
#plt.xlim([0, pi/2])
#plt.ylim([0,300])
plt.legend(loc = 1, fontsize = 30)
path = os.path.join(filename)
plt.savefig(path + ".pdf",transparent=True, dpi=600)
plt.show()
plot_fisher([times00, times005], [fisher00, fisher005], [0.0], "Fisher_Fock")
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.savefig"
] |
[((3093, 3121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 11)'}), '(figsize=(13, 11))\n', (3103, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3752, 3779), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (3771, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3880), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {'size': '(40)'}), "('$t$', size=40)\n", (3864, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3887, 3915), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$I_F$"""'], {'size': '(40)'}), "('$I_F$', size=40)\n", (3897, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3927, 3943), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3938, 3943), True, 'import matplotlib.pyplot as plt\n'), ((4530, 4625), 'matplotlib.pyplot.plot', 'plt.plot', (['times[0]', 'data[0]', '"""-o"""'], {'color': '"""k"""', 'markeredgewidth': '(0.0)', 'label': '"""$\\\\kappa = 0.0$"""'}), "(times[0], data[0], '-o', color='k', markeredgewidth=0.0, label=\n '$\\\\kappa = 0.0$')\n", (4538, 4625), True, 'import matplotlib.pyplot as plt\n'), ((4628, 4724), 'matplotlib.pyplot.plot', 'plt.plot', (['times[1]', 'data[1]', '"""-o"""'], {'color': '"""b"""', 'markeredgewidth': '(0.0)', 'label': '"""$\\\\kappa = 0.05$"""'}), "(times[1], data[1], '-o', color='b', markeredgewidth=0.0, label=\n '$\\\\kappa = 0.05$')\n", (4636, 4724), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4842), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, pi / 2, pi, 3 * pi / 2, 2 * pi]', "['$0$', '$\\\\pi/2$', '$\\\\pi$', '$3\\\\pi/2$', '$2\\\\pi$']"], {'size': '(40)'}), "([0, pi / 2, pi, 3 * pi / 2, 2 * pi], ['$0$', '$\\\\pi/2$',\n '$\\\\pi$', '$3\\\\pi/2$', '$2\\\\pi$'], size=40)\n", (4737, 4842), True, 'import matplotlib.pyplot as plt\n'), ((4841, 4980), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5]', "['$0.0$', '$0.5$', '$1.0$', '$1.5$', '$2.0$', '$2.5$', '$3.0$', '$3.5$']"], {'size': '(30)'}), "([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], ['$0.0$', '$0.5$',\n '$1.0$', '$1.5$', '$2.0$', '$2.5$', '$3.0$', '$3.5$'], size=30)\n", (4851, 4980), True, 'import matplotlib.pyplot as plt\n'), ((5431, 5463), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.15)'}), '(bottom=0.15)\n', (5450, 5463), True, 'import matplotlib.pyplot as plt\n'), ((5517, 5547), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)', 'fontsize': '(30)'}), '(loc=1, fontsize=30)\n', (5527, 5547), True, 'import matplotlib.pyplot as plt\n'), ((5563, 5585), 'os.path.join', 'os.path.join', (['filename'], {}), '(filename)\n', (5575, 5585), False, 'import os\n'), ((5590, 5643), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + '.pdf')"], {'transparent': '(True)', 'dpi': '(600)'}), "(path + '.pdf', transparent=True, dpi=600)\n", (5601, 5643), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: UTF-8 -*-
from flask import g
import sqlite3
import math
database = "project/appart.db"
categ_coef = 17960
coef = 360/6378137
def connect_db():
return sqlite3.connect(database)
def insert_db(street,lat,lon,year,room,all_area,all_floors):
    # validate whether the coordinates fall inside Kyiv's bounding box and
    # calculate distances; outside the box both distances default to 0
    if 50.144912 < lat < 50.702443 and 30.225671 < lon < 30.935312:
        len_to_center_in_coord = ((lat-50.450198)**2+(lon-30.523986)**2)**(1/2)
        len_to_center = len_to_center_in_coord/coef
        len_to_metro = def_nearest_subway(lat,lon)
    else:
        len_to_center = 0
        len_to_metro = 0
# calculating data for building of price estimation
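    # log-linear price model: exp() of a linear combination of the flat's attributes,
    # with distance to the centre and to the nearest metro entering negatively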
client_price = int(math.exp(7.483156+ 0.001652*float(year)
+ 0.122520*float(room)
+ 0.008478*float(all_area)
+ 0.007029*float(all_floors)
- 0.000286*float(len_to_center)
- 0.000407*float(len_to_metro)))
category = int((client_price)/categ_coef)
# inserting data to DB
data_list = [street,lat,lon,year,room,all_area,all_floors,client_price,category,len_to_center*6.283279]
g.db = connect_db()
cur = g.db.cursor()
cur.execute('INSERT INTO client_data VALUES (?,?,?,?,?,?,?,?,?,?)',data_list)
g.db.commit()
return g.db.close()
# taking info from the database
def search_db():
g.db = connect_db()
cur = g.db.cursor()
# taking client data
client_row = cur.execute('SELECT * FROM client_data WHERE ROWID=(SELECT MAX(ROWID) FROM client_data)')
posts_cl = []
posts_cl = [dict(year = row[3],room=row[4],all_area=row[5],all_floors=row[6],cl_price=row[7],category=row[8]) for row in client_row.fetchall()]
# taking data from calculated category based on price estimation
category = posts_cl[0]['category']
if category == 0:
category = 1
my_data_row = cur.execute('SELECT * FROM mytable_na WHERE price_category = (?)',[category])
posts_my_data=[]
posts_my_data = [dict(price=row[3],street=row[0],room=row[5],all_area=row[6],all_floors=row[9],distr=row[12]) for row in my_data_row.fetchall()]
g.db.close()
return [posts_cl,posts_my_data]
def taking_data_for_plot(rand_district):
g.db = connect_db()
cur = g.db.cursor()
    # taking all data for the district which was chosen in my data
    # 7 different metrics of districts
distr_data = cur.execute('SELECT * FROM mytable_na WHERE distr = (?)',[rand_district])
post_plot_distr = [dict(price = row[3],room = row[5],all_area=row[6],livin_area=row[7],kitch_area=row[8],all_floors=row[9],year=row[11],distr=str(row[13])) for row in distr_data.fetchall()]
g.db.close()
return post_plot_distr
def def_nearest_subway(lat,lon):
g.db = connect_db()
cur = g.db.cursor()
# taking metro data
client_row = cur.execute('SELECT * FROM metro_coords ')
metro_coords = []
for row in client_row.fetchall():
metro_coords.append([row[0],row[1],row[2]])
g.db.close()
min_list = []
for i in range(0,len(metro_coords)):
        min_list.append(((lat - metro_coords[i][1])**2 + (lon - metro_coords[i][2])**2)**(1/2))  # Euclidean distance (in degrees) to each station
min_val = None
for i in range(0,len(min_list)) :
if min_val is None or min_list[i] < min_val :
min_val = min_list[i]
return min_val/coef
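# Illustrative call sequence (street and coordinate values below are made up for
# illustration and are not part of the original module):
# insert_db("Khreshchatyk St.", 50.45, 30.52, 2005, 2, 60.0, 9)   # store one client flat
# client_rows, comparable_listings = search_db()                  # listings in the same price category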
|
[
"flask.g.db.cursor",
"flask.g.db.close",
"sqlite3.connect",
"flask.g.db.commit"
] |
[((171, 196), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (186, 196), False, 'import sqlite3\n'), ((1352, 1365), 'flask.g.db.cursor', 'g.db.cursor', ([], {}), '()\n', (1363, 1365), False, 'from flask import g\n'), ((1452, 1465), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (1463, 1465), False, 'from flask import g\n'), ((1478, 1490), 'flask.g.db.close', 'g.db.close', ([], {}), '()\n', (1488, 1490), False, 'from flask import g\n'), ((1574, 1587), 'flask.g.db.cursor', 'g.db.cursor', ([], {}), '()\n', (1585, 1587), False, 'from flask import g\n'), ((2311, 2323), 'flask.g.db.close', 'g.db.close', ([], {}), '()\n', (2321, 2323), False, 'from flask import g\n'), ((2438, 2451), 'flask.g.db.cursor', 'g.db.cursor', ([], {}), '()\n', (2449, 2451), False, 'from flask import g\n'), ((2841, 2853), 'flask.g.db.close', 'g.db.close', ([], {}), '()\n', (2851, 2853), False, 'from flask import g\n'), ((2951, 2964), 'flask.g.db.cursor', 'g.db.cursor', ([], {}), '()\n', (2962, 2964), False, 'from flask import g\n'), ((3165, 3177), 'flask.g.db.close', 'g.db.close', ([], {}), '()\n', (3175, 3177), False, 'from flask import g\n')]
|
from django import forms
from django.forms import ModelForm
from .models import Customer, Order
from myapp.models import Name
class NameForm(forms.ModelForm):
name_value = forms.CharField(max_length=100, help_text = "Enter a name")
class Meta:
model = Name
fields = ('name_value',)
class OrderForm(ModelForm):
class Meta:
model = Order
fields = '__all__'
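# Hypothetical usage sketch (the sample data is illustrative only, not from the project):
# form = NameForm(data={"name_value": "Alice"})
# if form.is_valid():
#     form.save()  # persists a Name instance with name_value="Alice"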
|
[
"django.forms.CharField"
] |
[((177, 234), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'help_text': '"""Enter a name"""'}), "(max_length=100, help_text='Enter a name')\n", (192, 234), False, 'from django import forms\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Freenove\Desktop\树莓派六足机器人\界面UI\face.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Face(object):
def setupUi(self, Face):
Face.setObjectName("Face")
Face.resize(650, 320)
Face.setStyleSheet("QWidget{\n"
"background:#484848;\n"
"}\n"
"QAbstractButton{\n"
"border-style:none;\n"
"border-radius:0px;\n"
"padding:5px;\n"
"color:#DCDCDC;\n"
"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #858585,stop:1 #383838);\n"
"}\n"
"QAbstractButton:hover{\n"
"color:#000000;\n"
"background-color:#008aff;\n"
"}\n"
"QAbstractButton:pressed{\n"
"color:#DCDCDC;\n"
"border-style:solid;\n"
"border-width:0px 0px 0px 4px;\n"
"padding:4px 4px 4px 2px;\n"
"border-color:#008aff;\n"
"background-color:#444444;\n"
"}\n"
"\n"
"QLabel{\n"
"color:#DCDCDC;\n"
"\n"
"\n"
"}\n"
"QLabel:focus{\n"
"border:1px solid #00BB9E;\n"
"\n"
"}\n"
"\n"
"QLineEdit{\n"
"border:1px solid #242424;\n"
"border-radius:3px;\n"
"padding:2px;\n"
"background:none;\n"
"selection-background-color:#484848;\n"
"selection-color:#DCDCDC;\n"
"}\n"
"QLineEdit:focus,QLineEdit:hover{\n"
"border:1px solid #242424;\n"
"}\n"
"QLineEdit{\n"
"border:1px solid #242424;\n"
"border-radius:3px;\n"
"padding:2px;\n"
"background:none;\n"
"selection-background-color:#484848;\n"
"selection-color:#DCDCDC;\n"
"}\n"
"\n"
"QLineEdit:focus,QLineEdit:hover{\n"
"border:1px solid #242424;\n"
"}\n"
"QLineEdit{\n"
"lineedit-password-character:9<PASSWORD>;\n"
"}")
self.label_video = QtWidgets.QLabel(Face)
self.label_video.setGeometry(QtCore.QRect(20, 10, 400, 300))
font = QtGui.QFont()
font.setFamily("Arial")
self.label_video.setFont(font)
self.label_video.setAlignment(QtCore.Qt.AlignCenter)
self.label_video.setObjectName("label_video")
self.label_photo = QtWidgets.QLabel(Face)
self.label_photo.setGeometry(QtCore.QRect(440, 15, 200, 200))
font = QtGui.QFont()
font.setFamily("Arial")
self.label_photo.setFont(font)
self.label_photo.setAlignment(QtCore.Qt.AlignCenter)
self.label_photo.setObjectName("label_photo")
self.lineEdit = QtWidgets.QLineEdit(Face)
self.lineEdit.setGeometry(QtCore.QRect(490, 235, 140, 25))
font = QtGui.QFont()
font.setFamily("Arial")
self.lineEdit.setFont(font)
self.lineEdit.setObjectName("lineEdit")
self.label = QtWidgets.QLabel(Face)
self.label.setGeometry(QtCore.QRect(440, 240, 45, 15))
font = QtGui.QFont()
font.setFamily("Arial")
self.label.setFont(font)
self.label.setObjectName("label")
self.Button_Read_Face = QtWidgets.QPushButton(Face)
self.Button_Read_Face.setGeometry(QtCore.QRect(460, 275, 150, 25))
font = QtGui.QFont()
font.setFamily("Arial")
self.Button_Read_Face.setFont(font)
self.Button_Read_Face.setObjectName("Button_Read_Face")
self.retranslateUi(Face)
QtCore.QMetaObject.connectSlotsByName(Face)
def retranslateUi(self, Face):
_translate = QtCore.QCoreApplication.translate
Face.setWindowTitle(_translate("Face", "Face"))
self.label_video.setText(_translate("Face", "Video"))
self.label_photo.setText(_translate("Face", "Photo"))
self.label.setText(_translate("Face", "Name:"))
self.Button_Read_Face.setText(_translate("Face", "Read Face"))
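# Hypothetical launch sketch (pyuic5 output is normally imported, not run directly;
# the host widget below is an assumption for illustration):
# import sys
# app = QtWidgets.QApplication(sys.argv)
# window = QtWidgets.QWidget()
# ui = Ui_Face()
# ui.setupUi(window)
# window.show()
# sys.exit(app.exec_())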
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtGui.QFont",
"PyQt5.QtCore.QMetaObject.connectSlotsByName"
] |
[((1762, 1784), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Face'], {}), '(Face)\n', (1778, 1784), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1869, 1882), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1880, 1882), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2096, 2118), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Face'], {}), '(Face)\n', (2112, 2118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2204, 2217), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2215, 2217), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2428, 2453), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['Face'], {}), '(Face)\n', (2447, 2453), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2536, 2549), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2547, 2549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2687, 2709), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Face'], {}), '(Face)\n', (2703, 2709), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2788, 2801), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2799, 2801), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2941, 2968), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Face'], {}), '(Face)\n', (2962, 2968), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3059, 3072), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3070, 3072), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3255, 3298), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Face'], {}), '(Face)\n', (3292, 3298), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1822, 1852), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(10)', '(400)', '(300)'], {}), '(20, 10, 400, 300)\n', (1834, 1852), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2156, 2187), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(440)', '(15)', '(200)', '(200)'], {}), '(440, 15, 200, 200)\n', (2168, 2187), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2488, 2519), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(235)', '(140)', '(25)'], {}), '(490, 235, 140, 25)\n', (2500, 2519), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2741, 2771), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(440)', '(240)', '(45)', '(15)'], {}), '(440, 240, 45, 15)\n', (2753, 2771), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3011, 3042), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(275)', '(150)', '(25)'], {}), '(460, 275, 150, 25)\n', (3023, 3042), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import os
import ecdsa
import binascii
import hashlib
from typing import Union
from luracoin.config import Config
def bits_to_target(bits: bytes) -> hex:
"""
The first byte is the exponent and the other three bytes are the
coefficient.
Example:
0x1d00ffff => 00000000ffff000000000000000000000000...[0x1d = 29 bytes]
"""
bits = bits.hex()
    # We get the first two characters, which form the first byte, and convert them
    # to an integer; then we subtract three (the coefficient bytes) and multiply
    # by two, because each byte is two hex characters
target_exponent_number = (int(bits[0:2], 16) - 3) * 2
target_exponent = "".join(["0" for d in range(target_exponent_number)])
# The target has to be 32 bytes, so 64 characters. We need to add 0's at
# the start of the target as padding. Also here we need to add 6 because
# we need to take in account the exponent too
padding_number = 64 - target_exponent_number - 6
padding = "".join(["0" for d in range(padding_number)])
return padding + bits[2:8] + target_exponent
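# Quick sanity check (uses the well-known difficulty bits 0x1d00ffff from the
# docstring example; not part of the original file):
# bits_to_target(bytes.fromhex("1d00ffff")) == "00000000ffff" + "0" * 52   # 64 hex chars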
def sha256d(s: Union[str, bytes]) -> str:
"""A double SHA-256 hash."""
if not isinstance(s, bytes):
s = s.encode()
return hashlib.sha256(hashlib.sha256(s).digest()).hexdigest()
def mining_reward(height) -> int:
halving = int(height / Config.HALVING_BLOCKS) + 1
return int(Config.BLOCK_REWARD / halving)
def little_endian_to_int(little_endian_hex: str) -> int:
return int.from_bytes(
binascii.unhexlify(little_endian_hex), byteorder="little"
)
def is_hex(s: str) -> bool:
try:
int(s, 16)
except ValueError:
return False
return len(s) % 2 == 0
def bytes_to_signing_key(private_key: bytes) -> ecdsa.SigningKey:
return ecdsa.SigningKey.from_string(private_key, curve=ecdsa.SECP256k1)
|
[
"hashlib.sha256",
"binascii.unhexlify",
"ecdsa.SigningKey.from_string"
] |
[((1812, 1876), 'ecdsa.SigningKey.from_string', 'ecdsa.SigningKey.from_string', (['private_key'], {'curve': 'ecdsa.SECP256k1'}), '(private_key, curve=ecdsa.SECP256k1)\n', (1840, 1876), False, 'import ecdsa\n'), ((1540, 1577), 'binascii.unhexlify', 'binascii.unhexlify', (['little_endian_hex'], {}), '(little_endian_hex)\n', (1558, 1577), False, 'import binascii\n'), ((1270, 1287), 'hashlib.sha256', 'hashlib.sha256', (['s'], {}), '(s)\n', (1284, 1287), False, 'import hashlib\n')]
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: <NAME>, <NAME>, <NAME>
from sys import argv, exit
import os
import sqlite3
import datetime
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser, OptionGroup
def read_benchmark_log(dbname, filenames):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512), totaltime REAL, timelimit REAL, hostname VARCHAR(1024), date DATETIME, setup TEXT)""")
c.execute("""CREATE TABLE IF NOT EXISTS known_planner_configs
(id INTEGER PRIMARY KEY AUTOINCREMENT, planner_name VARCHAR(512) NOT NULL, settings TEXT)""")
for filename in filenames:
print("Processing " + filename)
logfile = open(filename,'r')
expname = logfile.readline().split()[-1]
hostname = logfile.readline().split()[-1]
date = " ".join(logfile.readline().split()[2:])
goal_name = logfile.readline().split()[-1]
# disabled the planning request part
#logfile.readline() # skip <<<|
#expsetup = ""
#expline = logfile.readline()
#while not expline.startswith("|>>>"):
# expsetup = expsetup + expline
# expline = logfile.readline()
expsetup = ""
timelimit = float(logfile.readline().split()[0])
totaltime = float(logfile.readline().split()[0])
c.execute('INSERT INTO experiments VALUES (?,?,?,?,?,?,?)',
(None, expname, totaltime, timelimit, hostname, date, expsetup) )
c.execute('SELECT last_insert_rowid()')
experiment_id = c.fetchone()[0]
num_planners = int(logfile.readline().split()[0])
for i in range(num_planners):
planner_name = logfile.readline()[:-1]
print("Parsing data for " + planner_name)
# read common data for planner
num_common = int(logfile.readline().split()[0])
settings = ""
for j in range(num_common):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute("SELECT id FROM known_planner_configs WHERE (planner_name=? AND settings=?)", (planner_name, settings,))
p = c.fetchone()
if p==None:
c.execute("INSERT INTO known_planner_configs VALUES (?,?,?)", (None, planner_name, settings,))
c.execute('SELECT last_insert_rowid()')
planner_id = c.fetchone()[0]
else:
planner_id = p[0]
# read run properties
# number of properties to read from log file
num_properties = int(logfile.readline().split()[0])
# load a dictionary of properties and types
# we keep the names of the properties in a list as well, to ensure the correct order of properties
properties = {}
basePropNames = ['experimentid', 'plannerid', 'goal_name'] # these are the ones not from the planner directly
basePropValues = [experiment_id, planner_id, goal_name]
propNames = []
propNames.extend(basePropNames)
for j in range(num_properties):
field = logfile.readline().split()
ftype = field[-1]
fname = "_".join(field[:-1])
properties[fname] = ftype
propNames.append(fname)
# create the table, if needed
table_columns = "experimentid INTEGER, plannerid INTEGER, goal_name VARCHAR(100)"
            for k, v in properties.items():
table_columns = table_columns + ', ' + k + ' ' + v
table_columns = table_columns + ", FOREIGN KEY(experimentid) REFERENCES experiments(id) ON DELETE CASCADE ON UPDATE CASCADE"
table_columns = table_columns + ", FOREIGN KEY(plannerid) REFERENCES known_planner_configs(id) ON DELETE CASCADE ON UPDATE CASCADE"
planner_table = 'planner_%s' % planner_name
c.execute("CREATE TABLE IF NOT EXISTS `%s` (%s)" % (planner_table, table_columns))
# check if the table has all the needed columns; if not, add them
c.execute('SELECT * FROM `%s`' % planner_table)
added_columns = [ t[0] for t in c.description]
for col in properties.keys():
if not col in added_columns:
c.execute('ALTER TABLE `' + planner_table + '` ADD ' + col + ' ' + properties[col] + ';')
# add measurements
insert_fmt_str = 'INSERT INTO `' + planner_table + '` (' + ','.join(propNames) + ') VALUES (' + ','.join('?'*(num_properties + len(basePropNames))) + ')'
num_runs = int(logfile.readline().split()[0])
for j in range(num_runs):
run = tuple(basePropValues + [None if len(x)==0 else float(x)
for x in logfile.readline().split('; ')[:-1]])
c.execute(insert_fmt_str, run)
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plot_attribute(cur, planners, attribute, typename):
"""Create a box plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
plt.clf()
ax = plt.gca()
labels = []
measurements = []
nan_counts = []
is_bool = True
for planner in planners:
cur.execute('SELECT * FROM `%s`' % planner)
attributes = [ t[0] for t in cur.description]
if attribute in attributes:
cur.execute('SELECT `%s` FROM `%s` WHERE `%s` IS NOT NULL' % (attribute, planner, attribute))
measurement = [ t[0] for t in cur.fetchall() ]
cur.execute('SELECT count(*) FROM `%s` WHERE `%s` IS NULL' % (planner, attribute))
nan_counts.append(cur.fetchone()[0])
cur.execute('SELECT DISTINCT `%s` FROM `%s`' % (attribute, planner))
is_bool = is_bool and set([t[0] for t in cur.fetchall() if not t[0]==None]).issubset(set([0,1]))
measurements.append(measurement)
labels.append(planner.replace('planner_geometric_','').replace('planner_control_',''))
if is_bool:
width = .5
measurements_percentage = [sum(m)*100./len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurements_percentage, width)
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nan_counts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if is_bool else i+1
ax.text(x, .95*maxy, str(nan_counts[i]), horizontalalignment='center', size='small')
plt.show()
def plot_statistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plot...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
planner_names = [ t for t in table_names if t.startswith('planner_') ]
attributes = []
types = {}
experiments = []
# merge possible attributes from all planners
for p in planner_names:
c.execute('SELECT * FROM `%s` LIMIT 1' % p)
atr = [ t[0] for t in c.description]
atr.remove('plannerid')
atr.remove('experimentid')
for a in atr:
if a not in attributes:
c.execute('SELECT typeof(`%s`) FROM `%s` WHERE `%s` IS NOT NULL LIMIT 1' % (a, p, a))
attributes.append(a)
types[a] = c.fetchone()[0]
c.execute('SELECT DISTINCT experimentid FROM `%s`' % p)
eid = [t[0] for t in c.fetchall() if not t[0]==None]
for e in eid:
if e not in experiments:
experiments.append(e)
attributes.sort()
pp = PdfPages(fname)
for atr in attributes:
if types[atr]=='integer' or types[atr]=='real':
plot_attribute(c, planner_names, atr, types[atr])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
for e in experiments:
# get the number of runs, per planner, for this experiment
runcount = []
for p in planner_names:
c.execute('SELECT count(*) FROM `%s` WHERE experimentid = %s' % (p, e))
runcount.append(c.fetchone()[0])
# check if this number is the same for all planners
runs = "Number of averaged runs: "
if len([r for r in runcount if not r == runcount[0]]) > 0:
runs = runs + ", ".join([planner_names[i].replace('planner_geometric_','').replace('planner_control_','') +
"=" + str(runcount[i]) for i in range(len(runcount))])
else:
runs = runs + str(runcount[0])
c.execute('SELECT name, timelimit FROM experiments WHERE id = %s' % e)
d = c.fetchone()
plt.figtext(pagex, pagey, "Experiment '%s'" % d[0])
plt.figtext(pagex, pagey-0.05, runs)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %s seconds" % d[1])
pagey -= 0.22
plt.show()
pp.savefig(plt.gcf())
pp.close()
def save_as_mysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
last = ['experiments', 'known_planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def generate_csv(dbname, fname):
"""Create a csv file with all experiments combined into one list."""
print("Generating CSV output...")
# Open CSV File
csv = open(fname, 'w')
# Connect to database
conn = sqlite3.connect(dbname)
cursor = conn.cursor()
cursor.execute('PRAGMA FOREIGN_KEYS = ON')
# Get planner tables
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in cursor.fetchall() ]
planner_names = [ t for t in table_names if t.startswith('planner_') ]
# Create vectors
attributes = []
types = {}
experiments = []
# merge possible attributes from all planners
for planner_name in planner_names:
cursor.execute('SELECT * FROM `%s` LIMIT 1' % planner_name)
atr = [ t[0] for t in cursor.description]
atr.remove('plannerid')
atr.remove('experimentid')
for attribute in atr:
if attribute not in attributes:
cursor.execute('SELECT typeof(`%s`) FROM `%s` WHERE `%s` IS NOT NULL LIMIT 1' % (attribute, planner_name, attribute))
attributes.append(attribute) # add this new attribute (first time seen)
types[attribute] = cursor.fetchone()[0]
        # Find new experiments for this planner table and add to our experiment vector
cursor.execute('SELECT DISTINCT experimentid FROM `%s`' % planner_name)
experiment_ids = [t[0] for t in cursor.fetchall() if not t[0]==None]
for experiment_id in experiment_ids:
if experiment_id not in experiments:
experiments.append(experiment_id)
# Sort all found attributes
attributes.sort(reverse=True)
# Create header of the CSV
csv.write('planner_type')
for atr in attributes:
#if types[atr]=='integer' or types[atr]=='real':
csv.write(", %s"%atr)
csv.write('\n') # new line
    # Start creating CSV file by polling each planner table and separating its data into proper column
# format, leaving blanks where planner is missing possible attribute data
for planner_name in planner_names:
cursor.execute('SELECT * FROM `%s`' % planner_name)
# get this planner's attributes
planner_attributes = [ t[0] for t in cursor.description]
#print>>csv, planner_attributes
# loop through each row of the planner experiments, aka each 'run'
for run in cursor.fetchall():
# write a *simplified* planner name
name_short = planner_name.strip('planner')
name_short = name_short.strip('_OMPL_')
name_short = name_short.replace('[','_')
name_short = name_short.strip('kConfigDefault]')
csv.write(name_short)
# loop through each global attribute
for atr in attributes:
# find the global attribute in this table if it exists
if atr in planner_attributes:
# output value
index_of_attr = planner_attributes.index(atr)
csv.write(", %s" %run[index_of_attr])
else:
csv.write(", ")
# done with this line
csv.write("\n")
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots")
parser.add_option("-c", "--csv", dest="csv", default=None,
help="Create a CSV of combined experiments")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("-o", "--overwrite", action="store_true", dest="overwrite", default=False,
help="Use this flag to enable overwriting a previous database file with new benchmarks")
if len(argv) == 1:
parser.print_help()
(options, args) = parser.parse_args()
if len(args) > 0:
# Check if user wants to start a new database (delete old one)
if options.overwrite:
try:
os.remove(options.dbname)
except OSError:
pass
read_benchmark_log(options.dbname, args)
if options.plot:
plot_statistics(options.dbname, options.plot)
if options.csv:
generate_csv(options.dbname, options.csv)
if options.mysqldb:
save_as_mysql(options.dbname, options.mysqldb)
|
[
"matplotlib.backends.backend_pdf.PdfPages",
"os.remove",
"matplotlib.pyplot.show",
"optparse.OptionParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.boxplot",
"matplotlib.__version__.split",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.figtext",
"matplotlib.use",
"sqlite3.connect",
"re.search",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xticks",
"re.sub"
] |
[((1912, 1933), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (1926, 1933), False, 'import matplotlib\n'), ((2282, 2305), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (2297, 2305), False, 'import sqlite3\n'), ((7346, 7355), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7353, 7355), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7374), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7372, 7374), True, 'import matplotlib.pyplot as plt\n'), ((9387, 9397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9395, 9397), True, 'import matplotlib.pyplot as plt\n'), ((9541, 9564), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (9556, 9564), False, 'import sqlite3\n'), ((10617, 10632), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['fname'], {}), '(fname)\n', (10625, 10632), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((10816, 10825), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10823, 10825), True, 'import matplotlib.pyplot as plt\n'), ((11879, 11889), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11887, 11889), True, 'import matplotlib.pyplot as plt\n'), ((12106, 12129), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (12121, 12129), False, 'import sqlite3\n'), ((14341, 14364), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (14356, 14364), False, 'import sqlite3\n'), ((17462, 17481), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (17474, 17481), False, 'from optparse import OptionParser, OptionGroup\n'), ((8425, 8469), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'measurements_percentage', 'width'], {}), '(ind, measurements_percentage, width)\n', (8432, 8469), True, 'import matplotlib.pyplot as plt\n'), ((8491, 8556), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[(x + width / 2.0) for x in ind]', 'labels'], {'rotation': '(30)'}), '([(x + width / 2.0) for x in ind], labels, rotation=30)\n', (8501, 8556), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8953), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xticklabels': 'labels'}), '(ax, xticklabels=labels)\n', (8929, 8953), True, 'import matplotlib.pyplot as plt\n'), ((8961, 8994), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(25)'}), '(xtickNames, rotation=25)\n', (8969, 8994), True, 'import matplotlib.pyplot as plt\n'), ((11676, 11727), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', 'pagey', '("Experiment \'%s\'" % d[0])'], {}), '(pagex, pagey, "Experiment \'%s\'" % d[0])\n', (11687, 11727), True, 'import matplotlib.pyplot as plt\n'), ((11736, 11774), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', '(pagey - 0.05)', 'runs'], {}), '(pagex, pagey - 0.05, runs)\n', (11747, 11774), True, 'import matplotlib.pyplot as plt\n'), ((11781, 11853), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', '(pagey - 0.1)', "('Time limit per run: %s seconds' % d[1])"], {}), "(pagex, pagey - 0.1, 'Time limit per run: %s seconds' % d[1])\n", (11792, 11853), True, 'import matplotlib.pyplot as plt\n'), ((11905, 11914), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11912, 11914), True, 'import matplotlib.pyplot as plt\n'), ((13071, 13105), 're.sub', 're.sub', (['"""[\\\\n\\\\r\\\\t ]+"""', '""" """', 'line'], {}), "('[\\\\n\\\\r\\\\t ]+', ' ', line)\n", (13077, 13105), False, 'import re\n'), ((13116, 13167), 're.search', 're.search', (['"""CREATE TABLE ([a-zA-Z0-9_]*)(.*)"""', 'line'], {}), 
"('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)\n", (13125, 13167), False, 'import re\n'), ((13776, 13826), 're.sub', 're.sub', (['"""([^\'])\'t\'(.)"""', '"""\\\\1THIS_IS_TRUE\\\\2"""', 'line'], {}), '("([^\'])\'t\'(.)", \'\\\\1THIS_IS_TRUE\\\\2\', line)\n', (13782, 13826), False, 'import re\n'), ((13892, 13943), 're.sub', 're.sub', (['"""([^\'])\'f\'(.)"""', '"""\\\\1THIS_IS_FALSE\\\\2"""', 'line'], {}), '("([^\'])\'f\'(.)", \'\\\\1THIS_IS_FALSE\\\\2\', line)\n', (13898, 13943), False, 'import re\n'), ((8682, 8744), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['measurements'], {'notch': '(0)', 'sym': '"""k+"""', 'vert': '(1)', 'whis': '(1.5)'}), "(measurements, notch=0, sym='k+', vert=1, whis=1.5)\n", (8693, 8744), True, 'import matplotlib.pyplot as plt\n'), ((8771, 8849), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['measurements'], {'notch': '(0)', 'sym': '"""k+"""', 'vert': '(1)', 'whis': '(1.5)', 'bootstrap': '(1000)'}), "(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)\n", (8782, 8849), True, 'import matplotlib.pyplot as plt\n'), ((13538, 13590), 're.search', 're.search', (['"""INSERT INTO "([a-zA-Z0-9_]*)"(.*)"""', 'line'], {}), '(\'INSERT INTO "([a-zA-Z0-9_]*)"(.*)\', line)\n', (13547, 13590), False, 'import re\n'), ((10801, 10810), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10808, 10810), True, 'import matplotlib.pyplot as plt\n'), ((18576, 18601), 'os.remove', 'os.remove', (['options.dbname'], {}), '(options.dbname)\n', (18585, 18601), False, 'import os\n'), ((8634, 8662), 'matplotlib.__version__.split', 'matplotlibversion.split', (['"""."""'], {}), "('.')\n", (8657, 8662), True, 'from matplotlib import __version__ as matplotlibversion\n')]
|
import xmltodict
import json
from elasticsearch import Elasticsearch
from elasticsearch.connection import create_ssl_context
import sys
##By: <NAME>
def Connect2ES(ip='127.0.0.1',port='9200',user="",password="",https=False,CertPath="",ES_Index='reports',Data=""):
## Connection to Elastic Search (http/https)
raiseFieldLimit = '''
{
"index.mapping.total_fields.limit": 500000
}'''
if https :
context = create_ssl_context(cafile=CertPath)
es = Elasticsearch(
[ip],
http_auth=(user, password),
scheme="https",
port=int(port),
ssl_context=context,
)
else :
es = Elasticsearch(
[ip],
scheme="http",
port=int(port),
)
if not es.indices.exists(index=ES_Index):
es.indices.create(index=ES_Index, ignore=400,body=raiseFieldLimit)
es.index(index=ES_Index, doc_type='Report', body=Data)
def XML2JSON(address):
# Any XML 2 JSON (tested on: ZAP, Nesus v2 and higher, acunetix 11, Openvas, Arachni , Nikto, NMAP)
file = open(address,"r")
return (json.dumps(xmltodict.parse(file.read())))
if (sys.argv[1] == 'http'):
Connect2ES (ip=sys.argv[2],port=sys.argv[3],https=False,ES_Index=(sys.argv[5]),Data=XML2JSON(sys.argv[4]))
elif (sys.argv[1] == 'https'):
Connect2ES (ip=sys.argv[2],port=sys.argv[3],https=True,user=sys.argv[6],password=sys.argv[7],CertPath=sys.argv[8],ES_Index=(sys.argv[5]),Data=XML2JSON(sys.argv[4]))
else:
    print('Not supported in this version')
# Usage: python3 scanner.py [HTTP] [ES IP] [ES Port] [XML Path] [ES Index name]
# python3 scanner.py [HTTPs] [ES IP] [ES Port] [XML Path] [ES Index name] [User Name] [Password] [Cert Path]
|
[
"elasticsearch.connection.create_ssl_context"
] |
[((443, 478), 'elasticsearch.connection.create_ssl_context', 'create_ssl_context', ([], {'cafile': 'CertPath'}), '(cafile=CertPath)\n', (461, 478), False, 'from elasticsearch.connection import create_ssl_context\n')]
|
import random
from collections import deque
from typing import Tuple
from game.direction import Directions, GRID_WIDTH, GRID_HEIGHT
class GameState:
apple_pos = (0, 0)
positions = None
direction = 0
def __init__(self, start_pos: Tuple[int, int], length: int = 3) -> None:
self.length = length
self.positions = deque()
self.positions.append(start_pos)
for i in range(1, length):
self.positions.append(Directions.apply(2, self.positions[i - 1]))
self.apple_replace()
def move(self) -> Tuple[bool, bool]:
new_pos = Directions.apply(self.direction, self.positions[0])
# check for edge
if new_pos[0] == -1 or new_pos[0] == GRID_WIDTH or new_pos[1] == -1 or new_pos[1] == GRID_HEIGHT:
return False, False
# check for apple
has_grown = self.apple_pos == new_pos
if has_grown:
self.length += 1
self.apple_replace()
else:
self.positions.pop()
# check for self intersection
self_intersection = new_pos in self.positions
self.positions.appendleft(new_pos)
return not self_intersection, has_grown
def apple_replace(self):
self.apple_pos = (random.randrange(0, GRID_WIDTH), random.randrange(0, GRID_HEIGHT))
while self.apple_pos in self.positions:
self.apple_pos = (random.randrange(0, GRID_WIDTH), random.randrange(0, GRID_HEIGHT))
def distance_to_apple(self):
x, y = self.positions[0]
return abs(x - self.apple_pos[0]) + abs(y - self.apple_pos[1])
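# Minimal usage sketch (direction codes are defined in game.direction and assumed here;
# the starting cell is arbitrary):
# state = GameState((GRID_WIDTH // 2, GRID_HEIGHT // 2))
# still_alive, ate_apple = state.move()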
|
[
"random.randrange",
"game.direction.Directions.apply",
"collections.deque"
] |
[((346, 353), 'collections.deque', 'deque', ([], {}), '()\n', (351, 353), False, 'from collections import deque\n'), ((598, 649), 'game.direction.Directions.apply', 'Directions.apply', (['self.direction', 'self.positions[0]'], {}), '(self.direction, self.positions[0])\n', (614, 649), False, 'from game.direction import Directions, GRID_WIDTH, GRID_HEIGHT\n'), ((1258, 1289), 'random.randrange', 'random.randrange', (['(0)', 'GRID_WIDTH'], {}), '(0, GRID_WIDTH)\n', (1274, 1289), False, 'import random\n'), ((1291, 1323), 'random.randrange', 'random.randrange', (['(0)', 'GRID_HEIGHT'], {}), '(0, GRID_HEIGHT)\n', (1307, 1323), False, 'import random\n'), ((464, 506), 'game.direction.Directions.apply', 'Directions.apply', (['(2)', 'self.positions[i - 1]'], {}), '(2, self.positions[i - 1])\n', (480, 506), False, 'from game.direction import Directions, GRID_WIDTH, GRID_HEIGHT\n'), ((1403, 1434), 'random.randrange', 'random.randrange', (['(0)', 'GRID_WIDTH'], {}), '(0, GRID_WIDTH)\n', (1419, 1434), False, 'import random\n'), ((1436, 1468), 'random.randrange', 'random.randrange', (['(0)', 'GRID_HEIGHT'], {}), '(0, GRID_HEIGHT)\n', (1452, 1468), False, 'import random\n')]
|
import os
from dotenv import load_dotenv, find_dotenv
from sqlalchemy.sql.sqltypes import Integer, REAL, TEXT
from sqlalchemy import (create_engine, MetaData, Column, Table)
from sqlalchemy_utils import database_exists, create_database
load_dotenv(find_dotenv())
engine = create_engine(f"sqlite:///{os.getenv('SQLITE_DB_NAME')}", echo=False)
if not database_exists(engine.url):
create_database(engine.url)
metadata = MetaData(bind=engine)
produtos = Table('products', metadata,
Column('id', Integer, primary_key=True),
Column('name', TEXT(50), nullable=False),
Column('price', REAL(8,2), nullable=False),
Column('stock', TEXT(50), nullable=False)
)
metadata.create_all()
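# Hypothetical follow-up (row values are illustrative only; not part of the original script):
# with engine.begin() as conn:
#     conn.execute(produtos.insert().values(name="Keyboard", price=49.90, stock="12"))
#     print(conn.execute(produtos.select()).fetchall())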
|
[
"sqlalchemy.MetaData",
"sqlalchemy_utils.create_database",
"dotenv.find_dotenv",
"sqlalchemy_utils.database_exists",
"sqlalchemy.Column",
"sqlalchemy.sql.sqltypes.REAL",
"os.getenv",
"sqlalchemy.sql.sqltypes.TEXT"
] |
[((425, 446), 'sqlalchemy.MetaData', 'MetaData', ([], {'bind': 'engine'}), '(bind=engine)\n', (433, 446), False, 'from sqlalchemy import create_engine, MetaData, Column, Table\n'), ((249, 262), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (260, 262), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((352, 379), 'sqlalchemy_utils.database_exists', 'database_exists', (['engine.url'], {}), '(engine.url)\n', (367, 379), False, 'from sqlalchemy_utils import database_exists, create_database\n'), ((385, 412), 'sqlalchemy_utils.create_database', 'create_database', (['engine.url'], {}), '(engine.url)\n', (400, 412), False, 'from sqlalchemy_utils import database_exists, create_database\n'), ((491, 530), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (497, 530), False, 'from sqlalchemy import create_engine, MetaData, Column, Table\n'), ((551, 559), 'sqlalchemy.sql.sqltypes.TEXT', 'TEXT', (['(50)'], {}), '(50)\n', (555, 559), False, 'from sqlalchemy.sql.sqltypes import Integer, REAL, TEXT\n'), ((598, 608), 'sqlalchemy.sql.sqltypes.REAL', 'REAL', (['(8)', '(2)'], {}), '(8, 2)\n', (602, 608), False, 'from sqlalchemy.sql.sqltypes import Integer, REAL, TEXT\n'), ((646, 654), 'sqlalchemy.sql.sqltypes.TEXT', 'TEXT', (['(50)'], {}), '(50)\n', (650, 654), False, 'from sqlalchemy.sql.sqltypes import Integer, REAL, TEXT\n'), ((301, 328), 'os.getenv', 'os.getenv', (['"""SQLITE_DB_NAME"""'], {}), "('SQLITE_DB_NAME')\n", (310, 328), False, 'import os\n')]
|
import json
import time
import datetime
from aliyunsdkcore.client import AcsClient
from cachetools import cached, TTLCache
from prometheus_client.metrics_core import GaugeMetricFamily
import aliyunsdkecs.request.v20140526.DescribeInstancesRequest as DescribeECS
import aliyunsdkrds.request.v20140815.DescribeDBInstancesRequest as DescribeRDS
import aliyunsdkr_kvstore.request.v20150101.DescribeInstancesRequest as DescribeRedis
import aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest as DescribeSLB
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerAttributeRequest as DescribeSLBAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerTCPListenerAttributeRequest as DescribeSLBTcpAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPListenerAttributeRequest as DescribeSLBHttpAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPSListenerAttributeRequest as DescribeSLBHttpsAttr
import aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest as Mongodb
import aliyunsdkcdn.request.v20180510.DescribeUserDomainsRequest as DescribeCDN
from aliyun_exporter.utils import try_or_else
cache = TTLCache(maxsize=100, ttl=3600)
'''
InfoProvider provides the information of cloud resources as metric.
The result from alibaba cloud API will be cached for an hour.
Different resources should implement its own 'xxx_info' function.
Different resource has different information structure, and most of
them are nested, for simplicity, we map the top-level attributes to the
labels of metric, and handle nested attribute specially. If a nested
attribute is not handled explicitly, it will be dropped.
'''
class InfoProvider():
def __init__(self, client: AcsClient):
self.client = client
@cached(cache)
def get_metrics(self, resource: str) -> GaugeMetricFamily:
return {
'ecs': lambda : self.ecs_info(),
'rds': lambda : self.rds_info(),
'cdn': lambda : self.cdn_info(),
'redis': lambda : self.redis_info(),
'slb':lambda : self.slb_info(),
'mongodb':lambda : self.mongodb_info(),
}[resource]()
def ecs_info(self) -> GaugeMetricFamily:
req = DescribeECS.DescribeInstancesRequest()
nested_handler = {
'InnerIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'PublicIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'VpcAttributes': lambda obj : try_or_else(lambda : obj['PrivateIpAddress']['IpAddress'][0], ''),
}
return self.info_template(req, 'aliyun_meta_ecs_info', nested_handler=nested_handler)
def rds_info(self) -> GaugeMetricFamily:
req = DescribeRDS.DescribeDBInstancesRequest()
return self.info_template(req, 'aliyun_meta_rds_info', to_list=lambda data: data['Items']['DBInstance'])
def redis_info(self) -> GaugeMetricFamily:
req = DescribeRedis.DescribeInstancesRequest()
return self.info_template(req, 'aliyun_meta_redis_info', to_list=lambda data: data['Instances']['KVStoreInstance'])
def slb_info(self) -> GaugeMetricFamily:
req = DescribeSLB.DescribeLoadBalancersRequest()
gauge = self.info_template(req, 'aliyun_meta_slb_info', to_list=lambda data: data['LoadBalancers']['LoadBalancer'])
gauge_slb_info = None
for s in gauge.samples:
slb_id = s.labels['LoadBalancerId']
req_slb_attr = DescribeSLBAttr.DescribeLoadBalancerAttributeRequest()
req_slb_attr.set_LoadBalancerId(slb_id)
slb_attrs_resp = self.client.do_action_with_exception(req_slb_attr)
slb_attrs_info = json.loads(slb_attrs_resp)
for protocol_info in slb_attrs_info['ListenerPortsAndProtocol']['ListenerPortAndProtocol']:
protocol = protocol_info['ListenerProtocol']
port = protocol_info['ListenerPort']
req_slb_proto = None
if protocol == 'tcp':
req_slb_proto = DescribeSLBTcpAttr.DescribeLoadBalancerTCPListenerAttributeRequest()
elif protocol == 'http':
req_slb_proto = DescribeSLBHttpAttr.DescribeLoadBalancerHTTPListenerAttributeRequest()
elif protocol == 'https':
req_slb_proto = DescribeSLBHttpsAttr.DescribeLoadBalancerHTTPSListenerAttributeRequest()
req_slb_proto.set_LoadBalancerId(slb_id)
req_slb_proto.set_ListenerPort(int(port))
slb_protocol_resp = self.client.do_action_with_exception(req_slb_proto)
slb_protocol_info: dict = json.loads(slb_protocol_resp)
if 'ForwardCode' in slb_protocol_info.keys():
continue
Bandwidth = slb_protocol_info['Bandwidth']
if gauge_slb_info is None:
gauge_slb_info = GaugeMetricFamily('aliyun_meta_slb_proto_bandwidth', 'protocolBandwidth', labels=['instanceId', 'ListenerProtocol', 'ListenerPort'])
gauge_slb_info.add_metric([slb_id, protocol, str(port)], value=float(Bandwidth))
return gauge_slb_info
def mongodb_info(self) -> GaugeMetricFamily:
req = Mongodb.DescribeDBInstancesRequest()
return self.info_template(req, 'aliyun_meta_mongodb_info', to_list=lambda data: data['DBInstances']['DBInstance'])
def cdn_info(self) -> GaugeMetricFamily:
req = DescribeCDN.DescribeUserDomainsRequest()
req.set_DomainStatus('online')
nested_handler = {
'DomainName': lambda obj: try_or_else(lambda: obj['DomainName'], ''),
}
return self.info_template(req, 'aliyun_meta_cdn_info', to_list=lambda data: data['Domains']['PageData'])
'''
Template method to retrieve resource information and transform to metric.
'''
def info_template(self,
req,
name,
desc='',
page_size=100,
page_num=1,
nested_handler=None,
to_list=(lambda data: data['Instances']['Instance'])) -> GaugeMetricFamily:
gauge = None
label_keys = None
for instance in self.pager_generator(req, page_size, page_num, to_list):
if gauge is None:
label_keys = self.label_keys(instance, nested_handler)
gauge = GaugeMetricFamily(name, desc, labels=label_keys)
gauge.add_metric(labels=self.label_values(instance, label_keys, nested_handler), value=1.0)
return gauge
def info_template_bytime(self,
req,
name,
desc='',
label_keys=None,
nested_handler=None,
to_value=(lambda data: data['Instances']['Instance'])) -> GaugeMetricFamily:
value = self.generator_by_time(req, to_value)
gauge = GaugeMetricFamily(name, desc, labels=label_keys)
gauge.add_metric(labels=[value], value=1.0)
return gauge
def pager_generator(self, req, page_size, page_num, to_list):
req.set_PageSize(page_size)
while True:
req.set_PageNumber(page_num)
resp = self.client.do_action_with_exception(req)
data = json.loads(resp)
instances = to_list(data)
for instance in instances:
                if 'test' not in instance.get('DomainName', ''):  # DomainName only exists on CDN rows
yield instance
if len(instances) < page_size:
break
page_num += 1
def generator_by_time(self, req, to_value):
now = time.time() - 60
start_time = datetime.datetime.utcfromtimestamp(now-120).strftime("%Y-%m-%dT%H:%M:%SZ")
end_time = datetime.datetime.utcfromtimestamp(now).strftime("%Y-%m-%dT%H:%M:%SZ")
req.set_accept_format('json')
req.set_StartTime(start_time)
req.set_EndTime(end_time)
resp = self.client.do_action_with_exception(req)
value = to_value(resp)
return value
def label_keys(self, instance, nested_handler=None):
if nested_handler is None:
nested_handler = {}
return [k for k, v in instance.items()
if k in nested_handler or isinstance(v, str) or isinstance(v, int)]
def label_values(self, instance, label_keys, nested_handler=None):
if nested_handler is None:
nested_handler = {}
return map(lambda k: str(nested_handler[k](instance[k])) if k in nested_handler else try_or_else(lambda: str(instance[k]), ''),
label_keys)
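# Hypothetical usage sketch (the credentials and region below are placeholders):
# client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
# ecs_info_gauge = InfoProvider(client).get_metrics('ecs')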
|
[
"aliyunsdkrds.request.v20140815.DescribeDBInstancesRequest.DescribeDBInstancesRequest",
"aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPSListenerAttributeRequest.DescribeLoadBalancerHTTPSListenerAttributeRequest",
"aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest.DescribeDBInstancesRequest",
"aliyunsdkr_kvstore.request.v20150101.DescribeInstancesRequest.DescribeInstancesRequest",
"cachetools.TTLCache",
"json.loads",
"aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPListenerAttributeRequest.DescribeLoadBalancerHTTPListenerAttributeRequest",
"aliyunsdkslb.request.v20140515.DescribeLoadBalancerTCPListenerAttributeRequest.DescribeLoadBalancerTCPListenerAttributeRequest",
"cachetools.cached",
"time.time",
"datetime.datetime.utcfromtimestamp",
"prometheus_client.metrics_core.GaugeMetricFamily",
"aliyunsdkslb.request.v20140515.DescribeLoadBalancerAttributeRequest.DescribeLoadBalancerAttributeRequest",
"aliyunsdkcdn.request.v20180510.DescribeUserDomainsRequest.DescribeUserDomainsRequest",
"aliyun_exporter.utils.try_or_else",
"aliyunsdkecs.request.v20140526.DescribeInstancesRequest.DescribeInstancesRequest",
"aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest.DescribeLoadBalancersRequest"
] |
[((1148, 1179), 'cachetools.TTLCache', 'TTLCache', ([], {'maxsize': '(100)', 'ttl': '(3600)'}), '(maxsize=100, ttl=3600)\n', (1156, 1179), False, 'from cachetools import cached, TTLCache\n'), ((1757, 1770), 'cachetools.cached', 'cached', (['cache'], {}), '(cache)\n', (1763, 1770), False, 'from cachetools import cached, TTLCache\n'), ((2213, 2251), 'aliyunsdkecs.request.v20140526.DescribeInstancesRequest.DescribeInstancesRequest', 'DescribeECS.DescribeInstancesRequest', ([], {}), '()\n', (2249, 2251), True, 'import aliyunsdkecs.request.v20140526.DescribeInstancesRequest as DescribeECS\n'), ((2733, 2773), 'aliyunsdkrds.request.v20140815.DescribeDBInstancesRequest.DescribeDBInstancesRequest', 'DescribeRDS.DescribeDBInstancesRequest', ([], {}), '()\n', (2771, 2773), True, 'import aliyunsdkrds.request.v20140815.DescribeDBInstancesRequest as DescribeRDS\n'), ((2949, 2989), 'aliyunsdkr_kvstore.request.v20150101.DescribeInstancesRequest.DescribeInstancesRequest', 'DescribeRedis.DescribeInstancesRequest', ([], {}), '()\n', (2987, 2989), True, 'import aliyunsdkr_kvstore.request.v20150101.DescribeInstancesRequest as DescribeRedis\n'), ((3174, 3216), 'aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest.DescribeLoadBalancersRequest', 'DescribeSLB.DescribeLoadBalancersRequest', ([], {}), '()\n', (3214, 3216), True, 'import aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest as DescribeSLB\n'), ((5247, 5283), 'aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest.DescribeDBInstancesRequest', 'Mongodb.DescribeDBInstancesRequest', ([], {}), '()\n', (5281, 5283), True, 'import aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest as Mongodb\n'), ((5467, 5507), 'aliyunsdkcdn.request.v20180510.DescribeUserDomainsRequest.DescribeUserDomainsRequest', 'DescribeCDN.DescribeUserDomainsRequest', ([], {}), '()\n', (5505, 5507), True, 'import aliyunsdkcdn.request.v20180510.DescribeUserDomainsRequest as DescribeCDN\n'), ((7001, 7049), 'prometheus_client.metrics_core.GaugeMetricFamily', 'GaugeMetricFamily', (['name', 'desc'], {'labels': 'label_keys'}), '(name, desc, labels=label_keys)\n', (7018, 7049), False, 'from prometheus_client.metrics_core import GaugeMetricFamily\n'), ((3478, 3532), 'aliyunsdkslb.request.v20140515.DescribeLoadBalancerAttributeRequest.DescribeLoadBalancerAttributeRequest', 'DescribeSLBAttr.DescribeLoadBalancerAttributeRequest', ([], {}), '()\n', (3530, 3532), True, 'import aliyunsdkslb.request.v20140515.DescribeLoadBalancerAttributeRequest as DescribeSLBAttr\n'), ((3694, 3720), 'json.loads', 'json.loads', (['slb_attrs_resp'], {}), '(slb_attrs_resp)\n', (3704, 3720), False, 'import json\n'), ((7367, 7383), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (7377, 7383), False, 'import json\n'), ((7711, 7722), 'time.time', 'time.time', ([], {}), '()\n', (7720, 7722), False, 'import time\n'), ((2322, 2367), 'aliyun_exporter.utils.try_or_else', 'try_or_else', (["(lambda : obj['IpAddress'][0])", '""""""'], {}), "(lambda : obj['IpAddress'][0], '')\n", (2333, 2367), False, 'from aliyun_exporter.utils import try_or_else\n'), ((2413, 2458), 'aliyun_exporter.utils.try_or_else', 'try_or_else', (["(lambda : obj['IpAddress'][0])", '""""""'], {}), "(lambda : obj['IpAddress'][0], '')\n", (2424, 2458), False, 'from aliyun_exporter.utils import try_or_else\n'), ((2502, 2567), 'aliyun_exporter.utils.try_or_else', 'try_or_else', (["(lambda : obj['PrivateIpAddress']['IpAddress'][0])", '""""""'], {}), "(lambda : obj['PrivateIpAddress']['IpAddress'][0], '')\n", (2513, 2567), False, 'from 
aliyun_exporter.utils import try_or_else\n'), ((4663, 4692), 'json.loads', 'json.loads', (['slb_protocol_resp'], {}), '(slb_protocol_resp)\n', (4673, 4692), False, 'import json\n'), ((5612, 5655), 'aliyun_exporter.utils.try_or_else', 'try_or_else', (["(lambda : obj['DomainName'])", '""""""'], {}), "(lambda : obj['DomainName'], '')\n", (5623, 5655), False, 'from aliyun_exporter.utils import try_or_else\n'), ((6453, 6501), 'prometheus_client.metrics_core.GaugeMetricFamily', 'GaugeMetricFamily', (['name', 'desc'], {'labels': 'label_keys'}), '(name, desc, labels=label_keys)\n', (6470, 6501), False, 'from prometheus_client.metrics_core import GaugeMetricFamily\n'), ((7749, 7794), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(now - 120)'], {}), '(now - 120)\n', (7783, 7794), False, 'import datetime\n'), ((7843, 7882), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['now'], {}), '(now)\n', (7877, 7882), False, 'import datetime\n'), ((4050, 4118), 'aliyunsdkslb.request.v20140515.DescribeLoadBalancerTCPListenerAttributeRequest.DescribeLoadBalancerTCPListenerAttributeRequest', 'DescribeSLBTcpAttr.DescribeLoadBalancerTCPListenerAttributeRequest', ([], {}), '()\n', (4116, 4118), True, 'import aliyunsdkslb.request.v20140515.DescribeLoadBalancerTCPListenerAttributeRequest as DescribeSLBTcpAttr\n'), ((4923, 5059), 'prometheus_client.metrics_core.GaugeMetricFamily', 'GaugeMetricFamily', (['"""aliyun_meta_slb_proto_bandwidth"""', '"""protocolBandwidth"""'], {'labels': "['instanceId', 'ListenerProtocol', 'ListenerPort']"}), "('aliyun_meta_slb_proto_bandwidth', 'protocolBandwidth',\n labels=['instanceId', 'ListenerProtocol', 'ListenerPort'])\n", (4940, 5059), False, 'from prometheus_client.metrics_core import GaugeMetricFamily\n'), ((4196, 4266), 'aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPListenerAttributeRequest.DescribeLoadBalancerHTTPListenerAttributeRequest', 'DescribeSLBHttpAttr.DescribeLoadBalancerHTTPListenerAttributeRequest', ([], {}), '()\n', (4264, 4266), True, 'import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPListenerAttributeRequest as DescribeSLBHttpAttr\n'), ((4345, 4417), 'aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPSListenerAttributeRequest.DescribeLoadBalancerHTTPSListenerAttributeRequest', 'DescribeSLBHttpsAttr.DescribeLoadBalancerHTTPSListenerAttributeRequest', ([], {}), '()\n', (4415, 4417), True, 'import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPSListenerAttributeRequest as DescribeSLBHttpsAttr\n')]
|