repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-680k chars)
---|---|---|---|---|
Sloomey/DeepSpace2019 | src/autonomous/purepursuit.py | dda035c0ac100209b03a2ff04d86df09c6de9a85 | import math
from constants import Constants
from utils import vector2d
from wpilib import SmartDashboard as Dash
from autonomous import pursuitpoint
class PurePursuit():
"""An implementation of the Pure Pursuit path tracking algorithm."""
def __init__(self, path):
self.path = path
self.pursuit_points = [pursuitpoint.PursuitPoint(p, c) for p, c in zip(
self.path.getPoints(), self.path.getCurvatures())]
self.last_lookahead_index = 0
self.cur_curvature = 0
self.target_velocities = vector2d.Vector2D()
self.closest_point_index = 0
def computeVelocities(self):
"""Compute the velocities along the path."""
# Compute the velocities along the path using the curvature and Constants.CURVE_VELOCITY
for ppoint in self.pursuit_points:
if abs(ppoint.curvature) <= Constants.CURVATURE_THRESHOLD:
velocity = Constants.MAX_VELOCITY
else:
velocity = min(Constants.MAX_VELOCITY,
Constants.CURVE_VELOCITY/ppoint.curvature)
ppoint.velocity = velocity
# Limit the acceleration of the velocities
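# Backward pass: v_i = sqrt(v_{i+1}^2 + 2 * a * d) is the fastest speed at
# point i that still allows slowing to v_{i+1} over the distance d between
# the two points with acceleration limit a = Constants.MAX_ACCELERATION.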
for i in reversed(range(0, len(self.pursuit_points)-1)):
distance = self.pursuit_points[i].point.getDistance(
self.pursuit_points[i+1].point)
new_velocity = math.sqrt(
self.pursuit_points[i+1].velocity**2 + (2 * Constants.MAX_ACCELERATION * distance))
new_velocity = min(self.pursuit_points[i].velocity, new_velocity)
self.pursuit_points[i].velocity = new_velocity
def updateLookaheadPointIndex2(self, state):
"""Update the lookahead point given the current robot state.
Uses the minimum distance point if the state is more than
Constants.LOOKAHEAD_DIST from all points, otherwise uses the
point whose distance is closest to Constants.LOOKAHEAD_DIST."""
# Compute point distances to state and differences from those distances to Constants.LOOKAHEAD_DIST
distances = [math.hypot(state.x - ppoint.point.x,
state.y - ppoint.point.y) for ppoint in self.pursuit_points]
differences = [abs(d-Constants.LOOKAHEAD_DIST) for d in distances]
min_distance = min(distances)
# Get new lookahead index
if min_distance <= Constants.LOOKAHEAD_DIST:
self.last_lookahead_index = differences.index(min(differences))
else:
self.last_lookahead_index = distances.index(min_distance)
def updateLookaheadPointIndex(self, state):
"""Loop over the points in the path to get the lookahead point given the current robot state."""
for i in range(self.last_lookahead_index, len(self.pursuit_points)-1):
lookahead = self.computeLookaheadPoint(
self.pursuit_points[i].point, self.pursuit_points[i+1].point, state)
if lookahead is not None:
self.last_lookahead_index = i
def computeLookaheadPoint(self, start, end, state):
"""Compute the lookahead point given the current robot state.
Returns a point if the current state is Constants.LOOKAHEAD_DIST
from between start and end, otherwise returns None."""
# Algorithm for circle line segment intersection found here: https://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm/1084899#1084899
segment_direction = end - start
center_to_start = start - state
a = segment_direction * segment_direction
b = 2 * (center_to_start * segment_direction)
c = (center_to_start * center_to_start) - Constants.LOOKAHEAD_DIST ** 2
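# a, b and c are the coefficients of the quadratic |center_to_start +
# t*segment_direction|^2 = LOOKAHEAD_DIST^2; roots t in [0, 1] are the
# intersections of the lookahead circle with the path segment.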
discriminant = b**2 - (4 * a * c)
if discriminant < 0:
return None
else:
discriminant = math.sqrt(discriminant)
t0 = (-b - discriminant) / (2 * a)
t1 = (-b + discriminant) / (2 * a)
if t0 >= 0 and t0 <= 1:
return start + t0 * segment_direction
if t1 >= 0 and t1 <= 1:
return start + t1 * segment_direction
return None
def updateCurvature(self, state):
"""Update the curvature from the current lookahead point to the current robot position."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
# Transform the lookahead and state.pos to get an aligned vector
transform = lookahead - state.pos
transform = transform.getRotated(-state.angle)
# Use the transformed vector to calculate the curvature (derived from https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf#page=12)
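# (In the robot frame, transform.x is the lateral offset of the lookahead
# point; the arc that reaches a point offset by x at distance L has
# curvature 2*x / L^2, which is the expression below.)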
self.cur_curvature = (2 * transform.x) / Constants.LOOKAHEAD_DIST**2
def updateClosestPointIndex(self, state):
"""Update the index of the closest point to the current robot position."""
index = self.closest_point_index
smallest_distance = self.pursuit_points[index].point.getDistance(state)
for i in range(0, len(self.pursuit_points)):
distance = self.pursuit_points[i].point.getDistance(state)
if smallest_distance > distance:
smallest_distance = distance
index = i
self.closest_point_index = index
def updateTargetVelocities(self, state):
"""Update the target velocities of the left and right wheels."""
robot_velocity = self.pursuit_points[self.closest_point_index].velocity
# Use kinematics (http://robotsforroboticists.com/drive-kinematics/) and algebra to find wheel target velocities
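# Differential-drive kinematics give wheel speeds v * (2 +/- curvature *
# TRACK_WIDTH) / 2; the extra division by PURE_PURSUIT_KV presumably maps
# the result into the normalized range that the scaling below enforces.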
l_velocity = robot_velocity * \
(2 + self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
r_velocity = robot_velocity * \
(2 - self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
scale = max(abs(l_velocity), abs(r_velocity))
if scale > 1:
l_velocity /= scale
r_velocity /= scale
self.target_velocities = vector2d.Vector2D(l_velocity, r_velocity)
def update(self, state):
"""Update the pure pursuit follower(runs all update functions)."""
# TODO which lookahead function to use
self.updateLookaheadPointIndex(state.pos)
# self.updateLookaheadPointIndex2(state.pos)
self.updateCurvature(state)
self.updateClosestPointIndex(state.pos)
self.updateTargetVelocities(state.pos)
def outputToSmartDashboard(self):
"""Output values to the smart dashboard."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
closest = self.pursuit_points[self.closest_point_index].point
Dash.putNumberArray("Lookahead Point", [lookahead.x, lookahead.y])
Dash.putNumber("Curvature", self.cur_curvature)
Dash.putNumberArray("Closes Point", [closest.x, closest.y])
Dash.putNumberArray("Target Velocities", [
self.target_velocities.x, self.target_velocities.y])
#print("Lookahead Point - {}".format(lookahead))
#print("Curvature - {}".format(self.cur_curvature))
#print("Closes Point - {}".format(closest))
#print("Target Velocities - {}".format(self.target_velocities))
# print("------------------------------")
def isDone(self):
"""Check if the path is done being followed."""
return (len(self.pursuit_points) - self.closest_point_index) <= 1
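# A minimal sketch of how the follower is meant to be driven; `path`,
# `robot_state` and `drivetrain` are stand-ins for the real objects the
# robot code would supply, not part of this module:
#
# follower = PurePursuit(path)
# follower.computeVelocities()
# while not follower.isDone():
#     follower.update(robot_state)
#     left, right = follower.target_velocities.x, follower.target_velocities.y
#     drivetrain.setVelocities(left, right)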
| [((546, 565), 'utils.vector2d.Vector2D', 'vector2d.Vector2D', ([], {}), '()\n', (563, 565), False, 'from utils import vector2d\n'), ((6145, 6186), 'utils.vector2d.Vector2D', 'vector2d.Vector2D', (['l_velocity', 'r_velocity'], {}), '(l_velocity, r_velocity)\n', (6162, 6186), False, 'from utils import vector2d\n'), ((6815, 6881), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Lookahead Point"""', '[lookahead.x, lookahead.y]'], {}), "('Lookahead Point', [lookahead.x, lookahead.y])\n", (6834, 6881), True, 'from wpilib import SmartDashboard as Dash\n'), ((6890, 6937), 'wpilib.SmartDashboard.putNumber', 'Dash.putNumber', (['"""Curvature"""', 'self.cur_curvature'], {}), "('Curvature', self.cur_curvature)\n", (6904, 6937), True, 'from wpilib import SmartDashboard as Dash\n'), ((6946, 7005), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Closes Point"""', '[closest.x, closest.y]'], {}), "('Closes Point', [closest.x, closest.y])\n", (6965, 7005), True, 'from wpilib import SmartDashboard as Dash\n'), ((7014, 7113), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Target Velocities"""', '[self.target_velocities.x, self.target_velocities.y]'], {}), "('Target Velocities', [self.target_velocities.x, self.\n target_velocities.y])\n", (7033, 7113), True, 'from wpilib import SmartDashboard as Dash\n'), ((332, 363), 'autonomous.pursuitpoint.PursuitPoint', 'pursuitpoint.PursuitPoint', (['p', 'c'], {}), '(p, c)\n', (357, 363), False, 'from autonomous import pursuitpoint\n'), ((1393, 1493), 'math.sqrt', 'math.sqrt', (['(self.pursuit_points[i + 1].velocity ** 2 + 2 * Constants.MAX_ACCELERATION *\n distance)'], {}), '(self.pursuit_points[i + 1].velocity ** 2 + 2 * Constants.\n MAX_ACCELERATION * distance)\n', (1402, 1493), False, 'import math\n'), ((2082, 2144), 'math.hypot', 'math.hypot', (['(state.x - ppoint.point.x)', '(state.y - ppoint.point.y)'], {}), '(state.x - ppoint.point.x, state.y - ppoint.point.y)\n', (2092, 2144), False, 'import math\n'), ((3872, 3895), 'math.sqrt', 'math.sqrt', (['discriminant'], {}), '(discriminant)\n', (3881, 3895), False, 'import math\n')] |
TheEggi/esphomeyaml | esphome/voluptuous_schema.py | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | import difflib
import itertools
import voluptuous as vol
from esphome.py_compat import string_types
class ExtraKeysInvalid(vol.Invalid):
def __init__(self, *arg, **kwargs):
self.candidates = kwargs.pop('candidates')
vol.Invalid.__init__(self, *arg, **kwargs)
def ensure_multiple_invalid(err):
if isinstance(err, vol.MultipleInvalid):
return err
return vol.MultipleInvalid(err)
# pylint: disable=protected-access, unidiomatic-typecheck
class _Schema(vol.Schema):
"""Custom cv.Schema that prints similar keys on error."""
def __init__(self, schema, extra=vol.PREVENT_EXTRA, extra_schemas=None):
super(_Schema, self).__init__(schema, extra=extra)
# List of extra schemas to apply after validation
# Should be used sparingly, as it's not a very clean, voluptuous-style way of
# doing things.
self._extra_schemas = extra_schemas or []
def __call__(self, data):
res = super(_Schema, self).__call__(data)
for extra in self._extra_schemas:
try:
res = extra(res)
except vol.Invalid as err:
raise ensure_multiple_invalid(err)
return res
def _compile_mapping(self, schema, invalid_msg=None):
invalid_msg = invalid_msg or 'mapping value'
# Check some things that ESPHome's schemas do not allow
# mostly to keep the logic in this method sane (so these may be re-added if needed).
for key in schema:
if key is vol.Extra:
raise ValueError("ESPHome does not allow vol.Extra")
if isinstance(key, vol.Remove):
raise ValueError("ESPHome does not allow vol.Remove")
if isinstance(key, vol.primitive_types):
raise ValueError("All schema keys must be wrapped in cv.Required or cv.Optional")
# Keys that may be required
all_required_keys = set(key for key in schema if isinstance(key, vol.Required))
# Keys that may have defaults
all_default_keys = set(key for key in schema if isinstance(key, vol.Optional))
# Recursively compile schema
_compiled_schema = {}
for skey, svalue in vol.iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
# Sort compiled schema (probably not necessary for esphome, but leave it here just in case)
candidates = list(vol.schema_builder._iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some
# optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in vol.primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, vol.Marker) and type(skey.schema) in vol.primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be
# applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
key_names = []
for skey in schema:
if isinstance(skey, string_types):
key_names.append(skey)
elif isinstance(skey, vol.Marker) and isinstance(skey.schema, string_types):
key_names.append(skey.schema)
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case an ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, vol.Undefined) and key.schema not in key_value_map:
# A default value has been specified for this missing key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []),
additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except vol.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
try:
cval = cvalue(key_path, value)
out[new_key] = cval
except vol.MultipleInvalid as e:
exception_errors.extend(e.errors)
except vol.Invalid as e:
exception_errors.append(e)
if exception_errors:
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if self.extra == vol.ALLOW_EXTRA:
out[key] = value
elif self.extra != vol.REMOVE_EXTRA:
if isinstance(key, string_types) and key_names:
matches = difflib.get_close_matches(key, key_names)
errors.append(ExtraKeysInvalid('extra keys not allowed', key_path,
candidates=matches))
else:
errors.append(vol.Invalid('extra keys not allowed', key_path))
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = getattr(key, 'msg', None) or 'required key not provided'
errors.append(vol.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise vol.MultipleInvalid(errors)
return out
return validate_mapping
def add_extra(self, validator):
validator = _Schema(validator)
self._extra_schemas.append(validator)
return self
# pylint: disable=arguments-differ
def extend(self, *schemas, **kwargs):
extra = kwargs.pop('extra', None)
if kwargs:
raise ValueError
if not schemas:
return self.extend({})
if len(schemas) != 1:
ret = self
for schema in schemas:
ret = ret.extend(schema)
return ret
schema = schemas[0]
if isinstance(schema, vol.Schema):
schema = schema.schema
ret = super(_Schema, self).extend(schema, extra=extra)
return _Schema(ret.schema, extra=ret.extra, extra_schemas=self._extra_schemas)
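# Sketch of the behaviour this subclass adds (the schema and data below are
# hypothetical): validating a dict with a misspelled key raises a
# MultipleInvalid whose ExtraKeysInvalid entries carry close-match candidates.
#
# schema = _Schema({vol.Required('name'): str, vol.Optional('port'): int})
# try:
#     schema({'nmae': 'livingroom'})
# except vol.MultipleInvalid as err:
#     for sub_err in err.errors:
#         if isinstance(sub_err, ExtraKeysInvalid):
#             print(sub_err.candidates)  # e.g. ['name']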
| [((394, 418), 'voluptuous.MultipleInvalid', 'vol.MultipleInvalid', (['err'], {}), '(err)\n', (413, 418), True, 'import voluptuous as vol\n'), ((240, 282), 'voluptuous.Invalid.__init__', 'vol.Invalid.__init__', (['self', '*arg'], {}), '(self, *arg, **kwargs)\n', (260, 282), True, 'import voluptuous as vol\n'), ((2212, 2233), 'voluptuous.iteritems', 'vol.iteritems', (['schema'], {}), '(schema)\n', (2225, 2233), True, 'import voluptuous as vol\n'), ((2508, 2572), 'voluptuous.schema_builder._iterate_mapping_candidates', 'vol.schema_builder._iterate_mapping_candidates', (['_compiled_schema'], {}), '(_compiled_schema)\n', (2554, 2572), True, 'import voluptuous as vol\n'), ((7794, 7821), 'voluptuous.MultipleInvalid', 'vol.MultipleInvalid', (['errors'], {}), '(errors)\n', (7813, 7821), True, 'import voluptuous as vol\n'), ((7704, 7747), 'voluptuous.RequiredFieldInvalid', 'vol.RequiredFieldInvalid', (['msg', '(path + [key])'], {}), '(msg, path + [key])\n', (7728, 7747), True, 'import voluptuous as vol\n'), ((7133, 7174), 'difflib.get_close_matches', 'difflib.get_close_matches', (['key', 'key_names'], {}), '(key, key_names)\n', (7158, 7174), False, 'import difflib\n'), ((7422, 7469), 'voluptuous.Invalid', 'vol.Invalid', (['"""extra keys not allowed"""', 'key_path'], {}), "('extra keys not allowed', key_path)\n", (7433, 7469), True, 'import voluptuous as vol\n')] |
dpr1005/Semisupervised-learning-and-instance-selection-methods | semisupervised/DensityPeaks.py | 646d9e729c85322e859928e71a3241f2aec6d93d | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: Daniel Puente Ramírez
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
Li, J., Zhu, Q., & Wu, Q. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
Wu, D., Shang, M., Luo, X., Xu, J., Yan, H., Deng, W., & Wang, G. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
Automatically select the local density threshold so that the average
number of neighbors is 1-2 percent of all nodes.
:return: dc, the local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
def __select_dc(self):
"""
Select the local density threshold: by default, use the method from the
paper; 'auto' selects it automatically.
:return: dc, the local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
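# self.distances stores every pair twice ((i, j) and (j, i)), so indexing
# the sorted values around position * 2 (offset by n_id) picks roughly the
# `percent`-th percentile of the pairwise distances as the cutoff.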
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector that index is the point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
If the distance between two atoms is less than the cutoff distance,
return 1, otherwise return 0
:param dij: distance between atoms i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
def __min_neighbor_and_distance(self):
"""
Compute each point's minimum distance to a point of higher local density
(which becomes its nearest higher-density neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
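# delta[i] is now the distance from i to its nearest point of higher
# density; the global density peak has no such point, so it receives the
# largest delta instead of the placeholder set above.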
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the nearest neighbor of the nearest neighbor
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
For every point, search its natural neighbors: grow the neighborhood
size r one step at a time, tracking each point's r-nearest neighbors
(knn) and reverse neighbors (rnn), until the number of points that have
no reverse neighbor stops changing. A point's natural-neighbor set is
then the intersection of its knn and rnn sets.
:return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
> The function takes in the dataframe, the list of indices of the
unlabeled data, the list of indices of the neighbors of the unlabeled
data, and the number of neighbors to use in the KNN classifier. It
then creates a new dataframe with the labeled data and the unlabeled
data, and uses the KNN classifier to predict the labels of the
unlabeled data. It then checks if the predicted label is the same as
the label of the majority of the neighbors of the unlabeled data. If
it is, then it adds the index of the unlabeled data to the list of
indices of the data to be labeled
:param fx: the indexes of the unlabeled data
:param nan: a list of lists, where each list contains the indices of the
neighbors of a sample
:param r: the number of neighbors to consider
:return: The indexes of the samples that are going to be labeled and the
labels that are going to be assigned to them.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
It takes the labeled samples, the unlabeled samples and the labels, and
then calculates the distances between the data points,
the maximum distance, the minimum distance, the dc value, the rho
value, the delta value, the nearest higher-density neighbors, and the
structure of the data
:param low: the labeled samples
:param u: the unlabeled samples
:param y: the labels of the labeled samples
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
Runs self-training without filtering: fits the classifier on the labeled
points, then labels the points that follow and precede them in the
discovered structure.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
self.structure_stdnpf.at[filtered_indexes,
"label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
> This function takes the results of the model and compares them to the
complete data set. If the result is not in the complete data set, it is
added to the structure data set.
:param complete: the complete dataset
:param result: the result of the clustering
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
for r in results_to_unlabeled:
self.structure_stdnpf.at[np.array(self.structure_stdnpf["sample"], r)][
"label"
] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = pd.DataFrame(self.y)
result, _ = self.filter.filter_original_complete(
original, original_y, complete, complete_y
)
else:
result, _ = self.filter.filter(complete, complete_y)
return result
def fit(self, samples, y):
"""Fit method."""
try:
l, u, y = split(samples, y)
except IndexError:
raise ValueError("Dimensions do not match.")
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
self.__init_values(l, u, y)
if self.filtering:
self._fit_stdpnf()
else:
self._fit_without()
def predict(self, src):
"""
Predict based on a trained classifier.
:param src: the samples to classify
:return: the predicted labels
"""
if self.classifier is None:
raise AssertionError("The model needs to be fitted first.")
return self.classifier.predict(src)
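# Minimal usage sketch; X and y are stand-ins for a feature matrix and a
# label vector encoded the way utils.split expects for the unlabeled part:
#
# model = STDPNF(k=3, filtering=False)
# model.fit(X, y)
# predictions = model.predict(X_new)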
| [((2879, 2924), 'scipy.spatial.distance.pdist', 'pdist', (['self.data'], {'metric': 'self.distance_metric'}), '(self.data, metric=self.distance_metric)\n', (2884, 2924), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2951, 2978), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (2961, 2978), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((3005, 3043), 'numpy.triu_indices', 'np.triu_indices', (['self.data.shape[0]', '(1)'], {}), '(self.data.shape[0], 1)\n', (3020, 3043), True, 'import numpy as np\n'), ((5995, 6020), 'numpy.array', 'np.array', (['rho', 'np.float32'], {}), '(rho, np.float32)\n', (6003, 6020), True, 'import numpy as np\n'), ((6370, 6391), 'numpy.argsort', 'np.argsort', (['(-self.rho)'], {}), '(-self.rho)\n', (6380, 6391), True, 'import numpy as np\n'), ((9572, 9588), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9583, 9588), False, 'from collections import defaultdict\n'), ((9651, 9667), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9662, 9667), False, 'from collections import defaultdict\n'), ((9682, 9698), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9693, 9698), False, 'from collections import defaultdict\n'), ((9713, 9729), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9724, 9729), False, 'from collections import defaultdict\n'), ((11686, 11750), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'r', 'metric': 'self.distance_metric'}), '(n_neighbors=r, metric=self.distance_metric)\n', (11706, 11750), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((11912, 11966), 'pandas.concat', 'pd.concat', (['[labeled_data, nan_unlabeled]'], {'join': '"""inner"""'}), "([labeled_data, nan_unlabeled], join='inner')\n", (11921, 11966), True, 'import pandas as pd\n'), ((11990, 12028), 'sklearn.semi_supervised.SelfTrainingClassifier', 'SelfTrainingClassifier', (['base_estimator'], {}), '(base_estimator)\n', (12012, 12028), False, 'from sklearn.semi_supervised import SelfTrainingClassifier\n'), ((13253, 13285), 'numpy.concatenate', 'np.concatenate', (['(low, u)'], {'axis': '(0)'}), '((low, u), axis=0)\n', (13267, 13285), True, 'import numpy as np\n'), ((16722, 16791), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.k', 'metric': 'self.distance_metric'}), '(n_neighbors=self.k, metric=self.distance_metric)\n', (16742, 16791), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((20196, 20210), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (20208, 20210), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3347, 3369), 'numpy.max', 'np.max', (['triangle_upper'], {}), '(triangle_upper)\n', (3353, 3369), True, 'import numpy as np\n'), ((3371, 3393), 'numpy.min', 'np.min', (['triangle_upper'], {}), '(triangle_upper)\n', (3377, 3393), True, 'import numpy as np\n'), ((5286, 5312), 'math.exp', 'math.exp', (['(-(dij / dc) ** 2)'], {}), '(-(dij / dc) ** 2)\n', (5294, 5312), False, 'import math\n'), ((6873, 6900), 'numpy.array', 'np.array', (['delta', 'np.float32'], {}), '(delta, np.float32)\n', (6881, 6900), True, 'import numpy as np\n'), ((6902, 6930), 'numpy.array', 'np.array', (['nneigh', 'np.float32'], {}), '(nneigh, np.float32)\n', (6910, 6930), True, 'import numpy as np\n'), ((9772, 9828), 'sklearn.neighbors.NearestNeighbors', 
'NearestNeighbors', ([], {'n_neighbors': '(r + 1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=r + 1, algorithm='kd_tree')\n", (9788, 9828), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((13836, 13841), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (13839, 13841), False, 'from sklearn.svm import SVC\n'), ((19682, 19704), 'pandas.DataFrame', 'pd.DataFrame', (['self.low'], {}), '(self.low)\n', (19694, 19704), True, 'import pandas as pd\n'), ((19730, 19750), 'pandas.DataFrame', 'pd.DataFrame', (['self.y'], {}), '(self.y)\n', (19742, 19750), True, 'import pandas as pd\n'), ((7921, 7996), 'pandas.DataFrame', 'pd.DataFrame', (['self.structure'], {'index': "['sample', 'next', 'previous', 'label']"}), "(self.structure, index=['sample', 'next', 'previous', 'label'])\n", (7933, 7996), True, 'import pandas as pd\n'), ((18979, 18999), 'numpy.array_equal', 'np.array_equal', (['r', 'c'], {}), '(r, c)\n', (18993, 18999), True, 'import numpy as np\n'), ((19183, 19227), 'numpy.array', 'np.array', (["self.structure_stdnpf['sample']", 'r'], {}), "(self.structure_stdnpf['sample'], r)\n", (19191, 19227), True, 'import numpy as np\n')] |
Bit64L/LeetCode-Python- | N-aryTreeLevelOrderTraversal429.py | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | """
# Definition for a Node.
"""
class TreeNode(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
if root is None:
return []
from Queue import Queue
que = Queue()
que.put(root)
ans, tmp, k = [], [], 1
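# k counts how many nodes of the current level are still in the queue;
# when it reaches zero the level is complete, so tmp is flushed into ans
# and k is reset to the number of nodes queued for the next level.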
while que.qsize() != 0:
node = que.get()
tmp.append(node.val)
k -= 1
for child in node.children:
que.put(child)
if k == 0:
k = que.qsize()
ans.append(list(tmp))
tmp = []
return ans
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.levelOrder(node1))
| [((388, 395), 'Queue.Queue', 'Queue', ([], {}), '()\n', (393, 395), False, 'from Queue import Queue\n')] |
akuala/REPO.KUALA | plugin.video.team.milhanos/websocket/_core.py | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import six
import socket
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import struct
import threading
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._socket import *
from ._utils import *
from ._url import *
from ._logging import *
from ._http import *
from ._handshake import *
from ._ssl_compat import *
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False, **options):
"""
Initialize WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
# These buffer over the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
Set the function used to create the mask key. You can customize the mask
key generator. Mainly, this is for testing purposes.
func: callable object. It takes one integer argument, the desired length
of the mask key, and must return a string (byte array) of that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
headers = property(getheaders)
def connect(self, url, **options):
"""
Connect to url. url is websocket url scheme.
ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
receive data as frame from server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
timeout: timeout until receive a close frame.
If None, it will wait forever until receive a close frame.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
try:
frame = self.recv_frame()
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
def create_connection(url, timeout=None, class_=WebSocket, **options):
"""
connect to url and return websocket object.
Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
class_: class to instantiate when creating the connection. It has to implement
settimeout and connect. Its __init__ should be compatible with
WebSocket.__init__, i.e. accept all of its kwargs.
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"enable_multithread" -> enable lock for multithread.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols.
default is None.
"skip_utf8_validation" - skip utf8 validation.
"socket" - pre-initialized stream socket.
"""
sockopt = options.pop("sockopt", [])
sslopt = options.pop("sslopt", {})
fire_cont_frame = options.pop("fire_cont_frame", False)
enable_multithread = options.pop("enable_multithread", False)
skip_utf8_validation = options.pop("skip_utf8_validation", False)
websock = class_(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=fire_cont_frame,
enable_multithread=enable_multithread,
skip_utf8_validation=skip_utf8_validation, **options)
websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
websock.connect(url, **options)
return websock
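# Besides the explicit recv() calls shown in the docstrings above, a
# connection can be consumed as an iterator thanks to WebSocket.__iter__;
# a sketch, assuming the server keeps pushing messages:
#
# for message in create_connection("ws://example.com/stream"):
#     print(message)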
| [((11409, 11418), 'six.b', 'six.b', (['""""""'], {}), "('')\n", (11414, 11418), False, 'import six\n'), ((11865, 11874), 'six.b', 'six.b', (['""""""'], {}), "('')\n", (11870, 11874), False, 'import six\n'), ((3191, 3207), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3205, 3207), False, 'import threading\n'), ((11760, 11785), 'struct.pack', 'struct.pack', (['"""!H"""', 'status'], {}), "('!H', status)\n", (11771, 11785), False, 'import struct\n'), ((12405, 12430), 'struct.pack', 'struct.pack', (['"""!H"""', 'status'], {}), "('!H', status)\n", (12416, 12430), False, 'import struct\n'), ((12709, 12740), 'struct.unpack', 'struct.unpack', (['"""!H"""', 'frame.data'], {}), "('!H', frame.data)\n", (12722, 12740), False, 'import struct\n')] |
Unanimad/lais_046_2020_etapa_2 | vaccine_card/logistic/models.py | 630efc6b25a580be44b6cd50be6744a01221a2c4 | from django.db import models
from vaccine_card.vaccination.models import Vaccine
class State(models.Model):
name = models.CharField(max_length=20, verbose_name='Nome')
class Meta:
verbose_name = 'Unidade Federativa'
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField(max_length=50, verbose_name='Nome')
state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
class Meta:
verbose_name = 'Município'
def __str__(self):
return self.name
class Address(models.Model):
logradouro = models.CharField(max_length=150, verbose_name='Logradouro')
numero = models.CharField(max_length=4, verbose_name='Número')
complemento = models.CharField(max_length=50, null=True, blank=True, verbose_name='Complemento')
bairro = models.CharField(max_length=150, verbose_name='Bairro')
cep = models.CharField(max_length=8, verbose_name='CEP')
# state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=City._meta.verbose_name)
class Meta:
verbose_name = 'Endereço'
class HealthCenter(models.Model):
cnes = models.CharField(max_length=7, verbose_name='CNES')
cnpj = models.CharField(max_length=14, verbose_name='CNPJ')
name = models.CharField(max_length=255, verbose_name='Razão Social')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
address = models.ManyToManyField(Address, verbose_name=Address._meta.verbose_name)
class Meta:
verbose_name = 'Estabelecimento de Saúde'
verbose_name_plural = 'Estabelecimentos de Saúde'
def __str__(self):
return self.name
class Stock(models.Model):
lot = models.PositiveSmallIntegerField(verbose_name='Lote')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
health_center = models.ForeignKey(HealthCenter, on_delete=models.CASCADE,
verbose_name=HealthCenter._meta.verbose_name)
vaccines = models.ManyToManyField(Vaccine, through='VaccineStock', verbose_name=Vaccine._meta.verbose_name)
class Meta:
verbose_name = 'Estoque'
class VaccineStock(models.Model):
amount = models.PositiveSmallIntegerField(verbose_name='Quantidade recebida')
remaining = models.PositiveSmallIntegerField(verbose_name='Quantidade restante')
vaccine = models.ForeignKey(Vaccine, on_delete=models.DO_NOTHING, verbose_name=Vaccine._meta.verbose_name)
stock = models.ForeignKey(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock._meta.verbose_name)
class Meta:
verbose_name = 'Estoque de Vacina'
def __str__(self):
return self.vaccine.name
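# Sketch of how a received lot could be recorded through the explicit
# many-to-many "through" model above (the health_center and vaccine
# objects are illustrative):
#
# stock = Stock.objects.create(lot=1, health_center=some_health_center)
# VaccineStock.objects.create(stock=stock, vaccine=some_vaccine,
#                             amount=100, remaining=100)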
| [((122, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'verbose_name': '"""Nome"""'}), "(max_length=20, verbose_name='Nome')\n", (138, 174), False, 'from django.db import models\n'), ((324, 376), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Nome"""'}), "(max_length=50, verbose_name='Nome')\n", (340, 376), False, 'from django.db import models\n'), ((389, 483), 'django.db.models.ForeignKey', 'models.ForeignKey', (['State'], {'on_delete': 'models.CASCADE', 'verbose_name': 'State._meta.verbose_name'}), '(State, on_delete=models.CASCADE, verbose_name=State._meta\n .verbose_name)\n', (406, 483), False, 'from django.db import models\n'), ((628, 687), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""Logradouro"""'}), "(max_length=150, verbose_name='Logradouro')\n", (644, 687), False, 'from django.db import models\n'), ((701, 754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'verbose_name': '"""Número"""'}), "(max_length=4, verbose_name='Número')\n", (717, 754), False, 'from django.db import models\n'), ((773, 860), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Complemento"""'}), "(max_length=50, null=True, blank=True, verbose_name=\n 'Complemento')\n", (789, 860), False, 'from django.db import models\n'), ((869, 924), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""Bairro"""'}), "(max_length=150, verbose_name='Bairro')\n", (885, 924), False, 'from django.db import models\n'), ((935, 985), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'verbose_name': '"""CEP"""'}), "(max_length=8, verbose_name='CEP')\n", (951, 985), False, 'from django.db import models\n'), ((1102, 1194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['City'], {'on_delete': 'models.CASCADE', 'verbose_name': 'City._meta.verbose_name'}), '(City, on_delete=models.CASCADE, verbose_name=City._meta.\n verbose_name)\n', (1119, 1194), False, 'from django.db import models\n'), ((1288, 1339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(7)', 'verbose_name': '"""CNES"""'}), "(max_length=7, verbose_name='CNES')\n", (1304, 1339), False, 'from django.db import models\n'), ((1351, 1403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'verbose_name': '"""CNPJ"""'}), "(max_length=14, verbose_name='CNPJ')\n", (1367, 1403), False, 'from django.db import models\n'), ((1415, 1476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Razão Social"""'}), "(max_length=255, verbose_name='Razão Social')\n", (1431, 1476), False, 'from django.db import models\n'), ((1495, 1582), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)', 'verbose_name': '"""Criado em:"""'}), "(auto_now_add=True, auto_now=False, verbose_name=\n 'Criado em:')\n", (1515, 1582), False, 'from django.db import models\n'), ((1595, 1686), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(True)', 'verbose_name': '"""Atualizado em:"""'}), "(auto_now_add=False, auto_now=True, verbose_name=\n 'Atualizado em:')\n", (1615, 1686), False, 'from django.db import models\n'), ((1697, 1769), 'django.db.models.ManyToManyField', 
'models.ManyToManyField', (['Address'], {'verbose_name': 'Address._meta.verbose_name'}), '(Address, verbose_name=Address._meta.verbose_name)\n', (1719, 1769), False, 'from django.db import models\n'), ((1983, 2036), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Lote"""'}), "(verbose_name='Lote')\n", (2015, 2036), False, 'from django.db import models\n'), ((2055, 2142), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)', 'verbose_name': '"""Criado em:"""'}), "(auto_now_add=True, auto_now=False, verbose_name=\n 'Criado em:')\n", (2075, 2142), False, 'from django.db import models\n'), ((2155, 2246), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(True)', 'verbose_name': '"""Atualizado em:"""'}), "(auto_now_add=False, auto_now=True, verbose_name=\n 'Atualizado em:')\n", (2175, 2246), False, 'from django.db import models\n'), ((2263, 2371), 'django.db.models.ForeignKey', 'models.ForeignKey', (['HealthCenter'], {'on_delete': 'models.CASCADE', 'verbose_name': 'HealthCenter._meta.verbose_name'}), '(HealthCenter, on_delete=models.CASCADE, verbose_name=\n HealthCenter._meta.verbose_name)\n', (2280, 2371), False, 'from django.db import models\n'), ((2421, 2522), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Vaccine'], {'through': '"""VaccineStock"""', 'verbose_name': 'Vaccine._meta.verbose_name'}), "(Vaccine, through='VaccineStock', verbose_name=\n Vaccine._meta.verbose_name)\n", (2443, 2522), False, 'from django.db import models\n'), ((2617, 2685), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Quantidade recebida"""'}), "(verbose_name='Quantidade recebida')\n", (2649, 2685), False, 'from django.db import models\n'), ((2702, 2770), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Quantidade restante"""'}), "(verbose_name='Quantidade restante')\n", (2734, 2770), False, 'from django.db import models\n'), ((2786, 2887), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Vaccine'], {'on_delete': 'models.DO_NOTHING', 'verbose_name': 'Vaccine._meta.verbose_name'}), '(Vaccine, on_delete=models.DO_NOTHING, verbose_name=\n Vaccine._meta.verbose_name)\n', (2803, 2887), False, 'from django.db import models\n'), ((2895, 2992), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Stock'], {'on_delete': 'models.DO_NOTHING', 'verbose_name': 'Stock._meta.verbose_name'}), '(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock.\n _meta.verbose_name)\n', (2912, 2992), False, 'from django.db import models\n')] |
shenoyn/libcloud | test/test_rimuhosting.py | bd902992a658b6a99193d69323e051ffa7388253 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2009 RedRata Ltd
from libcloud.drivers.rimuhosting import RimuHostingNodeDriver
from test import MockHttp, TestCaseMixin
import unittest
import httplib
class RimuHostingTest(unittest.TestCase, TestCaseMixin):
def setUp(self):
RimuHostingNodeDriver.connectionCls.conn_classes = (None,
RimuHostingMockHttp)
self.driver = RimuHostingNodeDriver('foo')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes),1)
node = nodes[0]
self.assertEqual(node.public_ip[0], "1.2.3.4")
self.assertEqual(node.public_ip[1], "1.2.3.5")
self.assertEqual(node.extra['order_oid'], 88833465)
self.assertEqual(node.id, "order-88833465-api-ivan-net-nz")
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes),1)
size = sizes[0]
self.assertEqual(size.ram,950)
self.assertEqual(size.disk,20)
self.assertEqual(size.bandwidth,75)
self.assertEqual(size.price,32.54)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images),6)
image = images[0]
self.assertEqual(image.name,"Debian 5.0 (aka Lenny, RimuHosting"\
" recommended distro)")
self.assertEqual(image.id, "lenny")
def test_reboot_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.reboot_node(node)
def test_destroy_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.destroy_node(node)
def test_create_node(self):
# Raises exception on failure
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
self.driver.create_node(name="api.ivan.net.nz", image=image, size=size)
class RimuHostingMockHttp(MockHttp):
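    # Each handler below returns a canned JSON response for the RimuHosting API call issued by the driver tests above.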
def _r_orders(self,method,url,body,headers):
body = """
{ "get_orders_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Found 15 orders"
, "response_display_duration_type" : "REGULAR",
"about_orders" :
[{ "order_oid" : 88833465
, "domain_name" : "api.ivan.net.nz"
, "slug" : "order-88833465-api-ivan-net-nz"
, "billing_oid" : 96122465
, "is_on_customers_own_physical_server" : false
, "vps_parameters" : { "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "host_server_oid" : "764"
, "server_type" : "VPS"
, "data_transfer_allowance" : { "data_transfer_gb" : 30
, "data_transfer" : "30"}
, "billing_info" : { }
, "allocated_ips" : { "primary_ip" : "1.2.3.4"
, "secondary_ips" : ["1.2.3.5","1.2.3.6"]}
, "running_state" : "RUNNING"}]}}"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_pricing_plans(self,method,url,body,headers):
body = """
{"get_pricing_plans_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Here some pricing plans we are offering on new orders. Note we offer most disk and memory sizes. So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit. Pricing is in USD. If you are an NZ-based customer then we would need to add GST."
, "response_display_duration_type" : "REGULAR"
, "pricing_plan_infos" :
[{ "pricing_plan_code" : "MiroVPSLowContention"
, "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)"
, "monthly_recurring_fee" : 32.54
, "monthly_recurring_amt" : { "amt" : 35.0
, "currency" : "CUR_AUD"
,"amt_usd" : 32.54}
, "minimum_memory_mb" : 950
, "minimum_disk_gb" : 20
, "minimum_data_transfer_allowance_gb" : 75
, "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention"
, "server_type" : "VPS"
, "offered_at_data_center" :
{ "data_center_location_code" : "DCDALLAS"
, "data_center_location_name" : "Dallas"}}
]}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_distributions(self, method, url, body, headers):
body = """
{ "get_distros_response" : { "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Here are the distros we are offering on new orders."
, "response_display_duration_type" : "REGULAR"
, "distro_infos" : [{ "distro_code" : "lenny"
, "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"}
, { "distro_code" : "centos5"
, "distro_description" : "Centos5"}
, { "distro_code" : "ubuntu904"
, "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"}
, { "distro_code" : "ubuntu804"
, "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"}
, { "distro_code" : "ubuntu810"
, "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"}
, { "distro_code" : "fedora10"
, "distro_description" : "Fedora 10"}]}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_new_vps(self, method, url, body, headers):
body = """
{ "post_new_vps_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : null
, "response_display_duration_type" : "REGULAR"
, "setup_messages" :
["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: Ivan Meredith"
, "No VPS paramters provided, using default values."]
, "about_order" :
{ "order_oid" : 52255865
, "domain_name" : "api.ivan.net.nz"
, "slug" : "order-52255865-api-ivan-net-nz"
, "billing_oid" : 96122465
, "is_on_customers_own_physical_server" : false
, "vps_parameters" :
{ "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "host_server_oid" : "764"
, "server_type" : "VPS"
, "data_transfer_allowance" :
{ "data_transfer_gb" : 30 , "data_transfer" : "30"}
, "billing_info" : { }
, "allocated_ips" :
{ "primary_ip" : "74.50.57.80", "secondary_ips" : []}
, "running_state" : "RUNNING"}
, "new_order_request" :
{ "billing_oid" : 96122465
, "user_oid" : 0
, "host_server_oid" : null
, "vps_order_oid_to_clone" : 0
, "ip_request" :
{ "num_ips" : 1, "extra_ip_reason" : ""}
, "vps_parameters" :
{ "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "pricing_plan_code" : "MIRO1B"
, "instantiation_options" :
{ "control_panel" : "webmin"
, "domain_name" : "api.ivan.net.nz"
, "password" : "aruxauce27"
, "distro" : "lenny"}}
, "running_vps_info" :
{ "pings_ok" : true
, "current_kernel" : "default"
, "current_kernel_canonical" : "2.6.30.5-xenU.i386"
, "last_backup_message" : ""
, "is_console_login_enabled" : false
, "console_public_authorized_keys" : null
, "is_backup_running" : false
, "is_backups_enabled" : true
, "next_backup_time" :
{ "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000}
, "vps_uptime_s" : 31
, "vps_cpu_time_s" : 6
, "running_state" : "RUNNING"
, "is_suspended" : false}}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers):
body = """
{ "delete_server_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Server removed"
, "response_display_duration_type" : "REGULAR"
, "cancel_messages" :
["api.ivan.net.nz is being shut down."
, "A $7.98 credit has been added to your account."
, "If you need to un-cancel the server please contact our support team."]
}
}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state(self, method,
url, body,
headers):
body = """
{ "put_running_state_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK."
, "response_display_duration_type" : "REGULAR"
, "is_restarted" : true
, "is_pinging" : true
, "running_vps_info" :
{ "pings_ok" : true
, "current_kernel" : "default"
, "current_kernel_canonical" : "2.6.30.5-xenU.i386"
, "last_backup_message" : ""
, "is_console_login_enabled" : false
, "console_public_authorized_keys" : null
, "is_backup_running" : false
, "is_backups_enabled" : true
, "next_backup_time" :
{ "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000}
, "vps_uptime_s" : 19
, "vps_cpu_time_s" : 5
, "running_state" : "RUNNING"
, "is_suspended" : false}
, "host_server_info" : { "is_host64_bit_capable" : true
, "default_kernel_i386" : "2.6.30.5-xenU.i386"
, "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64"
, "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz"
, "host_num_cores" : 1
, "host_xen_version" : "3.4.1"
, "hostload" : [1.45
, 0.56
, 0.28]
, "host_uptime_s" : 3378276
, "host_mem_mb_free" : 51825
, "host_mem_mb_total" : 73719
, "running_vpss" : 34}
, "running_state_messages" : null}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
| [((1226, 1254), 'libcloud.drivers.rimuhosting.RimuHostingNodeDriver', 'RimuHostingNodeDriver', (['"""foo"""'], {}), "('foo')\n", (1247, 1254), False, 'from libcloud.drivers.rimuhosting import RimuHostingNodeDriver\n')] |
lycantropos/gon | tests/base_tests/polygon_tests/test_contains.py | b3f811ece5989d1623b17d633a84071fbff6dd69 | from typing import Tuple
from hypothesis import given
from gon.base import (Point,
Polygon)
from tests.utils import (equivalence,
implication)
from . import strategies
@given(strategies.polygons)
def test_vertices(polygon: Polygon) -> None:
assert all(vertex in polygon
for vertex in polygon.border.vertices)
assert all(vertex in polygon
for hole in polygon.holes
for vertex in hole.vertices)
@given(strategies.polygons_with_points)
def test_convex_hull(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
assert implication(point in polygon, point in polygon.convex_hull)
@given(strategies.polygons_with_points)
def test_indexing(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
before_indexing = point in polygon
polygon.index()
after_indexing = point in polygon
assert equivalence(before_indexing, after_indexing)
| [((220, 246), 'hypothesis.given', 'given', (['strategies.polygons'], {}), '(strategies.polygons)\n', (225, 246), False, 'from hypothesis import given\n'), ((500, 538), 'hypothesis.given', 'given', (['strategies.polygons_with_points'], {}), '(strategies.polygons_with_points)\n', (505, 538), False, 'from hypothesis import given\n'), ((727, 765), 'hypothesis.given', 'given', (['strategies.polygons_with_points'], {}), '(strategies.polygons_with_points)\n', (732, 765), False, 'from hypothesis import given\n'), ((664, 723), 'tests.utils.implication', 'implication', (['(point in polygon)', '(point in polygon.convex_hull)'], {}), '(point in polygon, point in polygon.convex_hull)\n', (675, 723), False, 'from tests.utils import equivalence, implication\n'), ((988, 1032), 'tests.utils.equivalence', 'equivalence', (['before_indexing', 'after_indexing'], {}), '(before_indexing, after_indexing)\n', (999, 1032), False, 'from tests.utils import equivalence, implication\n')] |
HowcanoeWang/EasyIDP | easyidp/core/tests/test_class_reconsproject.py | 0d0a0df1287e3c15cda17e8e4cdcbe05f21f7272 | import os
import numpy as np
import pytest
import easyidp
from easyidp.core.objects import ReconsProject, Points
from easyidp.io import metashape
module_path = os.path.join(easyidp.__path__[0], "io/tests")
def test_init_reconsproject():
attempt1 = ReconsProject("agisoft")
assert attempt1.software == "metashape"
attempt2 = ReconsProject("Metashape")
assert attempt2.software == "metashape"
with pytest.raises(LookupError):
attempt3 = ReconsProject("not_supported_sfm")
def test_local2world2local():
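    # Round-trip a point through the project's 4x4 local<->world transform and compare with precomputed coordinates.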
attempt1 = ReconsProject("agisoft")
attempt1.transform.matrix = np.asarray([[-0.86573098, -0.01489186, 0.08977677, 7.65034123],
[0.06972335, 0.44334391, 0.74589315, 1.85910928],
[-0.05848325, 0.74899678, -0.43972184, -0.1835615],
[0., 0., 0., 1.]], dtype=np.float)
w_pos = Points([0.5, 1, 1.5])
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
w_pos_ans = Points([0.4999999999999978, 0.9999999999999993, 1.5])
world_pos = attempt1.local2world(l_pos)
np.testing.assert_array_almost_equal(w_pos_ans.values, world_pos.values, decimal=6)
local_pos = attempt1.world2local(w_pos)
np.testing.assert_array_almost_equal(l_pos.values, local_pos.values, decimal=6)
def test_metashape_project_local_points_on_raw():
test_project_folder = easyidp.test_full_path("data/metashape/goya_test.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
# test for single point
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
p_dis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=False)
p_undis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=True)
# pro_api_out = np.asarray([2218.883386793118, 1991.4709388015149])
my_undistort_out = Points([2220.854889556147, 1992.6933680261686])
my_distort_out = Points([2218.47960556, 1992.46356322])
np.testing.assert_array_almost_equal(p_dis_out.values, my_distort_out.values)
np.testing.assert_array_almost_equal(p_undis_out.values, my_undistort_out.values)
# test for multiple points
l_pos_points = Points([[7.960064093299587, 1.3019528769064523, -2.6697181763370965],
[7.960064093299587, 1.3019528769064523, -2.6697181763370965]])
p_dis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=False)
p_undis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=True)
my_undistort_outs = Points([[2220.854889556147, 1992.6933680261686],
[2220.854889556147, 1992.6933680261686]])
my_distort_outs = Points([[2218.47960556, 1992.46356322],
[2218.47960556, 1992.46356322]])
np.testing.assert_array_almost_equal(p_dis_outs.values, my_distort_outs.values)
np.testing.assert_array_almost_equal(p_undis_outs.values, my_undistort_outs.values)
def test_world2crs_and_on_raw_images():
test_project_folder = easyidp.test_full_path("data/metashape/wheat_tanashi.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
local = Points([11.870130675203006, 0.858098777517136, -12.987136541275])
geocentric = Points([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])
geodetic = Points([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=['lon', 'lat', 'alt'])
idp_world = chunk.local2world(local)
np.testing.assert_array_almost_equal(idp_world.values, geocentric.values, decimal=1)
idp_crs = chunk.world2crs(idp_world)
np.testing.assert_array_almost_equal(idp_crs.values, geodetic.values)
camera_id = 56 # camera_label = 'DJI_0057'
camera_pix_ans = Points([2391.7104647010146, 1481.8987733175165])
idp_cam_pix = chunk.project_local_points_on_raw(local, camera_id, distortion_correct=True)
np.testing.assert_array_almost_equal(camera_pix_ans.values, idp_cam_pix.values)
| [((162, 207), 'os.path.join', 'os.path.join', (['easyidp.__path__[0]', '"""io/tests"""'], {}), "(easyidp.__path__[0], 'io/tests')\n", (174, 207), False, 'import os\n'), ((256, 280), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (269, 280), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((341, 367), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""Metashape"""'], {}), "('Metashape')\n", (354, 367), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((551, 575), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (564, 575), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((608, 821), 'numpy.asarray', 'np.asarray', (['[[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335, \n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float'}), '([[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335,\n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]], dtype=np.float)\n', (618, 821), True, 'import numpy as np\n'), ((953, 974), 'easyidp.core.objects.Points', 'Points', (['[0.5, 1, 1.5]'], {}), '([0.5, 1, 1.5])\n', (959, 974), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((987, 1055), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (993, 1055), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1072, 1125), 'easyidp.core.objects.Points', 'Points', (['[0.4999999999999978, 0.9999999999999993, 1.5]'], {}), '([0.4999999999999978, 0.9999999999999993, 1.5])\n', (1078, 1125), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1175, 1262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['w_pos_ans.values', 'world_pos.values'], {'decimal': '(6)'}), '(w_pos_ans.values, world_pos.values,\n decimal=6)\n', (1211, 1262), True, 'import numpy as np\n'), ((1308, 1387), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['l_pos.values', 'local_pos.values'], {'decimal': '(6)'}), '(l_pos.values, local_pos.values, decimal=6)\n', (1344, 1387), True, 'import numpy as np\n'), ((1466, 1520), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/goya_test.psx"""'], {}), "('data/metashape/goya_test.psx')\n", (1488, 1520), False, 'import easyidp\n'), ((1534, 1577), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (1556, 1577), False, 'from easyidp.io import metashape\n'), ((1642, 1710), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (1648, 1710), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1981, 2028), 'easyidp.core.objects.Points', 'Points', (['[2220.854889556147, 1992.6933680261686]'], {}), '([2220.854889556147, 1992.6933680261686])\n', (1987, 2028), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2050, 2088), 'easyidp.core.objects.Points', 'Points', (['[2218.47960556, 1992.46356322]'], {}), '([2218.47960556, 1992.46356322])\n', (2056, 2088), False, 'from easyidp.core.objects 
import ReconsProject, Points\n'), ((2094, 2171), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_out.values', 'my_distort_out.values'], {}), '(p_dis_out.values, my_distort_out.values)\n', (2130, 2171), True, 'import numpy as np\n'), ((2176, 2262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_out.values', 'my_undistort_out.values'], {}), '(p_undis_out.values, my_undistort_out.\n values)\n', (2212, 2262), True, 'import numpy as np\n'), ((2309, 2446), 'easyidp.core.objects.Points', 'Points', (['[[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]]'], {}), '([[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]])\n', (2315, 2446), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2684, 2779), 'easyidp.core.objects.Points', 'Points', (['[[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]]'], {}), '([[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]])\n', (2690, 2779), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2829, 2901), 'easyidp.core.objects.Points', 'Points', (['[[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]]'], {}), '([[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]])\n', (2835, 2901), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2937, 3016), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_outs.values', 'my_distort_outs.values'], {}), '(p_dis_outs.values, my_distort_outs.values)\n', (2973, 3016), True, 'import numpy as np\n'), ((3021, 3109), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_outs.values', 'my_undistort_outs.values'], {}), '(p_undis_outs.values, my_undistort_outs\n .values)\n', (3057, 3109), True, 'import numpy as np\n'), ((3173, 3231), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/wheat_tanashi.psx"""'], {}), "('data/metashape/wheat_tanashi.psx')\n", (3195, 3231), False, 'import easyidp\n'), ((3245, 3288), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (3267, 3288), False, 'from easyidp.io import metashape\n'), ((3325, 3390), 'easyidp.core.objects.Points', 'Points', (['[11.870130675203006, 0.858098777517136, -12.987136541275]'], {}), '([11.870130675203006, 0.858098777517136, -12.987136541275])\n', (3331, 3390), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3408, 3476), 'easyidp.core.objects.Points', 'Points', (['[-3943658.7087006606, 3363404.124223561, 3704651.3067566575]'], {}), '([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])\n', (3414, 3476), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3492, 3594), 'easyidp.core.objects.Points', 'Points', (['[139.54033578028609, 35.73756358928734, 96.87827569602781]'], {'columns': "['lon', 'lat', 'alt']"}), "([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=\n ['lon', 'lat', 'alt'])\n", (3498, 3594), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3636, 3724), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_world.values', 'geocentric.values'], {'decimal': '(1)'}), '(idp_world.values, geocentric.values,\n 
decimal=1)\n', (3672, 3724), True, 'import numpy as np\n'), ((3767, 3836), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_crs.values', 'geodetic.values'], {}), '(idp_crs.values, geodetic.values)\n', (3803, 3836), True, 'import numpy as np\n'), ((3908, 3956), 'easyidp.core.objects.Points', 'Points', (['[2391.7104647010146, 1481.8987733175165]'], {}), '([2391.7104647010146, 1481.8987733175165])\n', (3914, 3956), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((4057, 4136), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['camera_pix_ans.values', 'idp_cam_pix.values'], {}), '(camera_pix_ans.values, idp_cam_pix.values)\n', (4093, 4136), True, 'import numpy as np\n'), ((422, 448), 'pytest.raises', 'pytest.raises', (['LookupError'], {}), '(LookupError)\n', (435, 448), False, 'import pytest\n'), ((469, 503), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""not_supported_sfm"""'], {}), "('not_supported_sfm')\n", (482, 503), False, 'from easyidp.core.objects import ReconsProject, Points\n')] |
tiloc/python_withings_api | withings_api/const.py | 64c9706ab70c93e4c54cc843a778ecd3f9960980 | """Constant values."""
STATUS_SUCCESS = (0,)
STATUS_AUTH_FAILED = (100, 101, 102, 200, 401)
STATUS_INVALID_PARAMS = (
201,
202,
203,
204,
205,
206,
207,
208,
209,
210,
211,
212,
213,
216,
217,
218,
220,
221,
223,
225,
227,
228,
229,
230,
234,
235,
236,
238,
240,
241,
242,
243,
244,
245,
246,
247,
248,
249,
250,
251,
252,
254,
260,
261,
262,
263,
264,
265,
266,
267,
271,
272,
275,
276,
283,
284,
285,
286,
287,
288,
290,
293,
294,
295,
297,
300,
301,
302,
303,
304,
321,
323,
324,
325,
326,
327,
328,
329,
330,
331,
332,
333,
334,
335,
336,
337,
338,
339,
340,
341,
342,
343,
344,
345,
346,
347,
348,
349,
350,
351,
352,
353,
380,
381,
382,
400,
501,
502,
503,
504,
505,
506,
509,
510,
511,
523,
532,
3017,
3018,
3019,
)
STATUS_UNAUTHORIZED = (214, 277, 2553, 2554, 2555)
STATUS_ERROR_OCCURRED = (
215,
219,
222,
224,
226,
231,
233,
237,
253,
255,
256,
257,
258,
259,
268,
269,
270,
273,
274,
278,
279,
280,
281,
282,
289,
291,
292,
296,
298,
305,
306,
308,
309,
310,
311,
312,
313,
314,
315,
316,
317,
318,
319,
320,
322,
370,
371,
372,
373,
374,
375,
383,
391,
402,
516,
517,
518,
519,
520,
521,
525,
526,
527,
528,
529,
530,
531,
533,
602,
700,
1051,
1052,
1053,
1054,
2551,
2552,
2556,
2557,
2558,
2559,
3000,
3001,
3002,
3003,
3004,
3005,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
3013,
3014,
3015,
3016,
3020,
3021,
3022,
3023,
3024,
5000,
5001,
5005,
5006,
6000,
6010,
6011,
9000,
10000,
)
STATUS_TIMEOUT = (522,)
STATUS_BAD_STATE = (524,)
STATUS_TOO_MANY_REQUESTS = (601,)
| [] |
sirpercival/kivy | examples/canvas/bezier.py | 29ef854a200e6764aae60ea29324379c69d271a3 | #!/usr/bin/env python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line
class BezierTest(FloatLayout):
def __init__(self, points=[], loop=False, *args, **kwargs):
super(BezierTest, self).__init__(*args, **kwargs)
self.d = 10
self.points = points
self.loop = loop
self.current_point = None
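        # self.d is the pick radius in pixels used to grab a control point;
        # self.current_point holds the 1-based index of the point being dragged (None when idle).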
with self.canvas:
Color(1.0, 0.0, 0.0)
self.bezier = Bezier(
points=self.points,
segments=150,
loop=self.loop,
dash_length=100,
dash_offset=10)
Color(1.0, 0.0, 1.0)
self.line = Line(
points=self.points+self.points[:2],
dash_offset=10,
dash_length=100)
s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_bezier_dash_offset)
self.add_widget(s)
s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_line_dash_offset)
self.add_widget(s)
def _set_bezier_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.bezier.dash_length = 100 - value
self.bezier.dash_offset = value
def _set_line_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.line.dash_length = 100 - value
self.line.dash_offset = value
def on_touch_down(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
for i, p in enumerate(list(zip(self.points[::2], self.points[1::2]))):
if (
abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
self.current_point = i + 1
return True
return super(BezierTest, self).on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
if self.current_point:
self.current_point = None
return True
return super(BezierTest, self).on_touch_up(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
c = self.current_point
if c:
self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
self.bezier.points = self.points
self.line.points = self.points + self.points[:2]
return True
return super(BezierTest, self).on_touch_move(touch)
class Main(App):
def build(self):
from math import cos, sin, radians
x = y = 150
l = 100
# Pacman !
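        # Control points: the centre plus a point every 45 degrees from 45 to 315,
        # leaving the wedge around 0 degrees open as the mouth.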
points = [x, y]
for i in range(45, 360, 45):
i = radians(i)
points.extend([x + cos(i) * l, y + sin(i) * l])
return BezierTest(points=points, loop=True)
if __name__ == '__main__':
Main().run()
| [((921, 987), 'kivy.uix.slider.Slider', 'Slider', ([], {'y': '(0)', 'pos_hint': "{'x': 0.3}", 'size_hint': '(0.7, None)', 'height': '(50)'}), "(y=0, pos_hint={'x': 0.3}, size_hint=(0.7, None), height=50)\n", (927, 987), False, 'from kivy.uix.slider import Slider\n'), ((1077, 1144), 'kivy.uix.slider.Slider', 'Slider', ([], {'y': '(50)', 'pos_hint': "{'x': 0.3}", 'size_hint': '(0.7, None)', 'height': '(50)'}), "(y=50, pos_hint={'x': 0.3}, size_hint=(0.7, None), height=50)\n", (1083, 1144), False, 'from kivy.uix.slider import Slider\n'), ((476, 496), 'kivy.graphics.Color', 'Color', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (481, 496), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((524, 617), 'kivy.graphics.Bezier', 'Bezier', ([], {'points': 'self.points', 'segments': '(150)', 'loop': 'self.loop', 'dash_length': '(100)', 'dash_offset': '(10)'}), '(points=self.points, segments=150, loop=self.loop, dash_length=100,\n dash_offset=10)\n', (530, 617), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((728, 748), 'kivy.graphics.Color', 'Color', (['(1.0)', '(0.0)', '(1.0)'], {}), '(1.0, 0.0, 1.0)\n', (733, 748), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((773, 848), 'kivy.graphics.Line', 'Line', ([], {'points': '(self.points + self.points[:2])', 'dash_offset': '(10)', 'dash_length': '(100)'}), '(points=self.points + self.points[:2], dash_offset=10, dash_length=100)\n', (777, 848), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((3080, 3090), 'math.radians', 'radians', (['i'], {}), '(i)\n', (3087, 3090), False, 'from math import cos, sin, radians\n'), ((3122, 3128), 'math.cos', 'cos', (['i'], {}), '(i)\n', (3125, 3128), False, 'from math import cos, sin, radians\n'), ((3138, 3144), 'math.sin', 'sin', (['i'], {}), '(i)\n', (3141, 3144), False, 'from math import cos, sin, radians\n')] |
reevespaul/firebird-qa | tests/bugs/core_6489_test.py | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | #coding:utf-8
#
# id: bugs.core_6489
# title: User without ALTER ANY ROLE privilege can use COMMENT ON ROLE
# description:
#                   Test creates two users: one of them has no rights at all, the second is granted the 'alter any role' privilege.
#                   The first user ('junior') must not be able to add a comment to the rdb$admin role, but the second ('senior') must
#                   be able to set the comment to any string and reset it to null.
#
# Confirmed bug on 4.0.0.2384, 3.0.8.33425
# Checked on: 4.0.0.2387, 3.0.8.33426 -- all OK.
#
# NOTE:
#                       The phrase '-Effective user is ...' is present only in FB 4.x and is suppressed here.
#
# tracker_id: CORE-6489
# min_versions: ['3.0.8']
# versions: 3.0.8
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.8
# resources: None
substitutions_1 = [('ROLE_DESCR_BLOB_ID .*', ''), ('[\t ]+', ' '), ('(-)?Effective user is.*', '')]
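# The substitutions strip the volatile blob id, collapse runs of whitespace, and hide the FB 4.x-only 'Effective user is ...' line before the output is compared.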
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create or alter user tmp$c6489_junior password '123' using plugin Srp;
create or alter user tmp$c6489_senior password '456' using plugin Srp;
commit;
grant alter any role to user tmp$c6489_senior;
commit;
connect '$(DSN)' user tmp$c6489_junior password '123';
comment on role rdb$admin is 'Comment by tmp$c6489_junior';
commit;
connect '$(DSN)' user tmp$c6489_senior password '456';
comment on role rdb$admin is 'Comment by tmp$c6489_senior';
commit;
set list on;
select r.rdb$description as role_descr_blob_id from rdb$roles r where r.rdb$role_name = upper('rdb$admin');
commit;
comment on role rdb$admin is null;
commit;
connect '$(DSN)' user 'SYSDBA' password 'masterkey';
drop user tmp$c6489_junior using plugin Srp;
drop user tmp$c6489_senior using plugin Srp;
commit;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Comment by tmp$c6489_senior
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 28000
unsuccessful metadata update
-COMMENT ON RDB$ADMIN failed
-no permission for ALTER access to ROLE RDB$ADMIN
-Effective user is TMP$C6489_JUNIOR
"""
@pytest.mark.version('>=3.0.8')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_expected_stderr == act_1.clean_stderr
assert act_1.clean_expected_stdout == act_1.clean_stdout
| [((1099, 1144), 'firebird.qa.db_factory', 'db_factory', ([], {'sql_dialect': '(3)', 'init': 'init_script_1'}), '(sql_dialect=3, init=init_script_1)\n', (1109, 1144), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((2048, 2110), 'firebird.qa.isql_act', 'isql_act', (['"""db_1"""', 'test_script_1'], {'substitutions': 'substitutions_1'}), "('db_1', test_script_1, substitutions=substitutions_1)\n", (2056, 2110), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((2405, 2435), 'pytest.mark.version', 'pytest.mark.version', (['""">=3.0.8"""'], {}), "('>=3.0.8')\n", (2424, 2435), False, 'import pytest\n')] |
MasoonZhang/FasterRConvMixer | utils/utils_bbox.py | a7a17d00f716a28a5b301088053e00840c222524 | import numpy as np
import torch
from torch.nn import functional as F
from torchvision.ops import nms
def loc2bbox(src_bbox, loc):
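    """Decode box-regression offsets.

    src_bbox holds source boxes in corner format (x1, y1, x2, y2) and loc holds
    the predicted offsets (dx, dy, dw, dh): the centre is shifted by dx/dy scaled
    by the source width/height, the size is rescaled by exp(dw)/exp(dh), and the
    result is converted back to corner format.
    """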
if src_bbox.size()[0] == 0:
return torch.zeros((0, 4), dtype=loc.dtype)
src_width = torch.unsqueeze(src_bbox[:, 2] - src_bbox[:, 0], -1)
src_height = torch.unsqueeze(src_bbox[:, 3] - src_bbox[:, 1], -1)
src_ctr_x = torch.unsqueeze(src_bbox[:, 0], -1) + 0.5 * src_width
src_ctr_y = torch.unsqueeze(src_bbox[:, 1], -1) + 0.5 * src_height
dx = loc[:, 0::4]
dy = loc[:, 1::4]
dw = loc[:, 2::4]
dh = loc[:, 3::4]
ctr_x = dx * src_width + src_ctr_x
ctr_y = dy * src_height + src_ctr_y
w = torch.exp(dw) * src_width
h = torch.exp(dh) * src_height
dst_bbox = torch.zeros_like(loc)
dst_bbox[:, 0::4] = ctr_x - 0.5 * w
dst_bbox[:, 1::4] = ctr_y - 0.5 * h
dst_bbox[:, 2::4] = ctr_x + 0.5 * w
dst_bbox[:, 3::4] = ctr_y + 0.5 * h
return dst_bbox
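# Quick sanity check (sketch): zero offsets decode back to the source box, e.g.
#   loc2bbox(torch.tensor([[0., 0., 10., 10.]]), torch.zeros((1, 4)))
#   -> tensor([[0., 0., 10., 10.]])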
class DecodeBox():
def __init__(self, std, num_classes):
self.std = std
self.num_classes = num_classes + 1
def frcnn_correct_boxes(self, box_xy, box_wh, input_shape, image_shape):
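        """Convert normalized centre-format boxes to corner-format pixel
        coordinates (y1, x1, y2, x2) on the original image."""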
#-----------------------------------------------------------------#
        #   The y axis is put first so the boxes can be multiplied directly by the image height and width
#-----------------------------------------------------------------#
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = np.array(input_shape)
image_shape = np.array(image_shape)
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = np.concatenate([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[..., 1:2]], axis=-1)
boxes *= np.concatenate([image_shape, image_shape], axis=-1)
return boxes
def forward(self, roi_cls_locs, roi_scores, rois, image_shape, input_shape, nms_iou = 0.3, confidence = 0.5):
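        """Turn the classifier-head outputs into final detections: decode the
        per-class regressions, apply softmax scores, keep boxes above the
        confidence threshold, run per-class NMS and map the survivors back to
        the original image."""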
results = []
bs = len(roi_cls_locs)
#--------------------------------#
# batch_size, num_rois, 4
#--------------------------------#
rois = rois.view((bs, -1, 4))
#----------------------------------------------------------------------------------------------------------------#
        #   Process each image; since predict.py feeds in a single image, for i in range(len(mbox_loc)) runs only once
#----------------------------------------------------------------------------------------------------------------#
for i in range(bs):
#----------------------------------------------------------#
            #   Reshape the regression parameters
#----------------------------------------------------------#
roi_cls_loc = roi_cls_locs[i] * self.std
#----------------------------------------------------------#
            #   The first dimension is the number of proposal boxes, the second is the class,
            #   and the third holds the adjustment parameters for that class
#----------------------------------------------------------#
roi_cls_loc = roi_cls_loc.view([-1, self.num_classes, 4])
#-------------------------------------------------------------#
            #   Adjust the proposal boxes with the classifier network's predictions to obtain the predicted boxes
# num_rois, 4 -> num_rois, 1, 4 -> num_rois, num_classes, 4
#-------------------------------------------------------------#
roi = rois[i].view((-1, 1, 4)).expand_as(roi_cls_loc)
cls_bbox = loc2bbox(roi.contiguous().view((-1, 4)), roi_cls_loc.contiguous().view((-1, 4)))
cls_bbox = cls_bbox.view([-1, (self.num_classes), 4])
#-------------------------------------------------------------#
            #   Normalize the predicted boxes into the 0-1 range
#-------------------------------------------------------------#
cls_bbox[..., [0, 2]] = (cls_bbox[..., [0, 2]]) / input_shape[1]
cls_bbox[..., [1, 3]] = (cls_bbox[..., [1, 3]]) / input_shape[0]
roi_score = roi_scores[i]
prob = F.softmax(roi_score, dim=-1)
results.append([])
for c in range(1, self.num_classes):
#--------------------------------#
                #   Take the confidences of every box belonging to this class
                #   and check whether they exceed the threshold
#--------------------------------#
c_confs = prob[:, c]
c_confs_m = c_confs > confidence
if len(c_confs[c_confs_m]) > 0:
#-----------------------------------------#
                    #   Keep the boxes whose score is above the confidence threshold
#-----------------------------------------#
boxes_to_process = cls_bbox[c_confs_m, c]
confs_to_process = c_confs[c_confs_m]
keep = nms(
boxes_to_process,
confs_to_process,
nms_iou
)
#-----------------------------------------#
                    #   Keep the boxes retained by non-maximum suppression
#-----------------------------------------#
good_boxes = boxes_to_process[keep]
confs = confs_to_process[keep][:, None]
labels = (c - 1) * torch.ones((len(keep), 1)).cuda() if confs.is_cuda else (c - 1) * torch.ones((len(keep), 1))
#-----------------------------------------#
                    #   Stack the label, the confidence and the box coordinates
#-----------------------------------------#
c_pred = torch.cat((good_boxes, confs, labels), dim=1).cpu().numpy()
                    # Append them to the results
results[-1].extend(c_pred)
if len(results[-1]) > 0:
results[-1] = np.array(results[-1])
box_xy, box_wh = (results[-1][:, 0:2] + results[-1][:, 2:4])/2, results[-1][:, 2:4] - results[-1][:, 0:2]
results[-1][:, :4] = self.frcnn_correct_boxes(box_xy, box_wh, input_shape, image_shape)
return results
| [((235, 291), 'torch.unsqueeze', 'torch.unsqueeze', (['(src_bbox[:, (2)] - src_bbox[:, (0)])', '(-1)'], {}), '(src_bbox[:, (2)] - src_bbox[:, (0)], -1)\n', (250, 291), False, 'import torch\n'), ((306, 362), 'torch.unsqueeze', 'torch.unsqueeze', (['(src_bbox[:, (3)] - src_bbox[:, (1)])', '(-1)'], {}), '(src_bbox[:, (3)] - src_bbox[:, (1)], -1)\n', (321, 362), False, 'import torch\n'), ((794, 815), 'torch.zeros_like', 'torch.zeros_like', (['loc'], {}), '(loc)\n', (810, 815), False, 'import torch\n'), ((179, 215), 'torch.zeros', 'torch.zeros', (['(0, 4)'], {'dtype': 'loc.dtype'}), '((0, 4), dtype=loc.dtype)\n', (190, 215), False, 'import torch\n'), ((377, 414), 'torch.unsqueeze', 'torch.unsqueeze', (['src_bbox[:, (0)]', '(-1)'], {}), '(src_bbox[:, (0)], -1)\n', (392, 414), False, 'import torch\n'), ((449, 486), 'torch.unsqueeze', 'torch.unsqueeze', (['src_bbox[:, (1)]', '(-1)'], {}), '(src_bbox[:, (1)], -1)\n', (464, 486), False, 'import torch\n'), ((717, 730), 'torch.exp', 'torch.exp', (['dw'], {}), '(dw)\n', (726, 730), False, 'import torch\n'), ((751, 764), 'torch.exp', 'torch.exp', (['dh'], {}), '(dh)\n', (760, 764), False, 'import torch\n'), ((1502, 1523), 'numpy.array', 'np.array', (['input_shape'], {}), '(input_shape)\n', (1510, 1523), True, 'import numpy as np\n'), ((1546, 1567), 'numpy.array', 'np.array', (['image_shape'], {}), '(image_shape)\n', (1554, 1567), True, 'import numpy as np\n'), ((1676, 1795), 'numpy.concatenate', 'np.concatenate', (['[box_mins[(...), 0:1], box_mins[(...), 1:2], box_maxes[(...), 0:1],\n box_maxes[(...), 1:2]]'], {'axis': '(-1)'}), '([box_mins[(...), 0:1], box_mins[(...), 1:2], box_maxes[(...),\n 0:1], box_maxes[(...), 1:2]], axis=-1)\n', (1690, 1795), True, 'import numpy as np\n'), ((1801, 1852), 'numpy.concatenate', 'np.concatenate', (['[image_shape, image_shape]'], {'axis': '(-1)'}), '([image_shape, image_shape], axis=-1)\n', (1815, 1852), True, 'import numpy as np\n'), ((4019, 4047), 'torch.nn.functional.softmax', 'F.softmax', (['roi_score'], {'dim': '(-1)'}), '(roi_score, dim=-1)\n', (4028, 4047), True, 'from torch.nn import functional as F\n'), ((5742, 5763), 'numpy.array', 'np.array', (['results[-1]'], {}), '(results[-1])\n', (5750, 5763), True, 'import numpy as np\n'), ((4755, 4803), 'torchvision.ops.nms', 'nms', (['boxes_to_process', 'confs_to_process', 'nms_iou'], {}), '(boxes_to_process, confs_to_process, nms_iou)\n', (4758, 4803), False, 'from torchvision.ops import nms\n'), ((5534, 5579), 'torch.cat', 'torch.cat', (['(good_boxes, confs, labels)'], {'dim': '(1)'}), '((good_boxes, confs, labels), dim=1)\n', (5543, 5579), False, 'import torch\n')] |
woozhijun/cat | lib/python/test/__init__.py | 3d523202c38e37b1a2244b26d4336ebbea5db001 | #!/usr/bin/env python
# encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8") | [((65, 96), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (87, 96), False, 'import sys\n')] |
odidev/pyclipper | tests/test_pyclipper.py | 3de54fa4c4d5b8efeede364fbe69336f935f88f2 | #!/usr/bin/python
"""
Tests for Pyclipper wrapper library.
"""
from __future__ import print_function
from unittest2 import TestCase, main
import sys
if sys.version_info < (3,):
integer_types = (int, long)
else:
integer_types = (int,)
import pyclipper
# Example polygons from http://www.angusj.com/delphi/clipper.php
PATH_SUBJ_1 = [[180, 200], [260, 200], [260, 150], [180, 150]] # square, orientation is False
PATH_SUBJ_2 = [[215, 160], [230, 190], [200, 190]] # triangle
PATH_CLIP_1 = [[190, 210], [240, 210], [240, 130], [190, 130]] # square
PATH_SIGMA = [[300, 400], [100, 400], [200, 300], [100, 200], [300, 200]] # greek letter sigma
PATTERN = [[4, -6], [6, -6], [-4, 6], [-6, 6]]
INVALID_PATH = [[1, 1], ] # less than 2 vertices
class TestPyclipperModule(TestCase):
def test_has_classes(self):
self.assertTrue(hasattr(pyclipper, 'Pyclipper'))
self.assertTrue(hasattr(pyclipper, 'PyclipperOffset'))
def test_has_namespace_methods(self):
for method in ('Orientation', 'Area', 'PointInPolygon', 'SimplifyPolygon', 'SimplifyPolygons',
'CleanPolygon', 'CleanPolygons', 'MinkowskiSum', 'MinkowskiSum2', 'MinkowskiDiff',
'PolyTreeToPaths', 'ClosedPathsFromPolyTree', 'OpenPathsFromPolyTree',
'ReversePath', 'ReversePaths'):
self.assertTrue(hasattr(pyclipper, method))
class TestNamespaceMethods(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
def test_orientation(self):
self.assertFalse(pyclipper.Orientation(PATH_SUBJ_1))
self.assertTrue(pyclipper.Orientation(PATH_SUBJ_1[::-1]))
def test_area(self):
# area less than 0 because orientation is False
area_neg = pyclipper.Area(PATH_SUBJ_1)
area_pos = pyclipper.Area(PATH_SUBJ_1[::-1])
self.assertLess(area_neg, 0)
self.assertGreater(area_pos, 0)
self.assertEqual(abs(area_neg), area_pos)
def test_point_in_polygon(self):
# on polygon
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
# in polygon
self.assertEqual(pyclipper.PointInPolygon((200, 180), PATH_SUBJ_1), 1)
# outside of polygon
self.assertEqual(pyclipper.PointInPolygon((500, 500), PATH_SUBJ_1), 0)
def test_minkowski_sum(self):
solution = pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
self.assertGreater(len(solution), 0)
def test_minkowski_sum2(self):
solution = pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
self.assertGreater(len(solution), 0)
def test_minkowski_diff(self):
solution = pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
self.assertGreater(len(solution), 0)
def test_reverse_path(self):
solution = pyclipper.ReversePath(PATH_SUBJ_1)
manualy_reversed = PATH_SUBJ_1[::-1]
self.check_reversed_path(solution, manualy_reversed)
def test_reverse_paths(self):
solution = pyclipper.ReversePaths([PATH_SUBJ_1])
manualy_reversed = [PATH_SUBJ_1[::-1]]
self.check_reversed_path(solution[0], manualy_reversed[0])
def check_reversed_path(self, path_1, path_2):
        if len(path_1) != len(path_2):
return False
for i in range(len(path_1)):
self.assertEqual(path_1[i][0], path_2[i][0])
self.assertEqual(path_1[i][1], path_2[i][1])
def test_simplify_polygon(self):
solution = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
def test_simplify_polygons(self):
solution = pyclipper.SimplifyPolygons([PATH_SUBJ_1])
solution_single = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution), len(solution_single))
_do_solutions_match(solution, solution_single)
def test_clean_polygon(self):
solution = pyclipper.CleanPolygon(PATH_CLIP_1)
self.assertEqual(len(solution), len(PATH_CLIP_1))
def test_clean_polygons(self):
solution = pyclipper.CleanPolygons([PATH_CLIP_1])
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution[0]), len(PATH_CLIP_1))
class TestFilterPyPolyNode(TestCase):
def setUp(self):
tree = pyclipper.PyPolyNode()
tree.Contour.append(PATH_CLIP_1)
tree.IsOpen = True
child = pyclipper.PyPolyNode()
child.IsOpen = False
child.Parent = tree
child.Contour = PATH_SUBJ_1
tree.Childs.append(child)
child = pyclipper.PyPolyNode()
child.IsOpen = True
child.Parent = tree
child.Contour = PATH_SUBJ_2
tree.Childs.append(child)
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = PATTERN
child.Childs.append(child2)
# empty contour should not
# be included in filtered results
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = []
child.Childs.append(child2)
self.tree = tree
def test_polytree_to_paths(self):
paths = pyclipper.PolyTreeToPaths(self.tree)
self.check_paths(paths, 4)
def test_closed_paths_from_polytree(self):
paths = pyclipper.ClosedPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def test_open_paths_from_polytree(self):
paths = pyclipper.OpenPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def check_paths(self, paths, expected_nr):
self.assertEqual(len(paths), expected_nr)
self.assertTrue(all((len(path) > 0 for path in paths)))
class TestPyclipperAddPaths(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
def test_add_path(self):
# should not raise an exception
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
# should not raise an exception
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
def test_add_path_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPath, INVALID_PATH, pyclipper.PT_CLIP, True)
def test_add_paths_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPaths, [INVALID_PATH, INVALID_PATH],
pyclipper.PT_CLIP, True)
try:
self.pc.AddPaths([INVALID_PATH, PATH_CLIP_1], pyclipper.PT_CLIP)
self.pc.AddPaths([PATH_CLIP_1, INVALID_PATH], pyclipper.PT_CLIP)
except pyclipper.ClipperException:
self.fail("add_paths raised ClipperException when not all paths were invalid")
class TestClassProperties(TestCase):
def check_property_assignment(self, pc, prop_name, values):
for val in values:
setattr(pc, prop_name, val)
self.assertEqual(getattr(pc, prop_name), val)
def test_pyclipper_properties(self):
pc = pyclipper.Pyclipper()
for prop_name in ('ReverseSolution', 'PreserveCollinear', 'StrictlySimple'):
self.check_property_assignment(pc, prop_name, [True, False])
def test_pyclipperoffset_properties(self):
for factor in range(6):
pyclipper.SCALING_FACTOR = 10 ** factor
pc = pyclipper.PyclipperOffset()
for prop_name in ('MiterLimit', 'ArcTolerance'):
self.check_property_assignment(pc, prop_name, [2.912, 132.12, 12, -123])
class TestPyclipperExecute(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
self.add_default_paths(self.pc)
self.default_args = [pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD]
@staticmethod
def add_default_paths(pc):
pc.AddPath(PATH_CLIP_1, pyclipper.PT_CLIP)
pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], pyclipper.PT_SUBJECT)
@staticmethod
def add_paths(pc, clip_path, subj_paths, addend=None, multiplier=None):
pc.AddPath(_modify_vertices(clip_path, addend=addend, multiplier=multiplier), pyclipper.PT_CLIP)
for subj_path in subj_paths:
pc.AddPath(_modify_vertices(subj_path, addend=addend, multiplier=multiplier), pyclipper.PT_SUBJECT)
def test_get_bounds(self):
bounds = self.pc.GetBounds()
self.assertIsInstance(bounds, pyclipper.PyIntRect)
self.assertEqual(bounds.left, 180)
self.assertEqual(bounds.right, 260)
self.assertEqual(bounds.top, 130)
self.assertEqual(bounds.bottom, 210)
def test_execute(self):
solution = self.pc.Execute(*self.default_args)
self.assertEqual(len(solution), 2)
def test_execute2(self):
solution = self.pc.Execute2(*self.default_args)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.check_pypolynode(solution)
def test_execute_empty(self):
pc = pyclipper.Pyclipper()
with self.assertRaises(pyclipper.ClipperException):
pc.Execute(pyclipper.CT_UNION,
pyclipper.PFT_NONZERO,
pyclipper.PFT_NONZERO)
def test_clear(self):
self.pc.Clear()
with self.assertRaises(pyclipper.ClipperException):
self.pc.Execute(*self.default_args)
def test_exact_results(self):
"""
Test whether coordinates passed into the library are returned exactly, if they are not affected by the
operation.
"""
pc = pyclipper.Pyclipper()
# Some large triangle.
path = [[[0, 1], [0, 0], [15 ** 15, 0]]]
pc.AddPaths(path, pyclipper.PT_SUBJECT, True)
result = pc.Execute(pyclipper.PT_CLIP, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
assert result == path
def check_pypolynode(self, node):
self.assertTrue(len(node.Contour) == 0 or len(node.Contour) > 2)
# check vertex coordinate, should not be an iterable (in that case
# that means that node.Contour is a list of paths, should be path
if node.Contour:
self.assertFalse(hasattr(node.Contour[0][0], '__iter__'))
for child in node.Childs:
self.check_pypolynode(child)
class TestPyclipperOffset(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
@staticmethod
def add_path(pc, path):
pc.AddPath(path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
def test_execute(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 1)
def test_execute2(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute2(2.0)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.assertEqual(len(pyclipper.OpenPathsFromPolyTree(solution)), 0)
self.assertEqual(len(pyclipper.ClosedPathsFromPolyTree(solution)), 1)
def test_clear(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
pc.Clear()
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 0)
class TestScalingFactorWarning(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 2.
self.pc = pyclipper.Pyclipper()
def test_orientation(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Orientation(PATH_SUBJ_1)
def test_area(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Area(PATH_SUBJ_1)
def test_point_in_polygon(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
def test_minkowski_sum(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
def test_minkowski_sum2(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
def test_minkowski_diff(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
def test_add_path(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
class TestScalingFunctions(TestCase):
scale = 2 ** 31
path = [(0, 0), (1, 1)]
paths = [path] * 3
def test_value_scale_to(self):
value = 0.5
res = pyclipper.scale_to_clipper(value, self.scale)
assert isinstance(res, integer_types)
assert res == int(value * self.scale)
def test_value_scale_from(self):
value = 1000000000000
res = pyclipper.scale_from_clipper(value, self.scale)
assert isinstance(res, float)
# Convert to float to get "normal" division in Python < 3.
assert res == float(value) / self.scale
def test_path_scale_to(self):
res = pyclipper.scale_to_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, integer_types) for i in res for j in i)
def test_path_scale_from(self):
res = pyclipper.scale_from_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, float) for i in res for j in i)
def test_paths_scale_to(self):
res = pyclipper.scale_to_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, integer_types) for i in res for j in i for k in j)
def test_paths_scale_from(self):
res = pyclipper.scale_from_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, float) for i in res for j in i for k in j)
class TestNonStandardNumbers(TestCase):
def test_sympyzero(self):
try:
from sympy import Point2D
from sympy.core.numbers import Zero
except ImportError:
self.skipTest("Skipping, sympy not available")
        path = [Point2D(v) for v in [(0,0), (0,1)]]
assert type(path[0].x) == Zero
path = pyclipper.scale_to_clipper(path)
assert path == [[0, 0], [0, 2147483648]]
def _do_solutions_match(paths_1, paths_2, factor=None):
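    """Return True when both solutions contain the same paths, optionally
    rescaling (and rounding) the vertices by ``factor`` before comparing."""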
if len(paths_1) != len(paths_2):
return False
paths_1 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_1]
paths_2 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_2]
return all(((p_1 in paths_2) for p_1 in paths_1))
def _modify_vertices(path, addend=0.0, multiplier=1.0, converter=None):
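    """Return a copy of ``path`` with every coordinate multiplied by ``multiplier``,
    shifted by ``addend`` and optionally passed through ``converter`` (e.g. round)."""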
path = path[:]
def convert_coordinate(c):
if multiplier is not None:
c *= multiplier
if addend is not None:
c += addend
if converter:
c = converter(c)
return c
return [[convert_coordinate(c) for c in v] for v in path]
def run_tests():
main()
if __name__ == '__main__':
run_tests()
| [((15834, 15840), 'unittest2.main', 'main', ([], {}), '()\n', (15838, 15840), False, 'from unittest2 import TestCase, main\n'), ((1759, 1786), 'pyclipper.Area', 'pyclipper.Area', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (1773, 1786), False, 'import pyclipper\n'), ((1806, 1839), 'pyclipper.Area', 'pyclipper.Area', (['PATH_SUBJ_1[::-1]'], {}), '(PATH_SUBJ_1[::-1])\n', (1820, 1839), False, 'import pyclipper\n'), ((2370, 2420), 'pyclipper.MinkowskiSum', 'pyclipper.MinkowskiSum', (['PATTERN', 'PATH_SIGMA', '(False)'], {}), '(PATTERN, PATH_SIGMA, False)\n', (2392, 2420), False, 'import pyclipper\n'), ((2521, 2574), 'pyclipper.MinkowskiSum2', 'pyclipper.MinkowskiSum2', (['PATTERN', '[PATH_SIGMA]', '(False)'], {}), '(PATTERN, [PATH_SIGMA], False)\n', (2544, 2574), False, 'import pyclipper\n'), ((2675, 2724), 'pyclipper.MinkowskiDiff', 'pyclipper.MinkowskiDiff', (['PATH_SUBJ_1', 'PATH_SUBJ_2'], {}), '(PATH_SUBJ_1, PATH_SUBJ_2)\n', (2698, 2724), False, 'import pyclipper\n'), ((2823, 2857), 'pyclipper.ReversePath', 'pyclipper.ReversePath', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (2844, 2857), False, 'import pyclipper\n'), ((3018, 3055), 'pyclipper.ReversePaths', 'pyclipper.ReversePaths', (['[PATH_SUBJ_1]'], {}), '([PATH_SUBJ_1])\n', (3040, 3055), False, 'import pyclipper\n'), ((3499, 3537), 'pyclipper.SimplifyPolygon', 'pyclipper.SimplifyPolygon', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (3524, 3537), False, 'import pyclipper\n'), ((3639, 3680), 'pyclipper.SimplifyPolygons', 'pyclipper.SimplifyPolygons', (['[PATH_SUBJ_1]'], {}), '([PATH_SUBJ_1])\n', (3665, 3680), False, 'import pyclipper\n'), ((3707, 3745), 'pyclipper.SimplifyPolygon', 'pyclipper.SimplifyPolygon', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (3732, 3745), False, 'import pyclipper\n'), ((3960, 3995), 'pyclipper.CleanPolygon', 'pyclipper.CleanPolygon', (['PATH_CLIP_1'], {}), '(PATH_CLIP_1)\n', (3982, 3995), False, 'import pyclipper\n'), ((4109, 4147), 'pyclipper.CleanPolygons', 'pyclipper.CleanPolygons', (['[PATH_CLIP_1]'], {}), '([PATH_CLIP_1])\n', (4132, 4147), False, 'import pyclipper\n'), ((4328, 4350), 'pyclipper.PyPolyNode', 'pyclipper.PyPolyNode', ([], {}), '()\n', (4348, 4350), False, 'import pyclipper\n'), ((4436, 4458), 'pyclipper.PyPolyNode', 'pyclipper.PyPolyNode', ([], {}), '()\n', (4456, 4458), False, 'import pyclipper\n'), ((4603, 4625), 'pyclipper.PyPolyNode', 'pyclipper.PyPolyNode', ([], {}), '()\n', (4623, 4625), False, 'import pyclipper\n'), ((4770, 4792), 'pyclipper.PyPolyNode', 'pyclipper.PyPolyNode', ([], {}), '()\n', (4790, 4792), False, 'import pyclipper\n'), ((5017, 5039), 'pyclipper.PyPolyNode', 'pyclipper.PyPolyNode', ([], {}), '()\n', (5037, 5039), False, 'import pyclipper\n'), ((5245, 5281), 'pyclipper.PolyTreeToPaths', 'pyclipper.PolyTreeToPaths', (['self.tree'], {}), '(self.tree)\n', (5270, 5281), False, 'import pyclipper\n'), ((5381, 5425), 'pyclipper.ClosedPathsFromPolyTree', 'pyclipper.ClosedPathsFromPolyTree', (['self.tree'], {}), '(self.tree)\n', (5414, 5425), False, 'import pyclipper\n'), ((5523, 5565), 'pyclipper.OpenPathsFromPolyTree', 'pyclipper.OpenPathsFromPolyTree', (['self.tree'], {}), '(self.tree)\n', (5554, 5565), False, 'import pyclipper\n'), ((5880, 5901), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (5899, 5901), False, 'import pyclipper\n'), ((7128, 7149), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (7147, 7149), False, 'import pyclipper\n'), ((7751, 7772), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (7770, 7772), False, 
'import pyclipper\n'), ((9100, 9121), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (9119, 9121), False, 'import pyclipper\n'), ((9679, 9700), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (9698, 9700), False, 'import pyclipper\n'), ((10653, 10680), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (10678, 10680), False, 'import pyclipper\n'), ((10887, 10914), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (10912, 10914), False, 'import pyclipper\n'), ((11246, 11273), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (11271, 11273), False, 'import pyclipper\n'), ((11577, 11598), 'pyclipper.Pyclipper', 'pyclipper.Pyclipper', ([], {}), '()\n', (11596, 11598), False, 'import pyclipper\n'), ((12974, 13019), 'pyclipper.scale_to_clipper', 'pyclipper.scale_to_clipper', (['value', 'self.scale'], {}), '(value, self.scale)\n', (13000, 13019), False, 'import pyclipper\n'), ((13195, 13242), 'pyclipper.scale_from_clipper', 'pyclipper.scale_from_clipper', (['value', 'self.scale'], {}), '(value, self.scale)\n', (13223, 13242), False, 'import pyclipper\n'), ((13446, 13483), 'pyclipper.scale_to_clipper', 'pyclipper.scale_to_clipper', (['self.path'], {}), '(self.path)\n', (13472, 13483), False, 'import pyclipper\n'), ((13704, 13743), 'pyclipper.scale_from_clipper', 'pyclipper.scale_from_clipper', (['self.path'], {}), '(self.path)\n', (13732, 13743), False, 'import pyclipper\n'), ((13955, 13993), 'pyclipper.scale_to_clipper', 'pyclipper.scale_to_clipper', (['self.paths'], {}), '(self.paths)\n', (13981, 13993), False, 'import pyclipper\n'), ((14291, 14331), 'pyclipper.scale_from_clipper', 'pyclipper.scale_from_clipper', (['self.paths'], {}), '(self.paths)\n', (14319, 14331), False, 'import pyclipper\n'), ((14965, 14997), 'pyclipper.scale_to_clipper', 'pyclipper.scale_to_clipper', (['path'], {}), '(path)\n', (14991, 14997), False, 'import pyclipper\n'), ((1556, 1590), 'pyclipper.Orientation', 'pyclipper.Orientation', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (1577, 1590), False, 'import pyclipper\n'), ((1616, 1656), 'pyclipper.Orientation', 'pyclipper.Orientation', (['PATH_SUBJ_1[::-1]'], {}), '(PATH_SUBJ_1[::-1])\n', (1637, 1656), False, 'import pyclipper\n'), ((2051, 2100), 'pyclipper.PointInPolygon', 'pyclipper.PointInPolygon', (['(180, 200)', 'PATH_SUBJ_1'], {}), '((180, 200), PATH_SUBJ_1)\n', (2075, 2100), False, 'import pyclipper\n'), ((2153, 2202), 'pyclipper.PointInPolygon', 'pyclipper.PointInPolygon', (['(200, 180)', 'PATH_SUBJ_1'], {}), '((200, 180), PATH_SUBJ_1)\n', (2177, 2202), False, 'import pyclipper\n'), ((2262, 2311), 'pyclipper.PointInPolygon', 'pyclipper.PointInPolygon', (['(500, 500)', 'PATH_SUBJ_1'], {}), '((500, 500), PATH_SUBJ_1)\n', (2286, 2311), False, 'import pyclipper\n'), ((7457, 7484), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (7482, 7484), False, 'import pyclipper\n'), ((11695, 11729), 'pyclipper.Orientation', 'pyclipper.Orientation', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (11716, 11729), False, 'import pyclipper\n'), ((11819, 11846), 'pyclipper.Area', 'pyclipper.Area', (['PATH_SUBJ_1'], {}), '(PATH_SUBJ_1)\n', (11833, 11846), False, 'import pyclipper\n'), ((12118, 12168), 'pyclipper.MinkowskiSum', 'pyclipper.MinkowskiSum', (['PATTERN', 'PATH_SIGMA', '(False)'], {}), '(PATTERN, PATH_SIGMA, False)\n', (12140, 12168), False, 'import pyclipper\n'), ((12268, 12321), 'pyclipper.MinkowskiSum2', 'pyclipper.MinkowskiSum2', (['PATTERN', 
'[PATH_SIGMA]', '(False)'], {}), '(PATTERN, [PATH_SIGMA], False)\n', (12291, 12321), False, 'import pyclipper\n'), ((12421, 12470), 'pyclipper.MinkowskiDiff', 'pyclipper.MinkowskiDiff', (['PATH_SUBJ_1', 'PATH_SUBJ_2'], {}), '(PATH_SUBJ_1, PATH_SUBJ_2)\n', (12444, 12470), False, 'import pyclipper\n'), ((14875, 14885), 'sympy.Point2D', 'Point2D', (['v'], {}), '(v)\n', (14882, 14885), False, 'from sympy import Point2D\n'), ((11081, 11122), 'pyclipper.OpenPathsFromPolyTree', 'pyclipper.OpenPathsFromPolyTree', (['solution'], {}), '(solution)\n', (11112, 11122), False, 'import pyclipper\n'), ((11157, 11200), 'pyclipper.ClosedPathsFromPolyTree', 'pyclipper.ClosedPathsFromPolyTree', (['solution'], {}), '(solution)\n', (11190, 11200), False, 'import pyclipper\n'), ((11965, 12014), 'pyclipper.PointInPolygon', 'pyclipper.PointInPolygon', (['(180, 200)', 'PATH_SUBJ_1'], {}), '((180, 200), PATH_SUBJ_1)\n', (11989, 12014), False, 'import pyclipper\n')] |
emre/espoem_facts | espoem_facts/facts.py | 0d7164dcfe8a82e1f142929b1e00c3a85f29f101 | FACTS = ['espoem multiplied by zero still equals espoem.',
'There is no theory of evolution. Just a list of creatures espoem has allowed to live.',
'espoem does not sleep. He waits.',
'Alexander Graham Bell had three missed calls from espoem when he invented the telephone.',
'espoem is the reason why Waldo is hiding.',
'espoem can slam a revolving door.',
"espoem isn't lifting himself up when doing a pushup; he's pushing the earth down.",
"espoem' hand is the only hand that can beat a Royal Flush.",
'espoem made a Happy Meal cry.',
"espoem doesn't need Twitter; he's already following you.",
         'espoem once won an underwater breathing contest with a fish. While urinating, espoem is easily capable of welding titanium.',
'In an act of great philanthropy, espoem made a generous donation to the American Cancer Society. He donated 6,000 dead bodies for scientific research.',
'espoem once one a game of connect four in 3 moves.',
"Google won't search for espoem because it knows you don't find espoem, he finds you.",
         "espoem's favourite cut of meat is the roundhouse.",
'It is scientifically impossible for espoem to have had a mortal father. The most popular theory is that he went back in time and fathered himself.',
'espoem had to stop washing his clothes in the ocean. The tsunamis were killing people.',
'Pluto is actually an orbiting group of British soldiers from the American Revolution who entered space after the espoem gave them a roundhouse kick to the face.',
         'In the words of Julius Caesar, Veni, Vidi, Vici, espoem. Translation: I came, I saw, and I was roundhouse-kicked in the face by espoem.',
"espoem doesn't look both ways before he crosses the street... he just roundhouses any cars that get too close.",
'Human cloning is outlawed because of espoem, because then it would be possible for a espoem roundhouse kick to meet another espoem roundhouse kick. Physicists theorize that this contact would end the universe.',
'Using his trademark roundhouse kick, espoem once made a fieldgoal in RJ Stadium in Tampa Bay from the 50 yard line of Qualcomm stadium in San Diego.',
'espoem played Russian Roulette with a fully loaded gun and won.',
"espoem roundhouse kicks don't really kill people. They wipe out their entire existence from the space-time continuum.",
"espoem' testicles do not produce sperm. They produce tiny white ninjas that recognize only one mission: seek and destroy.",
'MacGyver immediately tried to make a bomb out of some Q-Tips and Gatorade, but espoem roundhouse-kicked him in the solar plexus. MacGyver promptly threw up his own heart.',
'Not everyone that espoem is mad at gets killed. Some get away. They are called astronauts.',
'espoem can drink an entire gallon of milk in thirty-seven seconds.',
'If you spell espoem in Scrabble, you win. Forever.',
"When you say no one's perfect, espoem takes this as a personal insult.",
"espoem invented Kentucky Fried Chicken's famous secret recipe with eleven herbs and spices. Nobody ever mentions the twelfth ingredient: Fear.",
'espoem can skeletize a cow in two minutes.',
'espoem eats lightning and shits out thunder.',
'In a fight between Batman and Darth Vader, the winner would be espoem.',
"The phrase 'dead ringer' refers to someone who sits behind espoem in a movie theater and forgets to turn their cell phone off.",
"It is said that looking into espoem' eyes will reveal your future. Unfortunately, everybody's future is always the same: death by a roundhouse-kick to the face.",
"espoem's log statements are always at the FATAL level.",
'espoem can win in a game of Russian roulette with a fully loaded gun.',
'Nothing can escape the gravity of a black hole, except for espoem. espoem eats black holes. They taste like chicken.',
'There is no theory of evolution, just a list of creatures espoem allows to live.',
'A study showed the leading causes of death in the United States are: 1. Heart disease, 2. espoem, 3. Cancer',
'Everybody loves Raymond. Except espoem.',
'Noah was the only man notified before espoem relieved himself in the Atlantic Ocean.',
'In a tagteam match, espoem was teamed with Hulk Hogan against King Kong Bundy and Andre The Giant. He pinned all 3 at the same time.',
"Nobody doesn't like Sara Lee. Except espoem.",
"espoem never has to wax his skis because they're always slick with blood.",
'espoem ordered a Big Mac at Burger King, and got one.',
'espoem owns a chain of fast-food restaurants throughout the southwest. They serve nothing but barbecue-flavored ice cream and Hot Pockets.',
"espoem's database has only one table, 'Kick', which he DROPs frequently.",
"espoem built a time machine and went back in time to stop the JFK assassination. As Oswald shot, espoem met all three bullets with his beard, deflecting them. JFK's head exploded out of sheer amazement.",
'espoem can write infinite recursion functions, and have them return.',
'When espoem does division, there are no remainders.',
'We live in an expanding universe. All of it is trying to get away from espoem.',
'espoem cannot love, he can only not kill.',
'espoem knows the value of NULL, and he can sort by it too.',
'There is no such thing as global warming. espoem was cold, so he turned the sun up.',
'The best-laid plans of mice and men often go awry. Even the worst-laid plans of espoem come off without a hitch.',
'When espoem goes to donate blood, he declines the syringe, and instead requests a hand gun and a bucket.',
'espoem can solve the Towers of Hanoi in one move.',
'All roads lead to espoem. And by the transitive property, a roundhouse kick to the face.',
'If you were somehow able to land a punch on espoem your entire arm would shatter upon impact. This is only in theory, since, come on, who in their right mind would try this?',
'One time, at band camp, espoem ate a percussionist.',
'Product Owners never argue with espoem after he demonstrates the DropKick feature.',
'espoem can read from an input stream.',
         "The original draft of The Lord of the Rings featured espoem instead of Frodo Baggins. It was only 5 pages long, as espoem roundhouse-kicked Sauron's ass halfway through the first chapter.",
"If, by some incredible space-time paradox, espoem would ever fight himself, he'd win. Period.",
'When taking the SAT, write espoem for every answer. You will score over 8000.',
         'When in a bar, you can order a drink called an espoem. It is also known as a Bloody Mary, if your name happens to be Mary.',
'espoem causes the Windows Blue Screen of Death.',
'espoem went out of an infinite loop.',
'When Bruce Banner gets mad, he turns into the Hulk. When the Hulk gets mad, he turns into espoem.',
'espoem insists on strongly-typed programming languages.',
'espoem can blow bubbles with beef jerky.',
"espoem is widely predicted to be first black president. If you're thinking to yourself, But espoem isn't black, then you are dead wrong. And stop being a racist.",
'espoem once went skydiving, but promised never to do it again. One Grand Canyon is enough.',
"Godzilla is a Japanese rendition of espoem's first visit to Tokyo.",
         'espoem has the greatest Poker-Face of all time. He won the 1983 World Series of Poker, despite holding only a Joker, a Get out of Jail Free Monopoly card, a 2 of clubs, 7 of spades and a green #4 card from the game UNO.',
'Teenage Mutant Ninja Turtles is based on a true story: espoem once swallowed a turtle whole, and when he crapped it out, the turtle was six feet tall and had learned karate.',
"If you try to kill -9 espoem's programs, it backfires.",
"espoem' Penis is a third degree blackbelt, and an honorable 32nd-degree mason.",
'In ancient China there is a legend that one day a child will be born from a dragon, grow to be a man, and vanquish evil from the land. That man is not espoem, because espoem killed that man.',
'espoem can dereference NULL.',
'All arrays espoem declares are of infinite size, because espoem knows no bounds.',
'The pen is mighter than the sword, but only if the pen is held by espoem.',
"espoem doesn't step on toes. espoem steps on necks.",
'The truth will set you free. Unless espoem has you, in which case, forget it buddy!',
'Simply by pulling on both ends, espoem can stretch diamonds back into coal.',
'espoem does not style his hair. It lays perfectly in place out of sheer terror.',
'espoem once participated in the running of the bulls. He walked.',
'Never look a gift espoem in the mouth, because he will bite your damn eyes off.',
"If you Google search espoem getting his ass kicked you will generate zero results. It just doesn't happen.",
'espoem can unit test entire applications with a single assert.',
'On his birthday, espoem randomly selects one lucky child to be thrown into the sun.',
"Little known medical fact: espoem invented the Caesarean section when he roundhouse-kicked his way out of his monther's womb.",
"No one has ever spoken during review of espoem' code and lived to tell about it.",
'The First rule of espoem is: you do not talk about espoem.',
'Fool me once, shame on you. Fool espoem once and he will roundhouse kick you in the face.',
"espoem doesn't read books. He stares them down until he gets the information he wants.",
"The phrase 'balls to the wall' was originally conceived to describe espoem entering any building smaller than an aircraft hangar.",
"Someone once tried to tell espoem that roundhouse kicks aren't the best way to kick someone. This has been recorded by historians as the worst mistake anyone has ever made.",
'Along with his black belt, espoem often chooses to wear brown shoes. No one has DARED call him on it. Ever.',
'Whiteboards are white because espoem scared them that way.',
'espoem drives an ice cream truck covered in human skulls.',
"Every time espoem smiles, someone dies. Unless he smiles while he's roundhouse kicking someone in the face. Then two people die."]
| [] |
hshayya/2022_Shayya_UPR_Guidance | imageproc_OE_IF_quant/2_annotate_extracted_cells.py | b9a305a147a105c3ac9c0173e06b94f66e4a6102 | import xml.etree.ElementTree as ET
import csv
import os
import re
from ij import IJ
from loci.plugins.in import ImporterOptions
from loci.plugins import BF
from ij.plugin import ImagesToStack
from ij import io
#Records metadata (x,y location) for cells that were extracted with 1_find_extract_cells.py
#metadata will be used in subsequent analysis to cluster cells from similar locations on the section -> semi-quantitative, local analysis
def parse_cellcounter_to_dict(fpath):
'''Parse Cell-Counter Xml file to Dictionary
Inputs:
fpath (str) path to xml file on disk
Values:
(dict). Keys 'x_cal', 'y_cal' = (float) calibrations in each axis.
Keys '1'-'8' = (lists) of tuples containing cell positions in the form (x,y)
'''
tree = ET.parse(fpath)
cells_dict = {}
cells_dict['x_cal'] = float(tree.find('./Image_Properties/X_Calibration').text)
cells_dict['y_cal'] = float(tree.find('./Image_Properties/Y_Calibration').text)
rt = tree.find('Marker_Data') #re-root the tree
for type_ in rt.iter('Marker_Type'):
cells = []
for marker_ in type_.iter('Marker'):
cells.append((int(marker_[0].text), int(marker_[1].text)))
#
cells_dict[type_.find('Type').text] = cells
return cells_dict
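#Example (hypothetical file name, not part of this pipeline): marker positions are stored in
#downsampled pixels and are converted to micron coordinates with the stored calibrations,
#mirroring the loop at the bottom of this script:
#  cells = parse_cellcounter_to_dict('/path/to/CellCounter_example-Downsampled.xml')
#  x_um = cells['7'][0][0] * cells['x_cal']
#  y_um = cells['7'][0][1] * cells['y_cal']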
#Load Xml Files
xml_locs = ['/path/to/xml/files'] #same as used in find_extract_cells
xml_files = [os.path.join(base_, f) for base_ in xml_locs for f in os.listdir(base_) if f[-3:] == 'xml' and f[0] != '.']
#Work through each xml file
f_out_path = '/path/to/annotation/out.tsv'
with open(f_out_path,'w') as fout:
fout.write('\t'.join(['cell','x_um','y_um']))
for e,xml_ in enumerate(xml_files):
print 'Working on file: ' + os.path.split(xml_)[1] + '...' + str(e+1) + '/' + str(len(xml_files))
#Find the orig .nd2 file, copied from find_extract_cells.py, see that code for more details.
orig_f_name = re.search('(?<=CellCounter_).*(?=\\-Downsampled)', os.path.split(xml_)[1]).group() + '.nd2'
search_dir = '/'.join(os.path.split(xml_)[0].split('/')[:-1])
files_found = [os.path.join(root, f) for (root, dirs, files) in os.walk(search_dir) for f in files if f == orig_f_name]
if len(files_found) == 1:
fullres_image = files_found[0]
else:
print "Could not find fullres image."
raise ValueError('Found 0 or >1 matching file')
#Generate the original inputs that were passed to extract_cells
input_item = (re.search('(?<=_).*',orig_f_name[:-4]).group(), {'fullres':fullres_image, 'counter':parse_cellcounter_to_dict(xml_)})
input_dict = input_item
types_of_interest={'7':'tdtom','8':'gfp'}
#Copied from the "Extract Cells", recovering positional info and writing to disk instead of extracting cell -> small image.
anim, vals = input_dict
#Loop through Cells and Annotate.
for cell_type, cell_label in types_of_interest.iteritems():
print 'Working on cell_type ' + cell_label
for i in range(len(vals['counter'][cell_type])):
print 'Iteration ' + str(i+1) + '/' + str(len(vals['counter'][cell_type]))
#Convert Px Downsampled -> Px Full Res
x_full_px = vals['counter'][cell_type][i][0] * vals['counter']['x_cal'] #in um
y_full_px = vals['counter'][cell_type][i][1] * vals['counter']['y_cal'] #in um
#Write Information
out_title = '_'.join([anim, cell_label, str(i)])
fout.write('\n' + '\t'.join([out_title, str(x_full_px), str(y_full_px)]))
#Final tsv of form cell_label,x,y. | [] |
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py | 15810a559e2f2cf9e5fcb158c083f9e9dd6012fc | from env_SingleCatchPigs import EnvSingleCatchPigs
import random
env = EnvSingleCatchPigs(7)
max_iter = 10000
env.set_agent_at([2, 2], 0)
env.set_pig_at([4, 4], 0)
for i in range(max_iter):
print("iter= ", i)
env.render()
action = random.randint(0, 4)
print('action is', action)
reward, done = env.step(action)
print('reward', reward, 'done', done)
if reward > 0:
print('catch the pig', reward, done)
| [((72, 93), 'env_SingleCatchPigs.EnvSingleCatchPigs', 'EnvSingleCatchPigs', (['(7)'], {}), '(7)\n', (90, 93), False, 'from env_SingleCatchPigs import EnvSingleCatchPigs\n'), ((244, 264), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (258, 264), False, 'import random\n')] |
rasmuse/eust | eust/tables/data.py | 2138076d52c0ffa20fba10e4e0319dd50c4e8a91 | # -*- coding: utf-8 -*-
import re
import gzip
import pandas as pd
import numpy as np
from eust.core import _download_file, conf
_DIMENSION_NAME_RE = re.compile(r"^[a-z_0-9]+$")
_YEAR_RE = re.compile(r"^(1|2)[0-9]{3}$")
def _is_valid_dimension_name(s: str) -> bool:
return bool(_DIMENSION_NAME_RE.match(s))
def _split_values_flags(series: pd.Series) -> pd.DataFrame:
split = series.str.split(" ")
df = pd.DataFrame(
{
"value": split.apply(lambda l: l[0] if l else None),
"flag": split.apply(lambda l: l[1] if l and len(l) > 1 else None),
}
)
return df
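# Illustrative behaviour (hypothetical cell values): "12.3 p" splits into value "12.3" with
# flag "p", while a bare "12.3" gets flag None; the ":" missing-value marker is mapped to
# NaN further below in _read_tsv.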
def _set_multiindex_dtype(index, level, type_):
index_df = index.to_frame()
index_df[level] = index_df[level].astype(type_)
new_index = index_df.set_index(index.names).index
return new_index
def _read_tsv(path_or_buffer) -> pd.DataFrame:
d = pd.read_csv(path_or_buffer, sep="\t", header=0, dtype=str)
top_left_cell = d.columns[0]
row_dimension_names, header_dimension_name = top_left_cell.split("\\")
row_dimension_names = row_dimension_names.split(",")
index_data = d[top_left_cell]
del d[top_left_cell]
assert len(set(index_data)) == len(index_data) # no duplicates
assert len(row_dimension_names) >= 1
d.columns.name = header_dimension_name
index_data = index_data.apply(lambda s: s.split(","))
d.index = pd.MultiIndex.from_arrays(
list(zip(*index_data)), names=row_dimension_names,
)
# cannot handle multidimensional column labels
d = d.stack()
assert set(d.apply(type)) == {str}
assert isinstance(d, pd.Series), d.columns
assert all(map(_is_valid_dimension_name, d.index.names))
d.index.set_levels(
[level.str.strip() for level in d.index.levels], inplace=True
)
d = _split_values_flags(d)
d.loc[d["value"] == ":", "value"] = np.nan
d["value"] = d["value"].astype(float)
if "time" in d.index.names:
time_strings = d.index.unique("time")
matches_year = (_YEAR_RE.match(s) for s in time_strings)
if all(matches_year):
d.index = _set_multiindex_dtype(d.index, "time", int)
d = d.sort_index()
return d
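# A minimal sketch of the expected input (Eurostat bulk-download style, tab-separated):
#
#   unit,geo\time   2016    2017
#   PC,AT           1.0     1.1 p
#   PC,BE           :       2.3
#
# The top-left cell lists the row dimensions (comma separated) and, after the backslash,
# the header dimension; a value may carry a flag after a space, and ":" marks missing data.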
_TSV_GZ_FILENAME = "data.tsv.gz"
_HDF_FILENAME = "data.h5"
_HDF_TABLE_PATH = "eurostat_table"
def _read_tsv_gz(path_or_buffer) -> pd.DataFrame:
with gzip.open(path_or_buffer, "rb") as f:
return _read_tsv(f)
def _download_tsv_gz(url, dst_dir):
path = dst_dir / _TSV_GZ_FILENAME
_download_file(url, path)
def _read(the_dir):
hdf_path = the_dir / _HDF_FILENAME
tsv_gz_path = the_dir / _TSV_GZ_FILENAME
try:
data = pd.read_hdf(hdf_path, _HDF_TABLE_PATH)
except FileNotFoundError:
data = _read_tsv_gz(tsv_gz_path)
data.to_hdf(
hdf_path,
_HDF_TABLE_PATH,
complevel=conf["hdf_complevel"],
complib=conf["hdf_complib"],
)
# Replace empty flags by None (issue #3)
#
# Doing it at this point so that the null flag is saved in the HDF
# file as a string, for performance reasons.
# This is a pandas PerformanceWarning:
# "your performance may suffer as PyTables will pickle object types
# that it cannot map directly to c-types
# [inferred_type->mixed,key->block0_values] [items->['flag']]"
data["flag"] = data["flag"].replace({"": None})
return data
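# Typical flow (hypothetical names), assuming `url` points at a Eurostat tsv.gz dump and
# `table_dir` is a pathlib.Path for this table's cache directory:
#   _download_tsv_gz(url, table_dir)
#   table = _read(table_dir)  # parses the tsv.gz once, then reuses the data.h5 cache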
| [((154, 180), 're.compile', 're.compile', (['"""^[a-z_0-9]+$"""'], {}), "('^[a-z_0-9]+$')\n", (164, 180), False, 'import re\n'), ((193, 222), 're.compile', 're.compile', (['"""^(1|2)[0-9]{3}$"""'], {}), "('^(1|2)[0-9]{3}$')\n", (203, 222), False, 'import re\n'), ((886, 944), 'pandas.read_csv', 'pd.read_csv', (['path_or_buffer'], {'sep': '"""\t"""', 'header': '(0)', 'dtype': 'str'}), "(path_or_buffer, sep='\\t', header=0, dtype=str)\n", (897, 944), True, 'import pandas as pd\n'), ((2514, 2539), 'eust.core._download_file', '_download_file', (['url', 'path'], {}), '(url, path)\n', (2528, 2539), False, 'from eust.core import _download_file, conf\n'), ((2368, 2399), 'gzip.open', 'gzip.open', (['path_or_buffer', '"""rb"""'], {}), "(path_or_buffer, 'rb')\n", (2377, 2399), False, 'import gzip\n'), ((2670, 2708), 'pandas.read_hdf', 'pd.read_hdf', (['hdf_path', '_HDF_TABLE_PATH'], {}), '(hdf_path, _HDF_TABLE_PATH)\n', (2681, 2708), True, 'import pandas as pd\n')] |
fatemehtd/Echo-SyncNet | utils.py | ebb280e83a67b31436c4cfa420f9c06a92ac8c12 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import CONFIG
import json
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import io
import math
import os
import time
from absl import flags
from absl import logging
from easydict import EasyDict
import matplotlib
matplotlib.use('Agg')
FLAGS = flags.FLAGS
def visualize_batch(data, global_step, batch_size, num_steps):
"""Visualizes a batch."""
frames = data['frames']
frames_list = tf.unstack(frames, num=num_steps, axis=1)
frames_summaries = tf.concat(frames_list, axis=2)
batch_list = tf.split(frames_summaries, batch_size, axis=0)
batch_summaries = tf.concat(batch_list, axis=1)
tf.summary.image('train_batch', batch_summaries, step=global_step)
def visualize_nearest_neighbours(model, data, global_step, batch_size,
num_steps, num_frames_per_step, split):
"""Visualize nearest neighbours in embedding space."""
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
cnn = model['cnn']
emb = model['emb']
if 'tcn' in CONFIG.TRAINING_ALGO:
cnn_feats = get_cnn_feats(
cnn, data, training=False, num_steps=2 * num_steps)
emb_feats = emb(cnn_feats, 2 * num_steps)
emb_feats = tf.stack(
tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1)
else:
cnn_feats = get_cnn_feats(cnn, data, training=False)
emb_feats = emb(cnn_feats, num_steps)
emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)
query_feats = emb_feats[0]
if CONFIG.OPTICALFLOW:
frames = data['video_frames']
else:
frames = data['frames']
image_list = tf.unstack(frames, num=batch_size, axis=0)
if 'tcn' in CONFIG.TRAINING_ALGO:
im_list = [image_list[0]
[num_frames_per_step - 1::num_frames_per_step][::2]]
else:
im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]]
sim_matrix = np.zeros(
(batch_size-1, num_steps, num_steps), dtype=np.float32)
for i in range(1, batch_size):
candidate_feats = emb_feats[i]
if 'tcn' in CONFIG.TRAINING_ALGO:
img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step][::2]
else:
img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step]
nn_img_list = []
for j in range(num_steps):
curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])
mean_squared_distance = tf.reduce_mean(
tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)
sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)
nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])
nn_img = tf.stack(nn_img_list, axis=0)
im_list.append(nn_img)
def vstack(im):
return tf.concat(tf.unstack(im, num=num_steps), axis=1)
summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],
axis=0), axis=0)
tf.summary.image('%s/nn' % split, summary_im, step=global_step)
# Convert sim_matrix to float32 as summary_image doesn't take float64
sim_matrix = sim_matrix.astype(np.float32)
tf.summary.image('%s/similarity_matrix' % split,
np.expand_dims(sim_matrix, axis=3), step=global_step)
def softmax(w, t=1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def random_choice_noreplace(m, n, axis=-1):
# Generate m random permuations of range (0, n)
# NumPy version: np.random.rand(m,n).argsort(axis=axis)
return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)
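# For example, random_choice_noreplace(2, 3) might return [[2, 0, 1], [1, 2, 0]]: every row
# is a random permutation of range(n), so taking the first k columns samples k distinct
# indices per row without replacement.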
def gen_cycles(num_cycles, batch_size, cycle_len):
"""Generate cycles for alignment."""
random_cycles = random_choice_noreplace(
num_cycles, batch_size)[:, :cycle_len]
return random_cycles
def get_warmup_lr(lr, global_step, lr_params):
"""Returns learning rate during warm up phase."""
if lr_params.NUM_WARMUP_STEPS > 0:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(
lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_lr = lr_params.INITIAL_LR * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr
return lr
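# Worked example (hypothetical config values): with INITIAL_LR=1e-3 and
# NUM_WARMUP_STEPS=1000, step 250 yields 1e-3 * 250/1000 = 2.5e-4; once
# global_step >= 1000 the incoming lr is returned unchanged.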
# Minimally adapted from Tensorflow object_detection code.
def manual_stepping(global_step, boundaries, rates):
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(
tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)), [0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))
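# Example (hypothetical schedule): manual_stepping(step, [1000, 2000], [1e-3, 1e-4, 1e-5])
# returns 1e-3 for step < 1000, 1e-4 for 1000 <= step < 2000, and 1e-5 afterwards;
# rate_index is the last boundary already reached, and the one-hot picks the matching rate.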
def get_lr_fn(optimizer_config):
"""Returns function that provides current learning rate based on config.
NOTE: This returns a function as in Eager we need to call assign to update
the learning rate.
Args:
optimizer_config: EasyDict, contains params required to initialize the
learning rate and the learning rate decay function.
Returns:
lr_fn: function, this can be called to return the current learning rate
based on the provided config.
Raises:
ValueError: in case invalid params have been passed in the config.
"""
lr_params = optimizer_config.LR
# pylint: disable=g-long-lambda
if lr_params.DECAY_TYPE == 'exp_decay':
def lr_fn(lr, global_step): return tf.train.exponential_decay(
lr,
global_step,
lr_params.EXP_DECAY_STEPS,
lr_params.EXP_DECAY_RATE,
staircase=True)()
elif lr_params.DECAY_TYPE == 'manual':
lr_step_boundaries = [int(x)
for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]
f = lr_params.MANUAL_LR_DECAY_RATE
learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p
for p in range(len(lr_step_boundaries) + 1)]
def lr_fn(lr, global_step): return manual_stepping(
global_step, lr_step_boundaries, learning_rate_sequence)
elif lr_params.DECAY_TYPE == 'fixed':
def lr_fn(lr, global_step): return lr_params.INITIAL_LR
elif lr_params.DECAY_TYPE == 'poly':
def lr_fn(lr, global_step): return tf.train.polynomial_decay(
lr,
global_step,
CONFIG.TRAIN.MAX_ITERS,
end_learning_rate=0.0,
power=1.0,
cycle=False)
else:
        raise ValueError('Learning rate decay type %s not supported. Only support '
                         'the following decay types: fixed, exp_decay, manual, '
                         'and poly.' % lr_params.DECAY_TYPE)
return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),
global_step, lr_params))
def get_optimizer(optimizer_config, learning_rate):
"""Returns optimizer based on config and learning rate."""
if optimizer_config.TYPE == 'AdamOptimizer':
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif optimizer_config.TYPE == 'MomentumOptimizer':
opt = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9)
else:
        raise ValueError('Optimizer %s not supported. Only support the following '
                         'optimizers: AdamOptimizer, MomentumOptimizer.' % optimizer_config.TYPE)
return opt
def get_lr_opt_global_step():
"""Intializes learning rate, optimizer and global step."""
optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)
global_step = optimizer.iterations
learning_rate = optimizer.learning_rate
return learning_rate, optimizer, global_step
def create_ckpt(logdir, restore=False, **ckpt_objects):
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(
ckpt_manager.latest_checkpoint) if restore else -1
return ckpt_manager, status, checkpoint
def restore_ckpt(logdir, **ckpt_objects):
"""Create and restore checkpoint (if one exists on the path)."""
# Instantiate checkpoint and restore from any pre-existing checkpoint.
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(ckpt_manager.latest_checkpoint)
return ckpt_manager, status, checkpoint
def to_dict(config):
if isinstance(config, list):
return [to_dict(c) for c in config]
elif isinstance(config, EasyDict):
return dict([(k, to_dict(v)) for k, v in config.items()])
else:
return config
def setup_train_dir(logdir, overwrite=False, force_train=True):
"""Setups directory for training."""
tf.io.gfile.makedirs(logdir)
config_path = os.path.join(logdir, 'config.json')
if not os.path.exists(config_path) or overwrite:
logging.info(
'Using the existing passed in config as no config.json file exists in '
'%s', logdir)
with tf.io.gfile.GFile(config_path, 'w') as config_file:
config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])
json.dump(config, config_file, sort_keys=True, indent=4)
else:
logging.info(
'Using config from config.json that exists in %s.', logdir)
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
CONFIG.update(config_dict)
train_logs_dir = os.path.join(logdir, 'train.logs')
if os.path.exists(train_logs_dir) and not force_train:
raise ValueError('You might be overwriting a directory that already '
'has train_logs. Please provide a new logdir name in '
'config or pass --force_train while launching script.')
tf.io.gfile.makedirs(train_logs_dir)
def setup_eval_dir(logdir, config_timeout_seconds=1):
"""Setups directory for evaluation."""
tf.io.gfile.makedirs(logdir)
tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))
config_path = os.path.join(logdir, 'config.json')
while not tf.io.gfile.exists(config_path):
logging.info('Waiting for config to exist. Going to sleep '
' %s for secs.', config_timeout_seconds)
time.sleep(config_timeout_seconds)
while True:
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
if config_dict is None:
time.sleep(config_timeout_seconds)
else:
break
CONFIG.update(config_dict)
def get_data(iterator):
"""Return a data dict which contains all the requested sequences."""
data = iterator.get_next()
return data, data['chosen_steps'], data['seq_lens']
@tf.function
def get_cnn_feats(cnn, data, training, num_steps=None):
"""Passes data through base CNN."""
if num_steps is None:
if training:
num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
else:
num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
cnn.num_steps = num_steps
cnn_feats = cnn(data['frames'])
return cnn_feats
def get_context_steps(step):
num_steps = CONFIG.DATA.NUM_STEPS
stride = CONFIG.DATA.FRAME_STRIDE
# We don't want to see the future.
steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)
return steps
def get_indices(curr_idx, num_steps, seq_len):
steps = range(curr_idx, curr_idx + num_steps)
    single_steps = np.concatenate([get_context_steps(step) for step in steps])
single_steps = np.maximum(0, single_steps)
single_steps = np.minimum(seq_len, single_steps)
return single_steps
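# Worked example (hypothetical config): with CONFIG.DATA.NUM_STEPS=2 and FRAME_STRIDE=5,
# get_context_steps(20) -> [15, 20], so get_indices(20, num_steps=2, seq_len=100) gathers
# frames [15, 20, 16, 21]: each requested step plus its context frame, clipped to the video.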
def get_embeddings_dataset(model, iterator, frames_per_batch,
keep_data=False, optical_flow=False, keep_labels=True,
max_embs=None, callbacks=[]):
"""Get embeddings from a one epoch iterator."""
keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS
num_frames_per_step = CONFIG.DATA.NUM_STEPS
cnn = model['cnn']
emb = model['emb']
embs_list = []
labels_list = []
steps_list = []
seq_lens_list = []
names_list = []
seq_labels_list = []
if keep_data:
frames_list = []
if optical_flow:
frame_original_list = []
n = 0
def cond(n):
if max_embs is None:
return True
else:
return n < max_embs
# Make Recurrent Layers stateful, set batch size.
# We do this as we are embedding the whole sequence and that can take
# more than one batch to be passed and we don't want to automatically
# reset hidden states after each batch.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = True
gru_layer.input_spec[0].shape = [1, ]
while cond(n):
try:
print(n)
embs = []
labels = []
steps = []
seq_lens = []
names = []
seq_labels = []
if keep_data:
frames = []
if optical_flow:
frame_original = []
# Reset GRU states for each video.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.reset_states()
data, chosen_steps, seq_len = get_data(iterator)
seq_len = seq_len.numpy()[0]
num_batches = int(math.ceil(float(seq_len)/frames_per_batch))
for i in range(num_batches):
if (i + 1) * frames_per_batch > seq_len:
num_steps = seq_len - i * frames_per_batch
else:
num_steps = frames_per_batch
curr_idx = i * frames_per_batch
curr_data = {}
for k, v in data.items():
# Need to do this as some modalities might not exist.
if len(v.shape) > 1 and v.shape[1] != 0:
idxes = get_indices(curr_idx, num_steps, seq_len)
curr_data[k] = tf.gather(v, idxes, axis=1)
else:
curr_data[k] = v
cnn_feats = get_cnn_feats(cnn, curr_data,
num_steps=num_frames_per_step * num_steps,
training=False)
emb_feats = emb(cnn_feats, num_steps)
logging.debug('On sequence number %d, frames embedded %d', n,
curr_idx + num_steps)
# np.save(tf.io.gfile.GFile('/air/team/saman/test_weights_old.npy', 'w'), cnn.weights[0].numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_batch_old.npy', 'w'), curr_data["frames"])
# np.save(tf.io.gfile.GFile('/air/team/saman/test_cnn_old.npy', 'w'), cnn_feats.numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_emb_old.npy', 'w'), emb_feats.numpy())
embs.append(emb_feats.numpy())
for f in callbacks:
f(np.concatenate(embs), data, chosen_steps, seq_len)
steps.append(chosen_steps.numpy()[0])
seq_lens.append(seq_len * [seq_len])
all_labels = data['frame_labels'].numpy()[0]
name = data['name'].numpy()[0]
names.append(seq_len * [name])
seq_label = data['seq_labels'].numpy()[0]
seq_labels.append(seq_len * [seq_label])
labels.append(all_labels)
embs = np.concatenate(embs, axis=0)
labels = np.concatenate(labels, axis=0)
steps = np.concatenate(steps, axis=0)
seq_lens = np.concatenate(seq_lens, axis=0)
names = np.concatenate(names, axis=0)
seq_labels = np.concatenate(seq_labels, axis=0)
if keep_data:
frames.append(data['frames'].numpy()[0])
frames = np.concatenate(frames, axis=0)
if optical_flow:
frame_original.append(data['video_frames'].numpy()[0])
frame_original = np.concatenate(frame_original, axis=0)
if keep_labels:
labels = labels[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(labels)
seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]
names = names[~np.isnan(embs).any(axis=1)]
seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]
steps = steps[~np.isnan(embs).any(axis=1)]
if keep_data:
frames = frames[~np.isnan(embs).any(axis=1)]
if optical_flow:
frame_original = frame_original[~np.isnan(embs).any(axis=1)]
embs = embs[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(seq_lens)
assert len(embs) == len(steps)
assert len(names) == len(steps)
embs_list.append(embs)
if keep_labels:
labels_list.append(labels)
seq_labels_list.append(seq_labels)
steps_list.append(steps)
seq_lens_list.append(seq_lens)
names_list.append(names)
if keep_data:
frames_list.append(frames)
if optical_flow:
frame_original_list.append(frame_original)
n += 1
except tf.errors.OutOfRangeError:
logging.info('Finished embedding the dataset.')
break
dataset = {'embs': embs_list,
'seq_lens': seq_lens_list,
'steps': steps_list,
'names': names_list,
'seq_labels': seq_labels_list}
if keep_data:
dataset['frames'] = frames_list
if optical_flow:
dataset['frames_original'] = frame_original_list
if keep_labels:
dataset['labels'] = labels_list
# Reset statefulness to recurrent layers for other evaluation tasks.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = False
return dataset
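# Typical call (hypothetical names), assuming `model` is the {'cnn': ..., 'emb': ...} dict
# used above and `one_epoch_iterator` yields every eval video exactly once:
#   dataset = get_embeddings_dataset(model, one_epoch_iterator, frames_per_batch=25)
#   per_video_embs = dataset['embs']  # one (num_valid_frames, embedding_dim) array per video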
def gen_plot(x, y):
"""Create a pyplot, save to buffer and return TB compatible image."""
plt.figure()
plt.plot(x, y)
plt.title('Val Accuracy')
plt.ylim(0, 1)
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
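# Usage sketch (summary_writer is assumed, not defined in this module):
#   with summary_writer.as_default():
#       tf.summary.image('val/accuracy_curve', gen_plot(steps, accuracies), step=global_step)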
class Stopwatch(object):
"""Simple timer for measuring elapsed time."""
def __init__(self):
self.reset()
def elapsed(self):
return time.time() - self.time
def done(self, target_interval):
return self.elapsed() >= target_interval
def reset(self):
self.time = time.time()
def set_learning_phase(f):
"""Sets the correct learning phase before calling function f."""
def wrapper(*args, **kwargs):
"""Calls the function f after setting proper learning phase."""
if 'training' not in kwargs:
raise ValueError('Function called with set_learning_phase decorator which'
' does not have training argument.')
training = kwargs['training']
if training:
# Set learning_phase to True to use models in training mode.
tf.keras.backend.set_learning_phase(1)
else:
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
return f(*args, **kwargs)
return wrapper
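# Usage sketch (hypothetical function): the decorated callable must be invoked with an
# explicit `training` keyword so the proper Keras learning phase is set first:
#   @set_learning_phase
#   def embed(model, data, training):
#       return model(data)
#   feats = embed(model, data, training=False)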
def load_config(config_path):
config = None
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
assert config is not None, "config file is not provided or is corrupted"
return config
def prepare_gpu(ind=-1):
ind = int(ind)
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
if ind > -1:
tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU')
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus),
"Logical GPUs"])
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
| [((406, 427), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (420, 427), False, 'import matplotlib\n'), ((591, 632), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'num_steps', 'axis': '(1)'}), '(frames, num=num_steps, axis=1)\n', (601, 632), True, 'import tensorflow as tf\n'), ((656, 686), 'tensorflow.concat', 'tf.concat', (['frames_list'], {'axis': '(2)'}), '(frames_list, axis=2)\n', (665, 686), True, 'import tensorflow as tf\n'), ((704, 750), 'tensorflow.split', 'tf.split', (['frames_summaries', 'batch_size'], {'axis': '(0)'}), '(frames_summaries, batch_size, axis=0)\n', (712, 750), True, 'import tensorflow as tf\n'), ((773, 802), 'tensorflow.concat', 'tf.concat', (['batch_list'], {'axis': '(1)'}), '(batch_list, axis=1)\n', (782, 802), True, 'import tensorflow as tf\n'), ((807, 873), 'tensorflow.summary.image', 'tf.summary.image', (['"""train_batch"""', 'batch_summaries'], {'step': 'global_step'}), "('train_batch', batch_summaries, step=global_step)\n", (823, 873), True, 'import tensorflow as tf\n'), ((1150, 1188), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (1185, 1188), True, 'import tensorflow as tf\n'), ((1874, 1916), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'batch_size', 'axis': '(0)'}), '(frames, num=batch_size, axis=0)\n', (1884, 1916), True, 'import tensorflow as tf\n'), ((2169, 2235), 'numpy.zeros', 'np.zeros', (['(batch_size - 1, num_steps, num_steps)'], {'dtype': 'np.float32'}), '((batch_size - 1, num_steps, num_steps), dtype=np.float32)\n', (2177, 2235), True, 'import numpy as np\n'), ((3456, 3519), 'tensorflow.summary.image', 'tf.summary.image', (["('%s/nn' % split)", 'summary_im'], {'step': 'global_step'}), "('%s/nn' % split, summary_im, step=global_step)\n", (3472, 3519), True, 'import tensorflow as tf\n'), ((8624, 8659), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (8643, 8659), True, 'import tensorflow as tf\n'), ((8679, 8788), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (8705, 8788), True, 'import tensorflow as tf\n'), ((9241, 9276), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (9260, 9276), True, 'import tensorflow as tf\n'), ((9296, 9405), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (9322, 9405), True, 'import tensorflow as tf\n'), ((9891, 9919), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), '(logdir)\n', (9911, 9919), True, 'import tensorflow as tf\n'), ((9938, 9973), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (9950, 9973), False, 'import os\n'), ((10640, 10674), 'os.path.join', 'os.path.join', (['logdir', '"""train.logs"""'], {}), "(logdir, 'train.logs')\n", (10652, 10674), False, 'import os\n'), ((10977, 11013), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10997, 11013), True, 'import tensorflow as tf\n'), ((11117, 11145), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), 
'(logdir)\n', (11137, 11145), True, 'import tensorflow as tf\n'), ((11224, 11259), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (11236, 11259), False, 'import os\n'), ((11727, 11753), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (11740, 11753), False, 'from config import CONFIG\n'), ((12501, 12566), 'numpy.arange', 'np.arange', (['(step - (num_steps - 1) * stride)', '(step + stride)', 'stride'], {}), '(step - (num_steps - 1) * stride, step + stride, stride)\n', (12510, 12566), True, 'import numpy as np\n'), ((12952, 12979), 'numpy.maximum', 'np.maximum', (['(0)', 'single_steps'], {}), '(0, single_steps)\n', (12962, 12979), True, 'import numpy as np\n'), ((12999, 13032), 'numpy.minimum', 'np.minimum', (['seq_len', 'single_steps'], {}), '(seq_len, single_steps)\n', (13009, 13032), True, 'import numpy as np\n'), ((19706, 19718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19716, 19718), True, 'import matplotlib.pyplot as plt\n'), ((19723, 19737), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (19731, 19737), True, 'import matplotlib.pyplot as plt\n'), ((19742, 19767), 'matplotlib.pyplot.title', 'plt.title', (['"""Val Accuracy"""'], {}), "('Val Accuracy')\n", (19751, 19767), True, 'import matplotlib.pyplot as plt\n'), ((19772, 19786), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (19780, 19786), True, 'import matplotlib.pyplot as plt\n'), ((19791, 19809), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19807, 19809), True, 'import matplotlib.pyplot as plt\n'), ((19820, 19832), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (19830, 19832), False, 'import io\n'), ((19837, 19867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (19848, 19867), True, 'import matplotlib.pyplot as plt\n'), ((20023, 20047), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (20037, 20047), True, 'import tensorflow as tf\n'), ((21221, 21248), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (21235, 21248), False, 'import os\n'), ((21475, 21526), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21519, 21526), True, 'import tensorflow as tf\n'), ((3172, 3201), 'tensorflow.stack', 'tf.stack', (['nn_img_list'], {'axis': '(0)'}), '(nn_img_list, axis=0)\n', (3180, 3201), True, 'import tensorflow as tf\n'), ((3715, 3749), 'numpy.expand_dims', 'np.expand_dims', (['sim_matrix'], {'axis': '(3)'}), '(sim_matrix, axis=3)\n', (3729, 3749), True, 'import numpy as np\n'), ((3841, 3850), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (3847, 3850), True, 'import numpy as np\n'), ((4484, 4514), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (4491, 4514), True, 'import tensorflow as tf\n'), ((4542, 4597), 'tensorflow.constant', 'tf.constant', (['lr_params.NUM_WARMUP_STEPS'], {'dtype': 'tf.int32'}), '(lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)\n', (4553, 4597), True, 'import tensorflow as tf\n'), ((4641, 4678), 'tensorflow.cast', 'tf.cast', (['global_steps_int', 'tf.float32'], {}), '(global_steps_int, tf.float32)\n', (4648, 4678), True, 'import tensorflow as tf\n'), ((4708, 4745), 'tensorflow.cast', 'tf.cast', (['warmup_steps_int', 'tf.float32'], {}), '(warmup_steps_int, tf.float32)\n', (4715, 4745), True, 
'import tensorflow as tf\n'), ((4901, 4957), 'tensorflow.cast', 'tf.cast', (['(global_steps_int < warmup_steps_int)', 'tf.float32'], {}), '(global_steps_int < warmup_steps_int, tf.float32)\n', (4908, 4957), True, 'import tensorflow as tf\n'), ((7775, 7828), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7799, 7828), True, 'import tensorflow as tf\n'), ((10035, 10139), 'absl.logging.info', 'logging.info', (['"""Using the existing passed in config as no config.json file exists in %s"""', 'logdir'], {}), "(\n 'Using the existing passed in config as no config.json file exists in %s',\n logdir)\n", (10047, 10139), False, 'from absl import logging\n'), ((10383, 10455), 'absl.logging.info', 'logging.info', (['"""Using config from config.json that exists in %s."""', 'logdir'], {}), "('Using config from config.json that exists in %s.', logdir)\n", (10395, 10455), False, 'from absl import logging\n'), ((10591, 10617), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (10604, 10617), False, 'from config import CONFIG\n'), ((10682, 10712), 'os.path.exists', 'os.path.exists', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10696, 10712), False, 'import os\n'), ((11171, 11204), 'os.path.join', 'os.path.join', (['logdir', '"""eval_logs"""'], {}), "(logdir, 'eval_logs')\n", (11183, 11204), False, 'import os\n'), ((11274, 11305), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['config_path'], {}), '(config_path)\n', (11292, 11305), True, 'import tensorflow as tf\n'), ((11315, 11417), 'absl.logging.info', 'logging.info', (['"""Waiting for config to exist. Going to sleep %s for secs."""', 'config_timeout_seconds'], {}), "('Waiting for config to exist. 
Going to sleep %s for secs.',\n config_timeout_seconds)\n", (11327, 11417), False, 'from absl import logging\n'), ((11446, 11480), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11456, 11480), False, 'import time\n'), ((20381, 20392), 'time.time', 'time.time', ([], {}), '()\n', (20390, 20392), False, 'import time\n'), ((1669, 1707), 'tensorflow.split', 'tf.split', (['emb_feats', 'num_steps'], {'axis': '(0)'}), '(emb_feats, num_steps, axis=0)\n', (1677, 1707), True, 'import tensorflow as tf\n'), ((2825, 2870), 'tensorflow.tile', 'tf.tile', (['query_feats[j:j + 1]', '[num_steps, 1]'], {}), '(query_feats[j:j + 1], [num_steps, 1])\n', (2832, 2870), True, 'import tensorflow as tf\n'), ((3279, 3308), 'tensorflow.unstack', 'tf.unstack', (['im'], {'num': 'num_steps'}), '(im, num=num_steps)\n', (3289, 3308), True, 'import tensorflow as tf\n'), ((3809, 3820), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (3817, 3820), True, 'import numpy as np\n'), ((4055, 4080), 'tensorflow.random.uniform', 'tf.random.uniform', (['(m, n)'], {}), '((m, n))\n', (4072, 4080), True, 'import tensorflow as tf\n'), ((5279, 5320), 'tensorflow.greater_equal', 'tf.greater_equal', (['global_step', 'boundaries'], {}), '(global_step, boundaries)\n', (5295, 5320), True, 'import tensorflow as tf\n'), ((5419, 5463), 'tensorflow.one_hot', 'tf.one_hot', (['rate_index'], {'depth': 'num_boundaries'}), '(rate_index, depth=num_boundaries)\n', (5429, 5463), True, 'import tensorflow as tf\n'), ((7898, 7964), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)'}), '(learning_rate=learning_rate, momentum=0.9)\n', (7921, 7964), True, 'import tensorflow as tf\n'), ((9985, 10012), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (9999, 10012), False, 'import os\n'), ((10172, 10207), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""w"""'], {}), "(config_path, 'w')\n", (10189, 10207), True, 'import tensorflow as tf\n'), ((10308, 10364), 'json.dump', 'json.dump', (['config', 'config_file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(config, config_file, sort_keys=True, indent=4)\n', (10317, 10364), False, 'import json\n'), ((10482, 10517), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (10499, 10517), True, 'import tensorflow as tf\n'), ((10560, 10582), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (10569, 10582), False, 'import json\n'), ((11511, 11546), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (11528, 11546), True, 'import tensorflow as tf\n'), ((11589, 11611), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (11598, 11611), False, 'import json\n'), ((11656, 11690), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11666, 11690), False, 'import time\n'), ((17059, 17087), 'numpy.concatenate', 'np.concatenate', (['embs'], {'axis': '(0)'}), '(embs, axis=0)\n', (17073, 17087), True, 'import numpy as np\n'), ((17109, 17139), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (17123, 17139), True, 'import numpy as np\n'), ((17161, 17190), 'numpy.concatenate', 'np.concatenate', (['steps'], {'axis': '(0)'}), '(steps, axis=0)\n', (17175, 17190), True, 'import numpy as np\n'), ((17214, 17246), 'numpy.concatenate', 'np.concatenate', 
(['seq_lens'], {'axis': '(0)'}), '(seq_lens, axis=0)\n', (17228, 17246), True, 'import numpy as np\n'), ((17267, 17296), 'numpy.concatenate', 'np.concatenate', (['names'], {'axis': '(0)'}), '(names, axis=0)\n', (17281, 17296), True, 'import numpy as np\n'), ((17322, 17356), 'numpy.concatenate', 'np.concatenate', (['seq_labels'], {'axis': '(0)'}), '(seq_labels, axis=0)\n', (17336, 17356), True, 'import numpy as np\n'), ((20228, 20239), 'time.time', 'time.time', ([], {}), '()\n', (20237, 20239), False, 'import time\n'), ((20931, 20969), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(1)'], {}), '(1)\n', (20966, 20969), True, 'import tensorflow as tf\n'), ((21071, 21109), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (21106, 21109), True, 'import tensorflow as tf\n'), ((21308, 21320), 'json.load', 'json.load', (['f'], {}), '(f)\n', (21317, 21320), False, 'import json\n'), ((21573, 21633), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['GPUS[ind]', '"""GPU"""'], {}), "(GPUS[ind], 'GPU')\n", (21615, 21633), True, 'import tensorflow as tf\n'), ((21843, 21893), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21886, 21893), True, 'import tensorflow as tf\n'), ((1466, 1508), 'tensorflow.split', 'tf.split', (['emb_feats', '(2 * num_steps)'], {'axis': '(0)'}), '(emb_feats, 2 * num_steps, axis=0)\n', (1474, 1508), True, 'import tensorflow as tf\n'), ((2581, 2651), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=num_steps * num_frames_per_step, axis=0)\n', (2591, 2651), True, 'import tensorflow as tf\n'), ((2937, 2998), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['curr_query_feats', 'candidate_feats'], {}), '(curr_query_feats, candidate_feats)\n', (2963, 2998), True, 'import tensorflow as tf\n'), ((6209, 6325), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'global_step', 'lr_params.EXP_DECAY_STEPS', 'lr_params.EXP_DECAY_RATE'], {'staircase': '(True)'}), '(lr, global_step, lr_params.EXP_DECAY_STEPS,\n lr_params.EXP_DECAY_RATE, staircase=True)\n', (6235, 6325), True, 'import tensorflow as tf\n'), ((15907, 15994), 'absl.logging.debug', 'logging.debug', (['"""On sequence number %d, frames embedded %d"""', 'n', '(curr_idx + num_steps)'], {}), "('On sequence number %d, frames embedded %d', n, curr_idx +\n num_steps)\n", (15920, 15994), False, 'from absl import logging\n'), ((17465, 17495), 'numpy.concatenate', 'np.concatenate', (['frames'], {'axis': '(0)'}), '(frames, axis=0)\n', (17479, 17495), True, 'import numpy as np\n'), ((17629, 17667), 'numpy.concatenate', 'np.concatenate', (['frame_original'], {'axis': '(0)'}), '(frame_original, axis=0)\n', (17643, 17667), True, 'import numpy as np\n'), ((18925, 18972), 'absl.logging.info', 'logging.info', (['"""Finished embedding the dataset."""'], {}), "('Finished embedding the dataset.')\n", (18937, 18972), False, 'from absl import logging\n'), ((21764, 21815), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (21804, 21815), True, 'import tensorflow as tf\n'), ((22130, 22145), 'absl.logging.info', 'logging.info', (['e'], {}), '(e)\n', (22142, 22145), False, 'from absl import logging\n'), 
((2384, 2458), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(2 * num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=2 * num_steps * num_frames_per_step, axis=0)\n', (2394, 2458), True, 'import tensorflow as tf\n'), ((12887, 12928), 'numpy.arange', 'np.arange', (['curr_idx', '(curr_idx + num_steps)'], {}), '(curr_idx, curr_idx + num_steps)\n', (12896, 12928), True, 'import numpy as np\n'), ((16585, 16605), 'numpy.concatenate', 'np.concatenate', (['embs'], {}), '(embs)\n', (16599, 16605), True, 'import numpy as np\n'), ((3119, 3151), 'tensorflow.argmin', 'tf.argmin', (['mean_squared_distance'], {}), '(mean_squared_distance)\n', (3128, 3151), True, 'import tensorflow as tf\n'), ((7047, 7164), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['lr', 'global_step', 'CONFIG.TRAIN.MAX_ITERS'], {'end_learning_rate': '(0.0)', 'power': '(1.0)', 'cycle': '(False)'}), '(lr, global_step, CONFIG.TRAIN.MAX_ITERS,\n end_learning_rate=0.0, power=1.0, cycle=False)\n', (7072, 7164), True, 'import tensorflow as tf\n'), ((10279, 10293), 'config.CONFIG.items', 'CONFIG.items', ([], {}), '()\n', (10291, 10293), False, 'from config import CONFIG\n'), ((15539, 15566), 'tensorflow.gather', 'tf.gather', (['v', 'idxes'], {'axis': '(1)'}), '(v, idxes, axis=1)\n', (15548, 15566), True, 'import tensorflow as tf\n'), ((17843, 17857), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17851, 17857), True, 'import numpy as np\n'), ((17899, 17913), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17907, 17913), True, 'import numpy as np\n'), ((17960, 17974), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17968, 17974), True, 'import numpy as np\n'), ((18015, 18029), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18023, 18029), True, 'import numpy as np\n'), ((18262, 18276), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18270, 18276), True, 'import numpy as np\n'), ((17730, 17744), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17738, 17744), True, 'import numpy as np\n'), ((18102, 18116), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18110, 18116), True, 'import numpy as np\n'), ((18208, 18222), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18216, 18222), True, 'import numpy as np\n')] |
dblack2056/UnityPy | UnityPy/classes/Sprite.py | 303291e46ddfbf266131237e59e6b1b5c46a9ca4 | from enum import IntEnum
from .Mesh import BoneWeights4, SubMesh, VertexData
from .NamedObject import NamedObject
from .PPtr import PPtr, save_ptr
from ..export import SpriteHelper
from ..enums import SpriteMeshType
from ..streams import EndianBinaryWriter
class Sprite(NamedObject):
@property
def image(self):
return SpriteHelper.get_image_from_sprite(self)
def __init__(self, reader):
super().__init__(reader=reader)
version = self.version
self.m_Rect = reader.read_rectangle_f()
self.m_Offset = reader.read_vector2()
if version >= (4, 5): # 4.5 and up
self.m_Border = reader.read_vector4()
self.m_PixelsToUnits = reader.read_float()
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
self.m_Pivot = reader.read_vector2()
self.m_Extrude = reader.read_u_int()
if version >= (5, 3): # 5.3 and up
self.m_IsPolygon = reader.read_boolean()
reader.align_stream()
if version >= (2017,): # 2017 and up
first = reader.read_bytes(16) # GUID
second = reader.read_long()
self.m_RenderDataKey = (first, second)
self.m_AtlasTags = reader.read_string_array()
self.m_SpriteAtlas = PPtr(reader) # SpriteAtlas
self.m_RD = SpriteRenderData(reader)
if version >= (2017,): # 2017 and up
m_PhysicsShapeSize = reader.read_int()
self.m_PhysicsShape = [
reader.read_vector2_array() for _ in range(m_PhysicsShapeSize)
]
if version >= (2018,): # 2018 and up
m_BonesSize = reader.read_int()
self.m_Bones = [
reader.read_vector2_array() for _ in range(m_BonesSize)
]
def save(self, writer: EndianBinaryWriter = None):
if writer is None:
writer = EndianBinaryWriter(endian=self.reader.endian)
version = self.version
super().save(writer)
writer.write_rectangle_f(self.m_Rect)
writer.write_vector2(self.m_Offset)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.m_Border)
writer.write_float(self.m_PixelsToUnits)
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
writer.write_vector2(self.m_Pivot)
writer.write_u_int(self.m_Extrude)
if version >= (5, 3): # 5.3 and up
writer.write_boolean(self.m_IsPolygon)
writer.align_stream()
if version >= (2017,): # 2017 and up
writer.write_bytes(self.m_RenderDataKey[0]) # GUID
writer.write_long(self.m_RenderDataKey[1])
writer.write_string_array(self.m_AtlasTags)
self.m_SpriteAtlas.save(writer) # SpriteAtlas
self.m_RD.save(writer, version)
if version >= (2017,): # 2017 and up
writer.write_int(len(self.m_PhysicsShape))
for phys in self.m_PhysicsShape:
writer.write_vector2_array(phys)
if version >= (2018,): # 2018 and up
writer.write_int(len(self.m_Bones))
for bone in self.m_Bones:
writer.write_vector2_array(bone)
self.set_raw_data(writer.bytes)
class SecondarySpriteTexture:
def __init__(self, reader):
self.texture = PPtr(reader) # Texture2D
self.name = reader.read_string_to_null()
def save(self, writer):
self.texture.save(writer)
writer.write_string_to_null(self.name)
class SpritePackingRotation(IntEnum):
    kSPRNone = 0
    kSPRFlipHorizontal = 1
    kSPRFlipVertical = 2
    kSPRRotate180 = 3
    kSPRRotate90 = 4
class SpritePackingMode(IntEnum):
    kSPMTight = 0
    kSPMRectangle = 1
class SpriteSettings:
def __init__(self, reader):
self.value = reader.read_u_int()
@property
def value(self):
return self.m_settingsRaw
@value.setter
def value(self, _value):
self.m_settingsRaw = _value
        self.packed = self.m_settingsRaw & 1  # bit 0 (1 bit)
        self.packingMode = SpritePackingMode((self.m_settingsRaw >> 1) & 1)  # bit 1 (1 bit)
        self.packingRotation = SpritePackingRotation((self.m_settingsRaw >> 2) & 0xF)  # bits 2-5 (4 bits)
        self.meshType = SpriteMeshType((self.m_settingsRaw >> 6) & 1)  # bit 6 (1 bit)
        # the remaining bits are reserved
def save(self, writer):
writer.write_u_int(self.m_settingsRaw)
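# --- Illustrative sketch (editor's addition, not part of the original UnityPy module) ---
# The SpriteSettings setter above unpacks m_settingsRaw as a small bit field:
# bit 0 = packed flag, bit 1 = packing mode, bits 2-5 = packing rotation,
# bit 6 = mesh type, remaining bits reserved. The helper below mirrors that
# unpacking with plain integers so the layout can be checked in isolation;
# the raw value 0b1001101 is an arbitrary example, not data from a real asset.
def _decode_sprite_settings(raw: int) -> dict:
    """Decode a raw m_settingsRaw value into its bit-field components."""
    return {
        "packed": raw & 1,                    # bit 0
        "packingMode": (raw >> 1) & 1,        # bit 1
        "packingRotation": (raw >> 2) & 0xF,  # bits 2-5
        "meshType": (raw >> 6) & 1,           # bit 6
    }
# _decode_sprite_settings(0b1001101)
# -> {'packed': 1, 'packingMode': 0, 'packingRotation': 3, 'meshType': 1}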
class SpriteVertex:
def __init__(self, reader):
version = reader.version
self.pos = reader.read_vector3()
if version[:2] <= (4, 3): # 4.3 and down
self.uv = reader.read_vector2()
def save(self, writer, version):
writer.write_vector3(self.pos)
if version[:2] <= (4, 3): # 4.3 and down
            writer.write_vector2(self.uv)
class SpriteRenderData:
def __init__(self, reader):
version = reader.version
self.texture = PPtr(reader) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture = PPtr(reader) # Texture2D
if version >= (2019,): # 2019 and up
secondaryTexturesSize = reader.read_int()
self.secondaryTextures = [
SecondarySpriteTexture(reader) for _ in range(secondaryTexturesSize)
]
if version >= (5, 6): # 5.6 and up
SubMeshesSize = reader.read_int()
self.m_SubMeshes = [SubMesh(reader) for _ in range(SubMeshesSize)]
IndexBufferSize = reader.read_int()
self.m_IndexBuffer = reader.read_bytes(IndexBufferSize)
reader.align_stream()
self.m_VertexData = VertexData(reader)
else:
verticesSize = reader.read_int()
self.vertices = [SpriteVertex(reader) for _ in range(verticesSize)]
self.indices = reader.read_u_short_array()
reader.align_stream()
if version >= (2018,): # 2018 and up
self.m_Bindpose = reader.read_matrix_array()
if version < (2018, 2): # 2018.2 down
self.m_SourceSkinSize = reader.read_int()
self.m_SourceSkin = [BoneWeights4(reader)]
self.textureRect = reader.read_rectangle_f()
self.textureRectOffset = reader.read_vector2()
if version >= (5, 6): # 5.6 and up
self.atlasRectOffset = reader.read_vector2()
self.settingsRaw = SpriteSettings(reader)
if version >= (4, 5): # 4.5 and up
self.uvTransform = reader.read_vector4()
if version >= (2017,): # 2017 and up
self.downscaleMultiplier = reader.read_float()
def save(self, writer, version):
self.texture.save(writer) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture.save(writer) # Texture2D
if version >= (2019,): # 2019 and up
writer.write_int(len(self.secondaryTextures))
for tex in self.secondaryTextures:
tex.save(writer)
if version >= (5, 6): # 5.6 and up
writer.write_int(len(self.m_SubMeshes))
for mesh in self.m_SubMeshes:
mesh.save(writer, version)
writer.write_int(len(self.m_IndexBuffer))
writer.write_bytes(self.m_IndexBuffer)
writer.align_stream()
self.m_VertexData.save(writer, version)
else:
writer.write_int(len(self.vertices))
for vertex in self.vertices:
vertex.save(writer, version)
writer.write_u_short_array(self.indices)
writer.align_stream()
if version >= (2018,): # 2018 and up
writer.write_matrix_array(self.m_Bindpose)
if version < (2018, 2): # 2018.2 down
writer.write_int(self.m_SourceSkinSize)
self.m_SourceSkin[0].save(writer)
writer.write_rectangle_f(self.textureRect)
writer.write_vector2(self.textureRectOffset)
if version >= (5, 6): # 5.6 and up
writer.write_vector2(self.atlasRectOffset)
self.settingsRaw.save(writer)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.uvTransform)
if version >= (2017,): # 2017 and up
writer.write_float(self.downscaleMultiplier)
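# --- Usage sketch (editor's addition, not part of the original UnityPy module) ---
# A minimal, hedged example of how Sprite.image (the property defined on Sprite
# above) is typically reached from a loaded asset bundle. The bundle path and
# output directory are placeholders, and the exact form of obj.type (plain
# string vs. enum with a .name attribute) has varied between UnityPy versions,
# which is why getattr() is used for the comparison.
def _export_sprites(bundle_path="bundle.unity3d", out_dir="."):
    import os
    import UnityPy
    env = UnityPy.load(bundle_path)
    for obj in env.objects:
        type_name = getattr(obj.type, "name", obj.type)
        if type_name == "Sprite":
            sprite = obj.read()  # Sprite instance as defined in this module
            # Sprite.image returns a PIL image assembled by SpriteHelper.
            sprite.image.save(os.path.join(out_dir, f"{sprite.name}.png"))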
| [] |
albertfxwang/eazy-py | eazy/filters.py | bcfd8a1e49f077adc794202871345542ab29800b | import numpy as np
import os
from astropy.table import Table
from . import utils
__all__ = ["FilterDefinition", "FilterFile", "ParamFilter"]
VEGA_FILE = os.path.join(utils.path_to_eazy_data(),
'alpha_lyr_stis_008.fits')
VEGA = Table.read(VEGA_FILE)
for c in VEGA.colnames:
VEGA[c] = VEGA[c].astype(float)
class FilterDefinition:
def __init__(self, name=None, wave=None, throughput=None, bp=None):
"""
Bandpass object
Parameters
----------
name : str
Label name
wave : array
Wavelength array, in `astropy.units.Angstrom`.
throughput : array
Throughput, arbitrary normalization
bp : optional, `pysynphot.obsbandpass` object
`pysynphot` filter bandpass
"""
self.name = name
self.wave = wave
self.throughput = throughput
self.Aflux = 1.
# pysynphot Bandpass
if bp is not None:
self.wave = np.cast[np.double](bp.wave)
self.throughput = np.cast[np.double](bp.throughput)
self.name = bp.name
self.norm = 1.
if self.throughput is not None:
self.norm = np.trapz(self.throughput/self.wave, self.wave)
def __repr__(self):
return self.name.__repr__()
def __str__(self):
return self.name.__str__()
def get_extinction(self, EBV=0, Rv=3.1):
"""
Extinction factor
"""
import astropy.units as u
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
self.Alambda = f99(self.wave)
self.Aflux = 10**(-0.4*self.Alambda)
def extinction_correction(self, EBV, Rv=3.1, mag=True, source_lam=None, source_flux=None):
"""
Get the MW extinction correction within the filter.
Optionally supply a source spectrum.
"""
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
if self.wave is None:
print('Filter not defined.')
return False
if source_flux is None:
source_flux = self.throughput*0.+1
else:
source_flux = interp(self.wave, source_lam, source_flux, left=0, right=0)
if (self.wave.min() < 910) | (self.wave.max() > 6.e4):
Alambda = 0.
else:
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
Alambda = f99(self.wave)
delta = np.trapz(self.throughput*source_flux*10**(-0.4*Alambda), self.wave) / np.trapz(self.throughput*source_flux, self.wave)
if mag:
return 2.5*np.log10(delta)
else:
return 1./delta
@property
def ABVega(self):
"""
Compute AB-Vega conversion
"""
from astropy.constants import c
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
# Union of throughput and Vega spectrum arrays
full_x = np.hstack([self.wave, VEGA['WAVELENGTH']])
full_x = full_x[np.argsort(full_x)]
# Vega spectrum, units of f-lambda flux density, cgs
# Interpolate to wavelength grid, no extrapolation
vega_full = interp(full_x, VEGA['WAVELENGTH'], VEGA['FLUX'],
left=0, right=0)
thru_full = interp(full_x, self.wave, self.throughput,
left=0, right=0)
# AB = 0, same units
absp = 3631*1e-23*c.to(u.m/u.s).value*1.e10/full_x**2
# Integrate over the bandpass, flam dlam
num = np.trapz(vega_full*thru_full, full_x)
den = np.trapz(absp*thru_full, full_x)
return -2.5*np.log10(num/den)
@property
def pivot(self):
"""
Pivot wavelength
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
integrator = np.trapz
num = integrator(self.wave, self.wave*self.throughput)
den = integrator(self.wave, self.throughput/self.wave)
pivot = np.sqrt(num/den)
return pivot
@property
def equivwidth(self):
"""
Filter equivalent width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
return np.trapz(self.throughput, self.wave)
@property
def rectwidth(self):
"""
Filter rectangular width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
rect = self.equivwidth / self.throughput.max()
return rect
@property
def ctw95(self):
"""
95% cumulative throughput width
http://www.stsci.edu/hst/acs/analysis/bandwidths/#keywords
"""
dl = np.diff(self.wave)
filt = np.cumsum((self.wave*self.throughput)[1:]*dl)
ctw95 = np.interp([0.025, 0.975], filt/filt.max(), self.wave[1:])
return np.diff(ctw95)[0]
def for_filter_file(self, row_str='{i:6} {wave:.5e} {thru:.5e}'):
"""
Return a string that can be put in the EAZY filter file
"""
header = '{0} {1} lambda_c= {2:.4e} AB-Vega= {3:.3f} w95={4:.1f}'
N = len(self.wave)
lines = [header.format(N, self.name.split('lambda_c')[0],
self.pivot, self.ABVega, self.ctw95)]
lines += [row_str.format(i=i+1, wave=w, thru=t)
for i, (w, t) in enumerate(zip(self.wave, self.throughput))]
return '\n'.join(lines)
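# --- Usage sketch (editor's addition, not part of eazy-py) ---
# FilterDefinition can be built directly from wavelength / throughput arrays,
# after which the derived quantities implemented above (pivot, equivwidth,
# ABVega, ctw95) are available as properties. The top-hat bandpass below is
# synthetic toy data, not a real instrument filter; ABVega additionally relies
# on the bundled Vega spectrum loaded at the top of this module.
def _tophat_example():
    wave = np.linspace(4000., 6000., 2001)
    thru = ((wave > 4500.) & (wave < 5500.)).astype(float)
    bp = FilterDefinition(name='toy_tophat', wave=wave, throughput=thru)
    # The pivot wavelength of a symmetric top-hat sits near its center
    # (~5000 Angstroms) and the equivalent width is close to its 1000 A width.
    return bp.pivot, bp.equivwidth, bp.ABVega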
class FilterFile:
def __init__(self, file='FILTER.RES.latest', path='./'):
"""
Read a EAZY filter file.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from eazy.filters import FilterFile
res = FilterFile(path=None)
print(len(res.filters))
bp = res[205]
print(bp)
fig, ax = plt.subplots(1,1,figsize=(6,4))
ax.plot(bp.wave, bp.throughput, label=bp.name.split()[0])
ax.set_xlabel('wavelength, Angstroms')
ax.set_ylabel('throughput')
ax.legend()
ax.grid()
fig.tight_layout(pad=0.5)
"""
if path is None:
file_path = os.path.join(os.getenv('EAZYCODE'), 'filters', file)
else:
file_path = os.path.join(path, file)
with open(file_path, 'r') as fp:
lines = fp.readlines()
self.filename = file_path
filters = []
wave = []
trans = []
header = ''
for line in lines:
if 'lambda_c' in line:
if len(wave) > 0:
# Make filter from lines already read in
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
# new_filter.name = header
# new_filter.wave = np.cast[float](wave)
# new_filter.throughput = np.cast[float](trans)
filters.append(new_filter)
# Initialize filter
header = ' '.join(line.split()[1:])
wave = []
trans = []
else:
lspl = np.cast[float](line.split())
wave.append(lspl[1])
trans.append(lspl[2])
# last one
# new_filter = FilterDefinition()
# new_filter.name = header
# new_filter.wave = np.cast[float](wave)
# new_filter.throughput = np.cast[float](trans)
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
filters.append(new_filter)
self.filters = filters
@property
def NFILT(self):
"""
Number of filters in the list
"""
return len(self.filters)
def __getitem__(self, i1):
"""
Return unit-indexed filter, e.g., 161 = 2mass-j
"""
return self.filters[i1-1]
def names(self, verbose=True):
"""
        Print the filter names if `verbose` is True, otherwise return them as a list of strings.
"""
if verbose:
for i in range(len(self.filters)):
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
else:
string_list = ['{0:5d} {1}\n'.format(i+1, self.filters[i].name) for i in range(len(self.filters))]
return string_list
def write(self, file='xxx.res', verbose=True):
"""
Dump the filter information to a filter file.
"""
fp = open(file,'w')
for filter in self.filters:
fp.write('{0:6d} {1}\n'.format(len(filter.wave), filter.name))
for i in range(len(filter.wave)):
fp.write('{0:6d} {1:.5e} {2:.5e}\n'.format(i+1, filter.wave[i], filter.throughput[i]))
fp.close()
string_list = self.names(verbose=False)
fp = open(file+'.info', 'w')
fp.writelines(string_list)
fp.close()
if verbose:
print('Wrote <{0}[.info]>'.format(file))
def search(self, search_string, case=False, verbose=True):
"""
Search filter names for ``search_string``. If ``case`` is True, then
match case.
"""
import re
if not case:
search_string = search_string.upper()
matched = []
for i in range(len(self.filters)):
filt_name = self.filters[i].name
if not case:
filt_name = filt_name.upper()
if re.search(search_string, filt_name) is not None:
if verbose:
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
matched.append(i)
return np.array(matched)
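# --- Usage sketch (editor's addition, not part of eazy-py) ---
# FilterFile.__init__ above parses the plain-text EAZY filter format: a header
# line containing "lambda_c" followed by rows of "index wavelength throughput".
# FilterDefinition.for_filter_file() emits exactly that layout, so a filter can
# be written out and read back. The temporary file holds the same synthetic
# top-hat used above, not one of the distributed FILTER.RES files.
def _filter_file_roundtrip():
    import os
    import tempfile
    wave = np.linspace(4000., 6000., 2001)
    thru = ((wave > 4500.) & (wave < 5500.)).astype(float)
    bp = FilterDefinition(name='toy_tophat', wave=wave, throughput=thru)
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, 'toy.res'), 'w') as fp:
            fp.write(bp.for_filter_file() + '\n')
        res = FilterFile(file='toy.res', path=tmp)
        # Filters are unit-indexed, so the single entry is res[1].
        return res.NFILT, res[1].pivot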
class ParamFilter(FilterDefinition):
def __init__(self, line='# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'):
self.lambda_c = float(line.split('lambda_c=')[1])
self.name = line.split()[4]
self.fnumber = int(line.split('RES#')[1].split(':')[0])
self.cnumber = int(line.split('Filter #')[1].split(',')[0])
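# --- Usage sketch (editor's addition, not part of eazy-py) ---
# ParamFilter parses a comment line of the form shown in its default argument,
# extracting the filter number, RES index, filter name and central wavelength.
# Parsing that default line gives the values noted in the comment below.
def _param_filter_example():
    pf = ParamFilter()
    # -> (20, 78, 'COSMOS/SUBARU_filter_B.txt', 4458.276253)
    return pf.cnumber, pf.fnumber, pf.name, pf.lambda_c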
| [((281, 302), 'astropy.table.Table.read', 'Table.read', (['VEGA_FILE'], {}), '(VEGA_FILE)\n', (291, 302), False, 'from astropy.table import Table\n'), ((3424, 3466), 'numpy.hstack', 'np.hstack', (["[self.wave, VEGA['WAVELENGTH']]"], {}), "([self.wave, VEGA['WAVELENGTH']])\n", (3433, 3466), True, 'import numpy as np\n'), ((4071, 4110), 'numpy.trapz', 'np.trapz', (['(vega_full * thru_full)', 'full_x'], {}), '(vega_full * thru_full, full_x)\n', (4079, 4110), True, 'import numpy as np\n'), ((4123, 4157), 'numpy.trapz', 'np.trapz', (['(absp * thru_full)', 'full_x'], {}), '(absp * thru_full, full_x)\n', (4131, 4157), True, 'import numpy as np\n'), ((4549, 4567), 'numpy.sqrt', 'np.sqrt', (['(num / den)'], {}), '(num / den)\n', (4556, 4567), True, 'import numpy as np\n'), ((4774, 4810), 'numpy.trapz', 'np.trapz', (['self.throughput', 'self.wave'], {}), '(self.throughput, self.wave)\n', (4782, 4810), True, 'import numpy as np\n'), ((5300, 5318), 'numpy.diff', 'np.diff', (['self.wave'], {}), '(self.wave)\n', (5307, 5318), True, 'import numpy as np\n'), ((5334, 5383), 'numpy.cumsum', 'np.cumsum', (['((self.wave * self.throughput)[1:] * dl)'], {}), '((self.wave * self.throughput)[1:] * dl)\n', (5343, 5383), True, 'import numpy as np\n'), ((10726, 10743), 'numpy.array', 'np.array', (['matched'], {}), '(matched)\n', (10734, 10743), True, 'import numpy as np\n'), ((1319, 1367), 'numpy.trapz', 'np.trapz', (['(self.throughput / self.wave)', 'self.wave'], {}), '(self.throughput / self.wave, self.wave)\n', (1327, 1367), True, 'import numpy as np\n'), ((2764, 2839), 'numpy.trapz', 'np.trapz', (['(self.throughput * source_flux * 10 ** (-0.4 * Alambda))', 'self.wave'], {}), '(self.throughput * source_flux * 10 ** (-0.4 * Alambda), self.wave)\n', (2772, 2839), True, 'import numpy as np\n'), ((2834, 2884), 'numpy.trapz', 'np.trapz', (['(self.throughput * source_flux)', 'self.wave'], {}), '(self.throughput * source_flux, self.wave)\n', (2842, 2884), True, 'import numpy as np\n'), ((4185, 4204), 'numpy.log10', 'np.log10', (['(num / den)'], {}), '(num / den)\n', (4193, 4204), True, 'import numpy as np\n'), ((5469, 5483), 'numpy.diff', 'np.diff', (['ctw95'], {}), '(ctw95)\n', (5476, 5483), True, 'import numpy as np\n'), ((7039, 7063), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (7051, 7063), False, 'import os\n'), ((2931, 2946), 'numpy.log10', 'np.log10', (['delta'], {}), '(delta)\n', (2939, 2946), True, 'import numpy as np\n'), ((3491, 3509), 'numpy.argsort', 'np.argsort', (['full_x'], {}), '(full_x)\n', (3501, 3509), True, 'import numpy as np\n'), ((6961, 6982), 'os.getenv', 'os.getenv', (['"""EAZYCODE"""'], {}), "('EAZYCODE')\n", (6970, 6982), False, 'import os\n'), ((10517, 10552), 're.search', 're.search', (['search_string', 'filt_name'], {}), '(search_string, filt_name)\n', (10526, 10552), False, 'import re\n'), ((3963, 3978), 'astropy.constants.c.to', 'c.to', (['(u.m / u.s)'], {}), '(u.m / u.s)\n', (3967, 3978), False, 'from astropy.constants import c\n')] |
KevinTMtz/CompetitiveProgramming | LeetCode/106.py | 0bf8a297c404073df707b6d7b06965b055ccd872 | #
# LeetCode
#
# Problem - 106
# URL - https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/
#
from typing import List
# Definition for a binary tree node. LeetCode normally provides this class at
# submission time; it is defined here so the file also runs standalone.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
        if not inorder:
            return None
        # The last value in postorder is the root of the current subtree.
        r = postorder.pop()
        root = TreeNode(r)
        # Its position in inorder separates the left and right subtrees.
        index = inorder.index(r)
        # Build the right subtree first: postorder is consumed from the end,
        # so the values after the root's inorder index belong to the right side.
        root.right = self.buildTree(inorder[index+1:], postorder)
        root.left = self.buildTree(inorder[:index], postorder)
        return root
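# --- Worked example (editor's addition, not part of the LeetCode submission) ---
# A small self-check of the reconstruction above: for inorder [9, 3, 15, 20, 7]
# and postorder [9, 15, 7, 20, 3] the rebuilt tree has root 3, left child 9,
# and right child 20 with children 15 and 7.
def _inorder_values(node):
    # Read the rebuilt tree back in inorder to compare with the input.
    if node is None:
        return []
    return _inorder_values(node.left) + [node.val] + _inorder_values(node.right)
if __name__ == "__main__":
    root = Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
    assert root.val == 3
    assert _inorder_values(root) == [9, 3, 15, 20, 7]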
| [] |
skvorekn/evalml | evalml/automl/automl_search.py | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | import copy
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.callbacks import log_error_callback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_all_pipeline_names_unique,
get_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
get_core_objectives,
get_non_core_objectives,
get_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
get_logger,
log_subtitle,
log_title,
time_elapsed,
update_pipeline
)
logger = get_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
max_iterations=None,
max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
allowed_pipelines=None,
allowed_model_families=None,
start_iteration_callback=None,
add_result_callback=None,
error_callback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
            max_iterations (int): Maximum number of iterations to search. If neither max_iterations
                nor max_time is set, the search will default to max_batches of 1.
max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
allowed_model_families to be ignored.
allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_callback (callable): Function called before each pipeline training iteration.
Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_callback (callable): Function called after each pipeline training iteration.
Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_callback (callable): Function called when `search()` errors and raises an Exception.
Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will call `log_error_callback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and
max_iterations have precedence over stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time series problems, values should be passed in for the gap and max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
            pipeline_parameters (dict): A dict of the parameters used to initialize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_callback = start_iteration_callback
self.add_result_callback = add_result_callback
self.error_callback = error_callback or log_error_callback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = get_default_primary_search_objective(self.problem_type.value)
objective = get_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = get_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [get_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not isinstance(max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
if isinstance(max_time, (int, float)) and max_time < 0:
raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
if max_batches is not None and max_batches < 0:
raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
if max_iterations is not None and max_iterations < 0:
raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
self.max_iterations = max_iterations
self.max_batches = max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.max_iterations and not self.max_time and not self.max_batches:
self.max_batches = 1
logger.info("Using default limit of max_batches=1.\n")
if patience and (not isinstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.allowed_pipelines = allowed_pipelines
self.allowed_model_families = allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]
if self.allowed_pipelines == []:
raise ValueError("No allowed pipelines to search")
check_all_pipeline_names_unique(self.allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and len(self.allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
if self.max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.max_batches and self.max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = len(self.allowed_pipelines) + 1
num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.max_iterations = (1 + len(self.allowed_pipelines) +
self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_callback=self._should_continue,
pre_evaluation_callback=self._pre_evaluation_callback,
post_evaluation_callback=self._post_evaluation_callback)
self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))
logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
if len(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
max_iterations=self.max_iterations,
allowed_pipelines=self.allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
def _pre_evaluation_callback(self, pipeline):
if self.start_iteration_callback:
self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self)
desc = f"{pipeline.name}"
if len(desc) > AutoMLSearch._MAX_NAME_LEN:
desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
batch_number = 1
if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
batch_number = self._automl_algorithm.batch_number
update_pipeline(logger,
desc,
len(self._results['pipeline_results']) + 1,
self.max_iterations,
self._start,
batch_number,
self.show_batch_output)
def _validate_objective(self, objective):
non_core_objectives = get_non_core_objectives()
if isinstance(objective, type):
if objective in non_core_objectives:
raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! "
"Use evalml.objectives.utils.get_core_objective_names() "
"to get all objective names allowed in automl.")
return objective()
return objective
def __str__(self):
def _print_list(obj_list):
lines = sorted(['\t{}'.format(o.name) for o in obj_list])
return '\n'.join(lines)
def _get_funct_name(function):
if callable(function):
return function.__name__
else:
return None
search_desc = (
f"{handle_problem_types(self.problem_type).name} Search\n\n"
f"Parameters: \n{'='*20}\n"
f"Objective: {get_objective(self.objective).name}\n"
f"Max Time: {self.max_time}\n"
f"Max Iterations: {self.max_iterations}\n"
f"Max Batches: {self.max_batches}\n"
f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
f"Patience: {self.patience}\n"
f"Tolerance: {self.tolerance}\n"
f"Data Splitting: {self.data_splitter}\n"
f"Tuner: {self.tuner_class.__name__}\n"
f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
f"Random Seed: {self.random_seed}\n"
f"n_jobs: {self.n_jobs}\n"
f"Optimize Thresholds: {self.optimize_thresholds}\n"
)
rankings_desc = ""
if not self.rankings.empty:
rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string()
rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
required_parameters = {'gap', 'max_delay'}
if not problem_configuration or not all(p in problem_configuration for p in required_parameters):
raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay "
f"parameters. Received {problem_configuration}.")
return problem_configuration or {}
def _handle_keyboard_interrupt(self):
"""Presents a prompt to the user asking if they want to stop the search.
Returns:
bool: If True, search should terminate early
"""
leading_char = "\n"
start_of_loop = time.time()
while True:
choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower()
if choice == "y":
logger.info("Exiting AutoMLSearch.")
return True
elif choice == "n":
# So that the time in this loop does not count towards the time budget (if set)
time_in_loop = time.time() - start_of_loop
self._start += time_in_loop
return False
else:
leading_char = ""
def search(self, show_iteration_plot=True):
"""Find the best pipeline for the data set.
Arguments:
            show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
                Disabled by default in non-Jupyter environments.
"""
if self._searched:
logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
return
# don't show iteration plot outside of a jupyter notebook
if show_iteration_plot:
try:
get_ipython
except NameError:
show_iteration_plot = False
log_title(logger, "Beginning pipeline search")
logger.info("Optimizing for %s. " % self.objective.name)
logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower'))
logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
if self.max_batches is not None:
logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ")
elif self.max_iterations is not None:
logger.info("Searching up to %s pipelines. " % self.max_iterations)
if self.max_time is not None:
logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time)
logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families]))
self.search_iteration_plot = None
if self.plot:
self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
self._start = time.time()
try:
self._add_baseline_pipelines()
except KeyboardInterrupt:
if self._handle_keyboard_interrupt():
self._interrupted = True
current_batch_pipelines = []
current_batch_pipeline_scores = []
new_pipeline_ids = []
loop_interrupted = False
while self._should_continue():
try:
if not loop_interrupted:
current_batch_pipelines = self._automl_algorithm.next_batch()
except StopIteration:
logger.info('AutoML Algorithm out of recommendations, ending')
break
try:
new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
loop_interrupted = False
except KeyboardInterrupt:
loop_interrupted = True
if self._handle_keyboard_interrupt():
break
full_rankings = self.full_rankings
current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all():
raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
self.search_duration = time.time() - self._start
elapsed_time = time_elapsed(self._start)
desc = f"\nSearch finished after {elapsed_time}"
desc = desc.ljust(self._MAX_NAME_LEN)
logger.info(desc)
self._find_best_pipeline()
if self._best_pipeline is not None:
best_pipeline = self.rankings.iloc[0]
best_pipeline_name = best_pipeline["pipeline_name"]
logger.info(f"Best pipeline: {best_pipeline_name}")
logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
self._searched = True
def _find_best_pipeline(self):
"""Finds the best pipeline in the rankings
If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
if len(self.rankings) == 0:
return
best_pipeline = self.rankings.iloc[0]
if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])):
best_pipeline = self.get_pipeline(best_pipeline['id'])
if self._train_best_pipeline:
if best_pipeline.model_family == ModelFamily.ENSEMBLE:
X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
else:
X_train = self.X_train
y_train = self.y_train
if hasattr(self.data_splitter, "transform_sample"):
train_indices = self.data_splitter.transform_sample(X_train, y_train)
X_train = X_train.iloc[train_indices]
y_train = y_train.iloc[train_indices]
best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
self.optimize_thresholds, self.objective)
self._best_pipeline = best_pipeline
def _num_pipelines(self):
"""Return the number of pipeline evaluations which have been made
Returns:
int: the number of pipeline evaluations made in the search
"""
return len(self._results['pipeline_results'])
def _should_continue(self):
"""Given the original stopping criterion and current state, should the search continue?
Returns:
bool: True if yes, False if no.
"""
if self._interrupted:
return False
# for add_to_rankings
if self._searched:
return True
# Run at least one pipeline for every search
num_pipelines = self._num_pipelines()
if num_pipelines == 0:
return True
# check max_time and max_iterations
elapsed = time.time() - self._start
if self.max_time and elapsed >= self.max_time:
return False
elif self.max_iterations and num_pipelines >= self.max_iterations:
return False
# check for early stopping
if self.patience is None or self.tolerance is None:
return True
first_id = self._results['search_order'][0]
best_score = self._results['pipeline_results'][first_id]['score']
num_without_improvement = 0
for id in self._results['search_order'][1:]:
curr_score = self._results['pipeline_results'][id]['score']
significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
if score_improved and significant_change:
best_score = curr_score
num_without_improvement = 0
else:
num_without_improvement += 1
if num_without_improvement >= self.patience:
logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience))
return False
return True
def _validate_problem_type(self):
for obj in self.additional_objectives:
if not obj.is_defined_for_problem_type(self.problem_type):
raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value))
for pipeline in self.allowed_pipelines or []:
if pipeline.problem_type != self.problem_type:
raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value))
def _add_baseline_pipelines(self):
"""Fits a baseline pipeline to the data.
This is the first pipeline fit during search.
"""
if self.problem_type == ProblemTypes.BINARY:
baseline = ModeBaselineBinaryPipeline(parameters={})
elif self.problem_type == ProblemTypes.MULTICLASS:
baseline = ModeBaselineMulticlassPipeline(parameters={})
elif self.problem_type == ProblemTypes.REGRESSION:
baseline = MeanBaselineRegressionPipeline(parameters={})
else:
pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline,
ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline,
ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type]
gap = self.problem_configuration['gap']
max_delay = self.problem_configuration['max_delay']
baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay},
"Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}})
self._engine.evaluate_batch([baseline])
@staticmethod
def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class):
scores = defaultdict(int)
n_folds = len(cv_data)
for fold_data in cv_data:
for field, value in fold_data['all_objective_scores'].items():
# The 'all_objective_scores' field contains scores for all objectives
# but also fields like "# Training" and "# Testing", so we want to exclude them since
# they are not scores
if field in objective_name_to_class:
scores[field] += value
return {objective: float(score) / n_folds for objective, score in scores.items()}
def _post_evaluation_callback(self, pipeline, evaluation_results):
training_time = evaluation_results['training_time']
cv_data = evaluation_results['cv_data']
cv_scores = evaluation_results['cv_scores']
is_baseline = pipeline.model_family == ModelFamily.BASELINE
cv_score = cv_scores.mean()
percent_better_than_baseline = {}
mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class)
if is_baseline:
self._baseline_cv_scores = mean_cv_all_objectives
for obj_name in mean_cv_all_objectives:
objective_class = self.objective_name_to_class[obj_name]
            # In the event add_to_rankings is called before search, _baseline_cv_scores will be empty,
            # so we return nan for the baseline score.
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name],
self._baseline_cv_scores.get(obj_name, np.nan))
percent_better_than_baseline[obj_name] = percent_better
high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
pipeline_id = len(self._results['pipeline_results'])
self._results['pipeline_results'][pipeline_id] = {
"id": pipeline_id,
"pipeline_name": pipeline.name,
"pipeline_class": type(pipeline),
"pipeline_summary": pipeline.summary,
"parameters": pipeline.parameters,
"score": cv_score,
"high_variance_cv": high_variance_cv,
"training_time": training_time,
"cv_data": cv_data,
"percent_better_than_baseline_all_objectives": percent_better_than_baseline,
"percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
"validation_score": cv_scores[0]
}
if pipeline.model_family == ModelFamily.ENSEMBLE:
input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
self._results['search_order'].append(pipeline_id)
if not is_baseline:
score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
try:
self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
except PipelineNotFoundError:
pass
if self.search_iteration_plot:
self.search_iteration_plot.update()
if self.add_result_callback:
self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self)
return pipeline_id
    def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
        """Checks cross-validation scores and logs a warning if variance is higher than the specified threshold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
def get_pipeline(self, pipeline_id):
"""Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
initialized with the parameters used to train that pipeline during automl search.
Arguments:
pipeline_id (int): pipeline to retrieve
Returns:
PipelineBase: untrained pipeline instance associated with the provided ID
"""
pipeline_results = self.results['pipeline_results'].get(pipeline_id)
if pipeline_results is None:
raise PipelineNotFoundError("Pipeline not found in automl results")
pipeline_class = pipeline_results.get('pipeline_class')
parameters = pipeline_results.get('parameters')
if pipeline_class is None or parameters is None:
raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
return pipeline_class(parameters, random_seed=self.random_seed)
def describe_pipeline(self, pipeline_id, return_dict=False):
"""Describe a pipeline
Arguments:
pipeline_id (int): pipeline to describe
return_dict (bool): If True, return dictionary of information
about pipeline. Defaults to False.
Returns:
Description of specified pipeline. Includes information such as
type of pipeline components, problem, training time, cross validation, etc.
"""
if pipeline_id not in self._results['pipeline_results']:
raise PipelineNotFoundError("Pipeline not found")
pipeline = self.get_pipeline(pipeline_id)
pipeline_results = self._results['pipeline_results'][pipeline_id]
pipeline.describe()
if pipeline.model_family == ModelFamily.ENSEMBLE:
logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
log_subtitle(logger, "Training")
logger.info("Training for {} problems.".format(pipeline.problem_type))
if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective))
logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
log_subtitle(logger, "Cross Validation", underline="-")
all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]]
all_objective_scores = pd.DataFrame(all_objective_scores)
for c in all_objective_scores:
if c in ["# Training", "# Validation"]:
all_objective_scores[c] = all_objective_scores[c].astype("object")
continue
mean = all_objective_scores[c].mean(axis=0)
std = all_objective_scores[c].std(axis=0)
all_objective_scores.loc["mean", c] = mean
all_objective_scores.loc["std", c] = std
all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf
all_objective_scores = all_objective_scores.fillna("-")
with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False):
logger.info(all_objective_scores)
if return_dict:
return pipeline_results
def add_to_rankings(self, pipeline):
"""Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
Arguments:
pipeline (PipelineBase): pipeline to train and evaluate.
"""
pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
for parameter in pipeline_rows['parameters']:
if pipeline.parameters == parameter:
return
self._engine.evaluate_batch([pipeline])
self._find_best_pipeline()
@property
def results(self):
"""Class that allows access to a copy of the results from `automl_search`.
Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
and `search_order`: a list describing the order the pipelines were searched.
"""
return copy.deepcopy(self._results)
@property
def rankings(self):
"""Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first")
@property
def full_rankings(self):
"""Returns a pandas.DataFrame with scoring results from all pipelines searched"""
ascending = True
if self.objective.greater_is_better:
ascending = False
full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
"percent_better_than_baseline", "high_variance_cv", "parameters"]
if not self._results['pipeline_results']:
return pd.DataFrame(columns=full_rankings_cols)
rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
rankings_df = rankings_df[full_rankings_cols]
rankings_df.sort_values("score", ascending=ascending, inplace=True)
rankings_df.reset_index(drop=True, inplace=True)
return rankings_df
@property
def best_pipeline(self):
"""Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
Returns:
PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
"""
if not self._best_pipeline:
raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
return self._best_pipeline
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Saves AutoML object at file path
Arguments:
file_path (str): location to save file
pickle_protocol (int): the pickle data stream format.
Returns:
None
"""
with open(file_path, 'wb') as f:
cloudpickle.dump(self, f, protocol=pickle_protocol)
@staticmethod
def load(file_path):
"""Loads AutoML object at file path
Arguments:
file_path (str): location to find file to load
Returns:
AutoSearchBase object
"""
with open(file_path, 'rb') as f:
return cloudpickle.load(f)
def train_pipelines(self, pipelines):
"""Train a list of pipelines on the training data.
This can be helpful for training pipelines once the search is complete.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
Returns:
Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
                Note that any pipelines that error out during training will not be included in the dictionary,
                but the exception and stacktrace will be displayed in the log.
"""
return self._engine.train_batch(pipelines)
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
"""Score a list of pipelines on the given holdout data.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
X_holdout (ww.DataTable, pd.DataFrame): Holdout features.
y_holdout (ww.DataTable, pd.DataFrame): Holdout targets for scoring.
objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
Returns:
Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
                Note that any pipelines that error out during scoring will not be included in the dictionary,
                but the exception and stacktrace will be displayed in the log.
"""
return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
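# Hypothetical usage sketch, not part of the original module: how the
# persistence and batch-training/scoring helpers above fit together. The search
# object, holdout data, and objective list are placeholders supplied by the
# caller; `load` is the staticmethod defined above.
def _example_save_train_score(automl, X_holdout, y_holdout, objectives):
    automl.save("automl.pkl")
    restored = type(automl).load("automl.pkl")
    fitted = restored.train_pipelines([restored.best_pipeline])
    return restored.score_pipelines(list(fitted.values()), X_holdout, y_holdout, objectives)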
| [((1498, 1518), 'evalml.utils.logger.get_logger', 'get_logger', (['__file__'], {}), '(__file__)\n', (1508, 1518), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((9426, 9473), 'evalml.objectives.get_objective', 'get_objective', (['objective'], {'return_instance': '(False)'}), '(objective, return_instance=False)\n', (9439, 9473), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((13288, 13316), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['X_train'], {}), '(X_train)\n', (13307, 13316), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13340, 13368), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['y_train'], {}), '(y_train)\n', (13359, 13368), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13441, 13600), 'evalml.automl.utils.make_data_splitter', 'make_data_splitter', (['self.X_train', 'self.y_train', 'self.problem_type', 'self.problem_configuration'], {'n_splits': '(3)', 'shuffle': '(True)', 'random_seed': 'self.random_seed'}), '(self.X_train, self.y_train, self.problem_type, self.\n problem_configuration, n_splits=3, shuffle=True, random_seed=self.\n random_seed)\n', (13459, 13600), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((14514, 14569), 'evalml.automl.utils.check_all_pipeline_names_unique', 'check_all_pipeline_names_unique', (['self.allowed_pipelines'], {}), '(self.allowed_pipelines)\n', (14545, 14569), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((17239, 17488), 'evalml.automl.engine.SequentialEngine', 'SequentialEngine', (['self.X_train', 'self.y_train', 'self.ensembling_indices', 'self'], {'should_continue_callback': 'self._should_continue', 'pre_evaluation_callback': 'self._pre_evaluation_callback', 'post_evaluation_callback': 'self._post_evaluation_callback'}), '(self.X_train, self.y_train, self.ensembling_indices, self,\n should_continue_callback=self._should_continue, pre_evaluation_callback\n =self._pre_evaluation_callback, post_evaluation_callback=self.\n _post_evaluation_callback)\n', (17255, 17488), False, 'from evalml.automl.engine import SequentialEngine\n'), ((18259, 18600), 'evalml.automl.automl_algorithm.IterativeAlgorithm', 'IterativeAlgorithm', ([], {'max_iterations': 'self.max_iterations', 'allowed_pipelines': 'self.allowed_pipelines', 'tuner_class': 'self.tuner_class', 'random_seed': 'self.random_seed', 'n_jobs': 'self.n_jobs', 'number_features': 'self.X_train.shape[1]', 'pipelines_per_batch': 'self._pipelines_per_batch', 'ensembling': 'run_ensembling', 'pipeline_params': 'pipeline_params'}), '(max_iterations=self.max_iterations, allowed_pipelines=\n self.allowed_pipelines, tuner_class=self.tuner_class, random_seed=self.\n random_seed, n_jobs=self.n_jobs, number_features=self.X_train.shape[1],\n pipelines_per_batch=self._pipelines_per_batch, ensembling=\n run_ensembling, pipeline_params=pipeline_params)\n', (18277, 18600), False, 'from evalml.automl.automl_algorithm import IterativeAlgorithm\n'), ((19640, 19665), 'evalml.objectives.get_non_core_objectives', 'get_non_core_objectives', ([], {}), '()\n', (19663, 19665), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((22545, 22556), 'time.time', 'time.time', ([], {}), 
'()\n', (22554, 22556), False, 'import time\n'), ((24004, 24050), 'evalml.utils.logger.log_title', 'log_title', (['logger', '"""Beginning pipeline search"""'], {}), "(logger, 'Beginning pipeline search')\n", (24013, 24050), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((25067, 25078), 'time.time', 'time.time', ([], {}), '()\n', (25076, 25078), False, 'import time\n'), ((26570, 26595), 'evalml.utils.logger.time_elapsed', 'time_elapsed', (['self._start'], {}), '(self._start)\n', (26582, 26595), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((32472, 32488), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (32483, 32488), False, 'from collections import defaultdict\n'), ((38449, 38481), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Training"""'], {}), "(logger, 'Training')\n", (38461, 38481), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((38948, 39003), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Cross Validation"""'], {'underline': '"""-"""'}), "(logger, 'Cross Validation', underline='-')\n", (38960, 39003), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((39138, 39172), 'pandas.DataFrame', 'pd.DataFrame', (['all_objective_scores'], {}), '(all_objective_scores)\n', (39150, 39172), True, 'import pandas as pd\n'), ((40904, 40932), 'copy.deepcopy', 'copy.deepcopy', (['self._results'], {}), '(self._results)\n', (40917, 40932), False, 'import copy\n'), ((8755, 8789), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['problem_type'], {}), '(problem_type)\n', (8775, 8789), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((9344, 9405), 'evalml.automl.utils.get_default_primary_search_objective', 'get_default_primary_search_objective', (['self.problem_type.value'], {}), '(self.problem_type.value)\n', (9380, 9405), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((9997, 10035), 'evalml.objectives.get_core_objectives', 'get_core_objectives', (['self.problem_type'], {}), '(self.problem_type)\n', (10016, 10035), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((11491, 11519), 'evalml.utils.convert_to_seconds', 'convert_to_seconds', (['max_time'], {}), '(max_time)\n', (11509, 11519), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((14032, 14094), 'evalml.pipelines.components.utils.get_estimators', 'get_estimators', (['self.problem_type', 'self.allowed_model_families'], {}), '(self.problem_type, self.allowed_model_families)\n', (14046, 14094), False, 'from evalml.pipelines.components.utils import get_estimators\n'), ((17001, 17135), 'evalml.preprocessing.split_data', 'split_data', (['X_shape', 'self.y_train'], {'problem_type': 'self.problem_type', 'test_size': '_ensembling_split_size', 'random_seed': 'self.random_seed'}), '(X_shape, self.y_train, problem_type=self.problem_type, test_size\n =_ensembling_split_size, random_seed=self.random_seed)\n', (17011, 17135), False, 'from evalml.preprocessing import split_data\n'), ((26521, 26532), 'time.time', 'time.time', ([], {}), '()\n', (26530, 26532), False, 'import time\n'), ((29296, 29307), 'time.time', 'time.time', 
([], {}), '()\n', (29305, 29307), False, 'import time\n'), ((31329, 31370), 'evalml.pipelines.ModeBaselineBinaryPipeline', 'ModeBaselineBinaryPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31355, 31370), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((37083, 37144), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found in automl results"""'], {}), "('Pipeline not found in automl results')\n", (37104, 37144), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((37340, 37426), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline class or parameters not found in automl results"""'], {}), "(\n 'Pipeline class or parameters not found in automl results')\n", (37361, 37426), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((38065, 38108), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found"""'], {}), "('Pipeline not found')\n", (38086, 38108), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((39768, 39858), 'pandas.option_context', 'pd.option_context', (['"""display.float_format"""', '"""{:.3f}""".format', '"""expand_frame_repr"""', '(False)'], {}), "('display.float_format', '{:.3f}'.format,\n 'expand_frame_repr', False)\n", (39785, 39858), True, 'import pandas as pd\n'), ((41672, 41712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'full_rankings_cols'}), '(columns=full_rankings_cols)\n', (41684, 41712), True, 'import pandas as pd\n'), ((42510, 42599), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""automl search must be run before selecting `best_pipeline`."""'], {}), "(\n 'automl search must be run before selecting `best_pipeline`.')\n", (42531, 42599), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((42990, 43041), 'cloudpickle.dump', 'cloudpickle.dump', (['self', 'f'], {'protocol': 'pickle_protocol'}), '(self, f, protocol=pickle_protocol)\n', (43006, 43041), False, 'import cloudpickle\n'), ((43333, 43352), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (43349, 43352), False, 'import cloudpickle\n'), ((10434, 10450), 'evalml.objectives.get_objective', 'get_objective', (['o'], {}), '(o)\n', (10447, 10450), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((14243, 14367), 'evalml.pipelines.utils.make_pipeline', 'make_pipeline', (['self.X_train', 'self.y_train', 'estimator', 'self.problem_type'], {'custom_hyperparameters': 'self.pipeline_parameters'}), '(self.X_train, self.y_train, estimator, self.problem_type,\n custom_hyperparameters=self.pipeline_parameters)\n', (14256, 14367), False, 'from evalml.pipelines.utils import make_pipeline\n'), ((16925, 16957), 'numpy.arange', 'np.arange', (['self.X_train.shape[0]'], {}), '(self.X_train.shape[0])\n', (16934, 16957), True, 'import numpy as np\n'), ((26351, 26498), 'evalml.exceptions.AutoMLSearchException', 'AutoMLSearchException', (['f"""All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}."""'], {}), "(\n f'All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.'\n 
)\n", (26372, 26498), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((31453, 31498), 'evalml.pipelines.ModeBaselineMulticlassPipeline', 'ModeBaselineMulticlassPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31483, 31498), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((20439, 20478), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['self.problem_type'], {}), '(self.problem_type)\n', (20459, 20478), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((20437, 20466), 'evalml.objectives.get_objective', 'get_objective', (['self.objective'], {}), '(self.objective)\n', (20450, 20466), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((31581, 31626), 'evalml.pipelines.MeanBaselineRegressionPipeline', 'MeanBaselineRegressionPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31611, 31626), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((22950, 22961), 'time.time', 'time.time', ([], {}), '()\n', (22959, 22961), False, 'import time\n')] |
deepsourcelabs/django-graphql-social-auth | graphql_social_auth/mutations.py | a0cc7715144dc289ccb4d2430e7c3b94fc1dffba | import graphene
from graphql_jwt.decorators import setup_jwt_cookie
from . import mixins, types
from .decorators import social_auth
class SocialAuthMutation(mixins.SocialAuthMixin, graphene.Mutation):
social = graphene.Field(types.SocialType)
class Meta:
abstract = True
class Arguments:
provider = graphene.String(required=True)
code = graphene.String(required=True)
@classmethod
@setup_jwt_cookie
@social_auth
def mutate(cls, root, info, social, **kwargs):
return cls.resolve(root, info, social, **kwargs)
class SocialAuth(mixins.ResolveMixin, SocialAuthMutation):
"""Social Auth Mutation"""
class SocialAuthJWT(mixins.JSONWebTokenMixin, SocialAuthMutation):
"""Social Auth for JSON Web Token (JWT)"""
| [((217, 249), 'graphene.Field', 'graphene.Field', (['types.SocialType'], {}), '(types.SocialType)\n', (231, 249), False, 'import graphene\n'), ((332, 362), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (347, 362), False, 'import graphene\n'), ((378, 408), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (393, 408), False, 'import graphene\n')] |
Juan0001/yellowbrick-docs-zh | yellowbrick/regressor/base.py | 36275d9704fc2a946c5bec5f802106bb5281efd1 | # yellowbrick.regressor.base
# Base classes for regressor Visualizers.
#
# Author: Rebecca Bilbro <[email protected]>
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Jun 03 10:30:36 2016 -0700
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: base.py [7d3f5e6] [email protected] $
"""
Base classes for regressor Visualizers.
"""
##########################################################################
## Imports
##########################################################################
from ..utils import isregressor
from ..base import ScoreVisualizer
from ..exceptions import YellowbrickTypeError
## Packages for export
__all__ = [
"RegressionScoreVisualizer",
]
##########################################################################
## Regression Visualization Base Object
##########################################################################
class RegressionScoreVisualizer(ScoreVisualizer):
"""
Base class for all ScoreVisualizers that evaluate a regression estimator.
The primary functionality of this class is to perform a check to ensure
the passed in estimator is a regressor, otherwise it raises a
``YellowbrickTypeError``.
"""
def __init__(self, model, ax=None, **kwargs):
if not isregressor(model):
raise YellowbrickTypeError(
"This estimator is not a regressor; try a classifier or "
"clustering score visualizer instead!"
)
super(RegressionScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
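# Hypothetical sketch, not part of yellowbrick: the estimator check above can be
# exercised with a scikit-learn classifier, which fails the isregressor() test
# and therefore raises YellowbrickTypeError before any plotting happens.
def _example_regressor_check():
    from sklearn.linear_model import LogisticRegression
    class _DemoVisualizer(RegressionScoreVisualizer):
        pass
    try:
        _DemoVisualizer(LogisticRegression())
    except YellowbrickTypeError as error:
        return error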
| [] |
falkamelung/isce2 | contrib/stack/stripmapStack/crossmul.py | edea69d4b6216f4ac729eba78f12547807a2751a | #!/usr/bin/env python3
import os
import argparse
import logging
import isce
import isceobj
from components.stdproc.stdproc import crossmul
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
def createParser():
'''
Command Line Parser.
'''
parser = argparse.ArgumentParser( description='Generate offset field between two Sentinel swaths')
parser.add_argument('-m', '--master', type=str, dest='master', required=True,
help='Master image')
parser.add_argument('-s', '--slave', type=str, dest='slave', required=True,
help='Slave image')
parser.add_argument('-o', '--outdir', type=str, dest='prefix', default='crossmul',
help='Prefix of output int and amp files')
parser.add_argument('-a', '--alks', type=int, dest='azlooks', default=1,
help='Azimuth looks')
parser.add_argument('-r', '--rlks', type=int, dest='rglooks', default=1,
help='Range looks')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
return parser.parse_args(args=iargs)
def run(imageSlc1, imageSlc2, resampName, azLooks, rgLooks):
objSlc1 = isceobj.createSlcImage()
    # imageSlc1 and imageSlc2 only carry image metadata at this point; copy their attributes and open them as SLC images
IU.copyAttributes(imageSlc1, objSlc1)
objSlc1.setAccessMode('read')
objSlc1.createImage()
objSlc2 = isceobj.createSlcImage()
IU.copyAttributes(imageSlc2, objSlc2)
objSlc2.setAccessMode('read')
objSlc2.createImage()
slcWidth = imageSlc1.getWidth()
intWidth = int(slcWidth / rgLooks)
lines = min(imageSlc1.getLength(), imageSlc2.getLength())
resampAmp = resampName + '.amp'
resampInt = resampName + '.int'
objInt = isceobj.createIntImage()
objInt.setFilename(resampInt)
objInt.setWidth(intWidth)
imageInt = isceobj.createIntImage()
IU.copyAttributes(objInt, imageInt)
objInt.setAccessMode('write')
objInt.createImage()
objAmp = isceobj.createAmpImage()
objAmp.setFilename(resampAmp)
objAmp.setWidth(intWidth)
imageAmp = isceobj.createAmpImage()
IU.copyAttributes(objAmp, imageAmp)
objAmp.setAccessMode('write')
objAmp.createImage()
objCrossmul = crossmul.createcrossmul()
objCrossmul.width = slcWidth
objCrossmul.length = lines
objCrossmul.LooksDown = azLooks
objCrossmul.LooksAcross = rgLooks
objCrossmul.crossmul(objSlc1, objSlc2, objInt, objAmp)
for obj in [objInt, objAmp, objSlc1, objSlc2]:
obj.finalizeImage()
return imageInt, imageAmp
def main(iargs=None):
    '''
    Main driver.
    '''
inps = cmdLineParse(iargs)
img1 = isceobj.createImage()
img1.load(inps.master + '.xml')
img2 = isceobj.createImage()
img2.load(inps.slave + '.xml')
os.makedirs(os.path.dirname(inps.prefix), exist_ok=True)
run(img1, img2, inps.prefix, inps.azlooks, inps.rglooks)
if __name__ == '__main__':
main()
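# Example invocation (illustrative; the file names are hypothetical, and each
# SLC needs its ISCE metadata alongside, since main() loads master.slc.xml and
# slave.slc.xml):
#
#   crossmul.py -m master.slc -s slave.slc -o interferograms/crossmul -a 4 -r 8
#
# This writes interferograms/crossmul.int and interferograms/crossmul.amp,
# taking 4 looks in azimuth and 8 looks in range.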
| [((275, 368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate offset field between two Sentinel swaths"""'}), "(description=\n 'Generate offset field between two Sentinel swaths')\n", (298, 368), False, 'import argparse\n'), ((1151, 1175), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (1173, 1175), False, 'import isceobj\n'), ((1260, 1297), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['imageSlc1', 'objSlc1'], {}), '(imageSlc1, objSlc1)\n', (1277, 1297), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1373, 1397), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (1395, 1397), False, 'import isceobj\n'), ((1402, 1439), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['imageSlc2', 'objSlc2'], {}), '(imageSlc2, objSlc2)\n', (1419, 1439), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1726, 1750), 'isceobj.createIntImage', 'isceobj.createIntImage', ([], {}), '()\n', (1748, 1750), False, 'import isceobj\n'), ((1830, 1854), 'isceobj.createIntImage', 'isceobj.createIntImage', ([], {}), '()\n', (1852, 1854), False, 'import isceobj\n'), ((1859, 1894), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['objInt', 'imageInt'], {}), '(objInt, imageInt)\n', (1876, 1894), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1968, 1992), 'isceobj.createAmpImage', 'isceobj.createAmpImage', ([], {}), '()\n', (1990, 1992), False, 'import isceobj\n'), ((2072, 2096), 'isceobj.createAmpImage', 'isceobj.createAmpImage', ([], {}), '()\n', (2094, 2096), False, 'import isceobj\n'), ((2101, 2136), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['objAmp', 'imageAmp'], {}), '(objAmp, imageAmp)\n', (2118, 2136), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((2215, 2240), 'components.stdproc.stdproc.crossmul.createcrossmul', 'crossmul.createcrossmul', ([], {}), '()\n', (2238, 2240), False, 'from components.stdproc.stdproc import crossmul\n'), ((2617, 2638), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2636, 2638), False, 'import isceobj\n'), ((2687, 2708), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2706, 2708), False, 'import isceobj\n'), ((2761, 2789), 'os.path.dirname', 'os.path.dirname', (['inps.prefix'], {}), '(inps.prefix)\n', (2776, 2789), False, 'import os\n')] |
sunshot/LeetCode | 27. Remove Element/solution2.py | 8f6503201831055f1d49ed3abb25be44a13ec317 | from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
if not nums:
return 0
curr = 0
n = len(nums)
while curr < n:
if nums[curr] == val:
nums[curr] = nums[n-1]
n -= 1
else:
curr += 1
return n
if __name__== '__main__':
solution = Solution()
nums = [3,2,2,3]
val = 3
ans = solution.removeElement(nums, val)
# print(ans)
print(nums[:ans]) | [] |
Granjow/platformio-core | platformio/commands/home/run.py | 71ae579bc07b2e11fec16acda482dea04bc3a359 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from urllib.parse import urlparse
import click
import uvicorn
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.routing import Mount, Route, WebSocketRoute
from starlette.staticfiles import StaticFiles
from starlette.status import HTTP_403_FORBIDDEN
from platformio.commands.home.rpc.handlers.account import AccountRPC
from platformio.commands.home.rpc.handlers.app import AppRPC
from platformio.commands.home.rpc.handlers.ide import IDERPC
from platformio.commands.home.rpc.handlers.misc import MiscRPC
from platformio.commands.home.rpc.handlers.os import OSRPC
from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC
from platformio.commands.home.rpc.handlers.project import ProjectRPC
from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory
from platformio.compat import aio_get_running_loop
from platformio.exception import PlatformioException
from platformio.package.manager.core import get_core_package_dir
from platformio.proc import force_exit
class ShutdownMiddleware:
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
if scope["type"] == "http" and b"__shutdown__" in scope.get("query_string", {}):
await shutdown_server()
await self.app(scope, receive, send)
async def shutdown_server(_=None):
aio_get_running_loop().call_later(0.5, force_exit)
return PlainTextResponse("Server has been shutdown!")
async def protected_page(_):
return PlainTextResponse(
"Protected PlatformIO Home session", status_code=HTTP_403_FORBIDDEN
)
def run_server(host, port, no_open, shutdown_timeout, home_url):
contrib_dir = get_core_package_dir("contrib-piohome")
if not os.path.isdir(contrib_dir):
raise PlatformioException("Invalid path to PIO Home Contrib")
ws_rpc_factory = WebSocketJSONRPCServerFactory(shutdown_timeout)
ws_rpc_factory.addObjectHandler(AccountRPC(), namespace="account")
ws_rpc_factory.addObjectHandler(AppRPC(), namespace="app")
ws_rpc_factory.addObjectHandler(IDERPC(), namespace="ide")
ws_rpc_factory.addObjectHandler(MiscRPC(), namespace="misc")
ws_rpc_factory.addObjectHandler(OSRPC(), namespace="os")
ws_rpc_factory.addObjectHandler(PIOCoreRPC(), namespace="core")
ws_rpc_factory.addObjectHandler(ProjectRPC(), namespace="project")
path = urlparse(home_url).path
routes = [
WebSocketRoute(path + "wsrpc", ws_rpc_factory, name="wsrpc"),
Route(path + "__shutdown__", shutdown_server, methods=["POST"]),
Mount(path, StaticFiles(directory=contrib_dir, html=True), name="static"),
]
if path != "/":
routes.append(Route("/", protected_page))
uvicorn.run(
Starlette(
middleware=[Middleware(ShutdownMiddleware)],
routes=routes,
on_startup=[
lambda: click.echo(
"PIO Home has been started. Press Ctrl+C to shutdown."
),
lambda: None if no_open else click.launch(home_url),
],
),
host=host,
port=port,
log_level="warning",
)
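# Illustrative call, not part of the original module (the values below are
# hypothetical; in practice `pio home` derives them from its CLI options):
#
#     run_server(
#         host="127.0.0.1",
#         port=8008,
#         no_open=False,
#         shutdown_timeout=77,
#         home_url="http://127.0.0.1:8008/",
#     )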
| [((2120, 2166), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (['"""Server has been shutdown!"""'], {}), "('Server has been shutdown!')\n", (2137, 2166), False, 'from starlette.responses import PlainTextResponse\n'), ((2209, 2300), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (['"""Protected PlatformIO Home session"""'], {'status_code': 'HTTP_403_FORBIDDEN'}), "('Protected PlatformIO Home session', status_code=\n HTTP_403_FORBIDDEN)\n", (2226, 2300), False, 'from starlette.responses import PlainTextResponse\n'), ((2395, 2434), 'platformio.package.manager.core.get_core_package_dir', 'get_core_package_dir', (['"""contrib-piohome"""'], {}), "('contrib-piohome')\n", (2415, 2434), False, 'from platformio.package.manager.core import get_core_package_dir\n'), ((2566, 2613), 'platformio.commands.home.rpc.server.WebSocketJSONRPCServerFactory', 'WebSocketJSONRPCServerFactory', (['shutdown_timeout'], {}), '(shutdown_timeout)\n', (2595, 2613), False, 'from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory\n'), ((2446, 2472), 'os.path.isdir', 'os.path.isdir', (['contrib_dir'], {}), '(contrib_dir)\n', (2459, 2472), False, 'import os\n'), ((2488, 2543), 'platformio.exception.PlatformioException', 'PlatformioException', (['"""Invalid path to PIO Home Contrib"""'], {}), "('Invalid path to PIO Home Contrib')\n", (2507, 2543), False, 'from platformio.exception import PlatformioException\n'), ((2650, 2662), 'platformio.commands.home.rpc.handlers.account.AccountRPC', 'AccountRPC', ([], {}), '()\n', (2660, 2662), False, 'from platformio.commands.home.rpc.handlers.account import AccountRPC\n'), ((2721, 2729), 'platformio.commands.home.rpc.handlers.app.AppRPC', 'AppRPC', ([], {}), '()\n', (2727, 2729), False, 'from platformio.commands.home.rpc.handlers.app import AppRPC\n'), ((2784, 2792), 'platformio.commands.home.rpc.handlers.ide.IDERPC', 'IDERPC', ([], {}), '()\n', (2790, 2792), False, 'from platformio.commands.home.rpc.handlers.ide import IDERPC\n'), ((2847, 2856), 'platformio.commands.home.rpc.handlers.misc.MiscRPC', 'MiscRPC', ([], {}), '()\n', (2854, 2856), False, 'from platformio.commands.home.rpc.handlers.misc import MiscRPC\n'), ((2912, 2919), 'platformio.commands.home.rpc.handlers.os.OSRPC', 'OSRPC', ([], {}), '()\n', (2917, 2919), False, 'from platformio.commands.home.rpc.handlers.os import OSRPC\n'), ((2973, 2985), 'platformio.commands.home.rpc.handlers.piocore.PIOCoreRPC', 'PIOCoreRPC', ([], {}), '()\n', (2983, 2985), False, 'from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC\n'), ((3041, 3053), 'platformio.commands.home.rpc.handlers.project.ProjectRPC', 'ProjectRPC', ([], {}), '()\n', (3051, 3053), False, 'from platformio.commands.home.rpc.handlers.project import ProjectRPC\n'), ((3088, 3106), 'urllib.parse.urlparse', 'urlparse', (['home_url'], {}), '(home_url)\n', (3096, 3106), False, 'from urllib.parse import urlparse\n'), ((3135, 3195), 'starlette.routing.WebSocketRoute', 'WebSocketRoute', (["(path + 'wsrpc')", 'ws_rpc_factory'], {'name': '"""wsrpc"""'}), "(path + 'wsrpc', ws_rpc_factory, name='wsrpc')\n", (3149, 3195), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((3205, 3268), 'starlette.routing.Route', 'Route', (["(path + '__shutdown__')", 'shutdown_server'], {'methods': "['POST']"}), "(path + '__shutdown__', shutdown_server, methods=['POST'])\n", (3210, 3268), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((2058, 2080), 'platformio.compat.aio_get_running_loop', 
'aio_get_running_loop', ([], {}), '()\n', (2078, 2080), False, 'from platformio.compat import aio_get_running_loop\n'), ((3290, 3335), 'starlette.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': 'contrib_dir', 'html': '(True)'}), '(directory=contrib_dir, html=True)\n', (3301, 3335), False, 'from starlette.staticfiles import StaticFiles\n'), ((3401, 3427), 'starlette.routing.Route', 'Route', (['"""/"""', 'protected_page'], {}), "('/', protected_page)\n", (3406, 3427), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((3490, 3520), 'starlette.middleware.Middleware', 'Middleware', (['ShutdownMiddleware'], {}), '(ShutdownMiddleware)\n', (3500, 3520), False, 'from starlette.middleware import Middleware\n'), ((3599, 3665), 'click.echo', 'click.echo', (['"""PIO Home has been started. Press Ctrl+C to shutdown."""'], {}), "('PIO Home has been started. Press Ctrl+C to shutdown.')\n", (3609, 3665), False, 'import click\n'), ((3750, 3772), 'click.launch', 'click.launch', (['home_url'], {}), '(home_url)\n', (3762, 3772), False, 'import click\n')] |
stefb965/luci-py | appengine/components/components/machine_provider/rpc_messages.py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Messages for the Machine Provider API."""
# pylint: disable=unused-wildcard-import, wildcard-import
from protorpc import messages
from components.machine_provider.dimensions import *
from components.machine_provider.instructions import *
from components.machine_provider.policies import *
class CatalogMachineRetrievalRequest(messages.Message):
"""Represents a request to retrieve a machine from the catalog."""
# Hostname of the machine to retrieve.
hostname = messages.StringField(1, required=True)
# Backend which added the machine.
backend = messages.EnumField(Backend, 2)
class CatalogMachineRetrievalResponse(messages.Message):
"""Represents a response to a catalog machine retrieval request."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1)
# Policies governing this machine.
policies = messages.MessageField(Policies, 2)
# State of the CatalogMachineEntry.
state = messages.StringField(3)
# Cloud Pub/Sub subscription the machine must listen to for instructions.
pubsub_subscription = messages.StringField(4)
# Project the Cloud Pub/Sub subscription exists in.
pubsub_subscription_project = messages.StringField(5)
# Cloud Pub/Sub topic the machine must be subscribed to.
pubsub_topic = messages.StringField(6)
# Project the Cloud Pub/Sub topic exists in.
pubsub_topic_project = messages.StringField(7)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(8)
class CatalogMachineAdditionRequest(messages.Message):
"""Represents a request to add a machine to the catalog.
dimensions.backend must be specified.
dimensions.hostname must be unique per backend.
"""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
# Policies instance specifying machine-specific configuration.
policies = messages.MessageField(Policies, 2, required=True)
class CatalogMachineBatchAdditionRequest(messages.Message):
"""Represents a batched set of CatalogMachineAdditionRequests.
dimensions.backend must be specified in each CatalogMachineAdditionRequest.
dimensions.hostname must be unique per backend.
"""
# CatalogMachineAdditionRequest instances to batch together.
requests = messages.MessageField(
CatalogMachineAdditionRequest, 1, repeated=True)
class CatalogMachineDeletionRequest(messages.Message):
"""Represents a request to delete a machine in the catalog."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
class CatalogManipulationRequestError(messages.Enum):
"""Represents an error in a catalog manipulation request."""
# Per backend, hostnames must be unique in the catalog.
HOSTNAME_REUSE = 1
# Tried to lookup an entry that didn't exist.
ENTRY_NOT_FOUND = 2
# Didn't specify a backend.
UNSPECIFIED_BACKEND = 3
# Specified backend didn't match the backend originating the request.
MISMATCHED_BACKEND = 4
# Didn't specify a hostname.
UNSPECIFIED_HOSTNAME = 5
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 6
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 7
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 8
# Attempted to delete a leased machine.
LEASED = 9
class CatalogManipulationResponse(messages.Message):
"""Represents a response to a catalog manipulation request."""
# CatalogManipulationRequestError instance indicating an error with the
# request, or None if there is no error.
error = messages.EnumField(CatalogManipulationRequestError, 1)
# CatalogMachineAdditionRequest this response is in reference to.
machine_addition_request = messages.MessageField(
CatalogMachineAdditionRequest, 2)
# CatalogMachineDeletionRequest this response is in reference to.
machine_deletion_request = messages.MessageField(
CatalogMachineDeletionRequest, 3)
class CatalogBatchManipulationResponse(messages.Message):
"""Represents a response to a batched catalog manipulation request."""
responses = messages.MessageField(
CatalogManipulationResponse, 1, repeated=True)
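def _example_catalog_addition(dimensions, policies):
  """Hypothetical sketch, not part of the original module: wraps one machine
  in a batched addition request. The Dimensions and Policies messages are
  assembled by the caller from the imported machine_provider modules.
  """
  request = CatalogMachineAdditionRequest(dimensions=dimensions, policies=policies)
  return CatalogMachineBatchAdditionRequest(requests=[request])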
class LeaseRequest(messages.Message):
"""Represents a request for a lease on a machine."""
# Per-user unique ID used to deduplicate requests.
request_id = messages.StringField(1, required=True)
# Dimensions instance specifying what sort of machine to lease.
dimensions = messages.MessageField(Dimensions, 2, required=True)
# Desired length of the lease in seconds.
duration = messages.IntegerField(3)
# Cloud Pub/Sub topic name to communicate on regarding this request.
pubsub_topic = messages.StringField(4)
# Cloud Pub/Sub project name to communicate on regarding this request.
pubsub_project = messages.StringField(5)
# Instructions to give the machine once it's been leased.
on_lease = messages.MessageField(Instruction, 6)
# UTC seconds from epoch when lease should expire.
lease_expiration_ts = messages.IntegerField(7)
class BatchedLeaseRequest(messages.Message):
"""Represents a batched set of LeaseRequests."""
# LeaseRequest instances to batch together.
requests = messages.MessageField(LeaseRequest, 1, repeated=True)
class LeaseRequestError(messages.Enum):
"""Represents an error in a LeaseRequest."""
# Request IDs are intended to be unique.
# Reusing a request ID in a different request is an error.
REQUEST_ID_REUSE = 1
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 2
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 3
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 4
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 5
# Miscellaneous transient error.
TRANSIENT_ERROR = 6
# Mutually exclusive duration and lease_expiration_ts both specified.
MUTUAL_EXCLUSION_ERROR = 7
# Proposed duration was zero or negative.
NONPOSITIVE_DEADLINE = 8
# Proposed expiration time is not in the future.
LEASE_EXPIRATION_TS_ERROR = 9
# Neither duration nor lease_expiration_ts were specified.
LEASE_LENGTH_UNSPECIFIED = 10
# Requested lease duration is too long.
LEASE_TOO_LONG = 11
class LeaseRequestState(messages.Enum):
"""Represents the state of a LeaseRequest."""
# LeaseRequest has been received, but not processed yet.
UNTRIAGED = 0
# LeaseRequest is pending provisioning of additional capacity.
PENDING = 1
# LeaseRequest has been fulfilled.
FULFILLED = 2
# LeaseRequest has been denied.
DENIED = 3
class LeaseResponse(messages.Message):
"""Represents a response to a LeaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseRequestError instance indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest.
client_request_id = messages.StringField(3, required=True)
# State of the LeaseRequest.
state = messages.EnumField(LeaseRequestState, 4)
# Hostname of the machine available for this request.
hostname = messages.StringField(5)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(6)
class BatchedLeaseResponse(messages.Message):
"""Represents a response to a batched lease request."""
responses = messages.MessageField(LeaseResponse, 1, repeated=True)
class LeaseReleaseRequest(messages.Message):
"""Represents a request to voluntarily cancel a LeaseRequest."""
# Per-user unique ID used to identify the LeaseRequest.
request_id = messages.StringField(1, required=True)
class BatchedLeaseReleaseRequest(messages.Message):
"""Represents a batched set of lease release requests."""
requests = messages.MessageField(LeaseReleaseRequest, 1, repeated=True)
class LeaseReleaseRequestError(messages.Enum):
"""Represents an error in a LeaseReleaseRequest."""
# Request ID referred to non-existent request for this user.
NOT_FOUND = 1
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 2
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 3
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 4
# Miscellaneous transient error.
TRANSIENT_ERROR = 5
class LeaseReleaseResponse(messages.Message):
"""Represents a response to a LeaseReleaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseReleaseRequestError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseReleaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest
# referred to by the LeaseReleaseRequest.
client_request_id = messages.StringField(3, required=True)
class BatchedLeaseReleaseResponse(messages.Message):
"""Represents responses to a batched set of lease release requests."""
responses = messages.MessageField(LeaseReleaseResponse, 1, repeated=True)
class MachineInstructionRequest(messages.Message):
"""Represents a request to send an instruction to a leased machine."""
# Request ID for the fulfilled LeaseRequest whose machine should be
# instructed.
request_id = messages.StringField(1, required=True)
# Instruction to send the leased machine.
instruction = messages.MessageField(Instruction, 2)
class MachineInstructionError(messages.Enum):
"""Represents an error in a MachineInstructionRequest."""
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 1
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 2
# Invalid instruction for the machine.
INVALID_INSTRUCTION = 3
class MachineInstructionResponse(messages.Message):
"""Represents a response to a MachineInstructionRequest."""
# Request ID used by the client to generate the LeaseRequest for the
# machine being instructed.
client_request_id = messages.StringField(1, required=True)
# MachineInstructionError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(MachineInstructionError, 2)
class PollRequest(messages.Message):
"""Represents a request to poll for instructions given to a machine."""
# Hostname of the machine whose instructions to retrieve.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to. Generally required.
backend = messages.EnumField(Backend, 2)
class PollResponse(messages.Message):
"""Represents a response to a request for instructions given to a machine."""
# Instruction given to the machine.
instruction = messages.MessageField(Instruction, 1)
# State of the instruction.
state = messages.StringField(2)
class AckRequest(messages.Message):
"""Represents a request to ack an instruction received by a machine."""
# Hostname of the machine whose instruction to ack.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to.
backend = messages.EnumField(Backend, 2)
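def _example_lease_request(dimensions):
  """Hypothetical sketch, not part of the original module: builds a one-hour
  lease request. `dimensions` is a Dimensions message assembled by the caller
  from components.machine_provider.dimensions.
  """
  return LeaseRequest(
      request_id='example-request-1',
      dimensions=dimensions,
      duration=60 * 60,
      pubsub_topic='example-topic',
      pubsub_project='example-cloud-project',
  )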
| [((650, 688), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (670, 688), False, 'from protorpc import messages\n'), ((738, 768), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (756, 768), False, 'from protorpc import messages\n'), ((978, 1014), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {}), '(Dimensions, 1)\n', (999, 1014), False, 'from protorpc import messages\n'), ((1065, 1099), 'protorpc.messages.MessageField', 'messages.MessageField', (['Policies', '(2)'], {}), '(Policies, 2)\n', (1086, 1099), False, 'from protorpc import messages\n'), ((1148, 1171), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {}), '(3)\n', (1168, 1171), False, 'from protorpc import messages\n'), ((1272, 1295), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (1292, 1295), False, 'from protorpc import messages\n'), ((1382, 1405), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (1402, 1405), False, 'from protorpc import messages\n'), ((1482, 1505), 'protorpc.messages.StringField', 'messages.StringField', (['(6)'], {}), '(6)\n', (1502, 1505), False, 'from protorpc import messages\n'), ((1578, 1601), 'protorpc.messages.StringField', 'messages.StringField', (['(7)'], {}), '(7)\n', (1598, 1601), False, 'from protorpc import messages\n'), ((1695, 1719), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(8)'], {}), '(8)\n', (1716, 1719), False, 'from protorpc import messages\n'), ((2013, 2064), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {'required': '(True)'}), '(Dimensions, 1, required=True)\n', (2034, 2064), False, 'from protorpc import messages\n'), ((2143, 2192), 'protorpc.messages.MessageField', 'messages.MessageField', (['Policies', '(2)'], {'required': '(True)'}), '(Policies, 2, required=True)\n', (2164, 2192), False, 'from protorpc import messages\n'), ((2531, 2601), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineAdditionRequest', '(1)'], {'repeated': '(True)'}), '(CatalogMachineAdditionRequest, 1, repeated=True)\n', (2552, 2601), False, 'from protorpc import messages\n'), ((2811, 2862), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {'required': '(True)'}), '(Dimensions, 1, required=True)\n', (2832, 2862), False, 'from protorpc import messages\n'), ((3846, 3900), 'protorpc.messages.EnumField', 'messages.EnumField', (['CatalogManipulationRequestError', '(1)'], {}), '(CatalogManipulationRequestError, 1)\n', (3864, 3900), False, 'from protorpc import messages\n'), ((3998, 4053), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineAdditionRequest', '(2)'], {}), '(CatalogMachineAdditionRequest, 2)\n', (4019, 4053), False, 'from protorpc import messages\n'), ((4158, 4213), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineDeletionRequest', '(3)'], {}), '(CatalogMachineDeletionRequest, 3)\n', (4179, 4213), False, 'from protorpc import messages\n'), ((4368, 4436), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogManipulationResponse', '(1)'], {'repeated': '(True)'}), '(CatalogManipulationResponse, 1, repeated=True)\n', (4389, 4436), False, 'from protorpc import messages\n'), ((4607, 4645), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, 
required=True)\n', (4627, 4645), False, 'from protorpc import messages\n'), ((4727, 4778), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(2)'], {'required': '(True)'}), '(Dimensions, 2, required=True)\n', (4748, 4778), False, 'from protorpc import messages\n'), ((4836, 4860), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {}), '(3)\n', (4857, 4860), False, 'from protorpc import messages\n'), ((4949, 4972), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (4969, 4972), False, 'from protorpc import messages\n'), ((5065, 5088), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (5085, 5088), False, 'from protorpc import messages\n'), ((5162, 5199), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(6)'], {}), '(Instruction, 6)\n', (5183, 5199), False, 'from protorpc import messages\n'), ((5277, 5301), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(7)'], {}), '(7)\n', (5298, 5301), False, 'from protorpc import messages\n'), ((5459, 5512), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseRequest', '(1)'], {'repeated': '(True)'}), '(LeaseRequest, 1, repeated=True)\n', (5480, 5512), False, 'from protorpc import messages\n'), ((6982, 7005), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (7002, 7005), False, 'from protorpc import messages\n'), ((7119, 7159), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseRequestError', '(2)'], {}), '(LeaseRequestError, 2)\n', (7137, 7159), False, 'from protorpc import messages\n'), ((7246, 7284), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {'required': '(True)'}), '(3, required=True)\n', (7266, 7284), False, 'from protorpc import messages\n'), ((7326, 7366), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseRequestState', '(4)'], {}), '(LeaseRequestState, 4)\n', (7344, 7366), False, 'from protorpc import messages\n'), ((7436, 7459), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (7456, 7459), False, 'from protorpc import messages\n'), ((7553, 7577), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(6)'], {}), '(6)\n', (7574, 7577), False, 'from protorpc import messages\n'), ((7698, 7752), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseResponse', '(1)'], {'repeated': '(True)'}), '(LeaseResponse, 1, repeated=True)\n', (7719, 7752), False, 'from protorpc import messages\n'), ((7940, 7978), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (7960, 7978), False, 'from protorpc import messages\n'), ((8106, 8166), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseReleaseRequest', '(1)'], {'repeated': '(True)'}), '(LeaseReleaseRequest, 1, repeated=True)\n', (8127, 8166), False, 'from protorpc import messages\n'), ((8841, 8864), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (8861, 8864), False, 'from protorpc import messages\n'), ((8976, 9023), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseReleaseRequestError', '(2)'], {}), '(LeaseReleaseRequestError, 2)\n', (8994, 9023), False, 'from protorpc import messages\n'), ((9153, 9191), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {'required': '(True)'}), '(3, required=True)\n', (9173, 9191), False, 'from protorpc import messages\n'), ((9334, 9395), 
'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseReleaseResponse', '(1)'], {'repeated': '(True)'}), '(LeaseReleaseResponse, 1, repeated=True)\n', (9355, 9395), False, 'from protorpc import messages\n'), ((9623, 9661), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (9643, 9661), False, 'from protorpc import messages\n'), ((9722, 9759), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(2)'], {}), '(Instruction, 2)\n', (9743, 9759), False, 'from protorpc import messages\n'), ((10357, 10395), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (10377, 10395), False, 'from protorpc import messages\n'), ((10506, 10552), 'protorpc.messages.EnumField', 'messages.EnumField', (['MachineInstructionError', '(2)'], {}), '(MachineInstructionError, 2)\n', (10524, 10552), False, 'from protorpc import messages\n'), ((10739, 10777), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (10759, 10777), False, 'from protorpc import messages\n'), ((10846, 10876), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (10864, 10876), False, 'from protorpc import messages\n'), ((11051, 11088), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(1)'], {}), '(Instruction, 1)\n', (11072, 11088), False, 'from protorpc import messages\n'), ((11129, 11152), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {}), '(2)\n', (11149, 11152), False, 'from protorpc import messages\n'), ((11332, 11370), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (11352, 11370), False, 'from protorpc import messages\n'), ((11419, 11449), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (11437, 11449), False, 'from protorpc import messages\n')] |
carvalho-fdec/DesafioDSA | webscraping.py | fec9742bd77ddc3923ed616b6511cce87de48968 | # webscraping test
import urllib.request
from bs4 import BeautifulSoup
with urllib.request.urlopen('http://www.netvasco.com.br') as url:
page = url.read()
#print(page)
print(url.geturl())
print(url.info())
print(url.getcode())
# Analise o html na variável 'page' e armazene-o no formato Beautiful Soup
soup = BeautifulSoup(page, 'html.parser')
#print(soup.prettify())
print(soup.title)
print(soup.title.string)
print(soup.title.name)
soup_a = soup.find_all('a')[:10]
for a in soup_a:
print(a.get('href'))
print(a.get_text())
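# Further illustration (added example, not in the original script): the same
# soup object also supports CSS selectors; print the first few absolute links.
for link in soup.select('a[href^="http"]')[:5]:
    print(link['href'])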
| [((333, 367), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (346, 367), False, 'from bs4 import BeautifulSoup\n')] |
hongxu-jia/tensorboard | tensorboard/backend/event_processing/data_provider_test.py | 98d4dadc61fd5a0580bed808653c59fb37748893 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.backend.event_processing.data_provider`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.compat.proto import summary_pb2
from tensorboard.data import provider as base_provider
from tensorboard.plugins.graph import metadata as graph_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.histogram import summary_v2 as histogram_summary
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.plugins.scalar import summary_v2 as scalar_summary
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.image import summary_v2 as image_summary
from tensorboard.util import tensor_util
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
tf1.enable_eager_execution()
class MultiplexerDataProviderTest(tf.test.TestCase):
def setUp(self):
super(MultiplexerDataProviderTest, self).setUp()
self.logdir = self.get_temp_dir()
self.ctx = context.RequestContext()
logdir = os.path.join(self.logdir, "polynomials")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar(
"square", i ** 2, step=2 * i, description="boxen"
)
scalar_summary.scalar("cube", i ** 3, step=3 * i)
logdir = os.path.join(self.logdir, "waves")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
scalar_summary.scalar(
"square", tf.sign(tf.sin(float(i))), step=i
)
# Summary with rank-0 data but not owned by the scalars plugin.
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "marigraphs"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"high_tide", tensor=i, step=i, metadata=metadata
)
# Summary with rank-1 data of scalar data class (bad!).
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "greetings"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"bad", tensor=[i, i], step=i, metadata=metadata
)
logdir = os.path.join(self.logdir, "lebesgue")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
]
for (description, distribution, name) in data:
tensor = tf.constant([distribution], dtype=tf.float64)
for i in xrange(1, 11):
histogram_summary.histogram(
name, tensor * i, step=i, description=description
)
logdir = os.path.join(self.logdir, "mondrian")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("red", (221, 28, 38), "top-right"),
("blue", (1, 91, 158), "bottom-left"),
("yellow", (239, 220, 111), "bottom-right"),
]
for (name, color, description) in data:
image_1x1 = tf.constant([[[color]]], dtype=tf.uint8)
for i in xrange(1, 11):
# Use a non-monotonic sequence of sample sizes to
# test `max_length` calculation.
k = 6 - abs(6 - i) # 1, .., 6, .., 2
# a `k`-sample image summary of `i`-by-`i` images
image = tf.tile(image_1x1, [k, i, i, 1])
image_summary.image(
name,
image,
step=i,
description=description,
max_outputs=99,
)
def create_multiplexer(self):
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
return multiplexer
def create_provider(self):
multiplexer = self.create_multiplexer()
return data_provider.MultiplexerDataProvider(multiplexer, self.logdir)
def test_data_location(self):
provider = self.create_provider()
result = provider.data_location(self.ctx, experiment_id="unused")
self.assertEqual(result, self.logdir)
def test_list_plugins_with_no_graph(self):
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_plugins_with_graph(self):
with tf.compat.v1.Graph().as_default() as graph:
writer = tf.compat.v1.summary.FileWriter(self.logdir)
writer.add_graph(graph)
writer.flush()
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
graph_metadata.PLUGIN_NAME,
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_runs(self):
# We can't control the timestamps of events written to disk (without
# manually reading the tfrecords, modifying the data, and writing
# them back out), so we provide a fake multiplexer instead.
start_times = {
"second_2": 2.0,
"first": 1.5,
"no_time": None,
"second_1": 2.0,
}
class FakeMultiplexer(object):
def Runs(multiplexer):
result = ["second_2", "first", "no_time", "second_1"]
self.assertItemsEqual(result, start_times)
return result
def FirstEventTimestamp(multiplexer, run):
self.assertIn(run, start_times)
result = start_times[run]
if result is None:
raise ValueError("No event timestep could be found")
else:
return result
multiplexer = FakeMultiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, "fake_logdir"
)
result = provider.list_runs(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
base_provider.Run(
run_id=run, run_name=run, start_time=start_time
)
for (run, start_time) in six.iteritems(start_times)
],
)
def test_list_scalars_all(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
sample = result["polynomials"]["square"]
self.assertIsInstance(sample, base_provider.ScalarTimeSeries)
self.assertEqual(sample.max_step, 18)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "boxen")
def test_list_scalars_filters(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["waves"], ["square"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
tags=["square", "quartic"]
),
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(runs=["waves", "hugs"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["sine", "square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["un"], ["likely"]),
)
self.assertEqual(result, {})
def test_read_scalars(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["waves", "polynomials", "unicorns"],
tags=["sine", "square", "cube", "iridescence"],
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
self.assertEqual(
datum.value,
tensor_util.make_ndarray(event.tensor_proto).item(),
)
def test_read_scalars_downsamples(self):
# TODO(@wchargin): Verify that this always includes the most
# recent datum, as specified by the interface.
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["waves"]["sine"], 3)
def test_read_scalars_but_not_rank_0(self):
provider = self.create_provider()
run_tag_filter = base_provider.RunTagFilter(["waves"], ["bad"])
# No explicit checks yet.
with six.assertRaisesRegex(
self,
ValueError,
"can only convert an array of size 1 to a Python scalar",
):
provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name="greetings",
run_tag_filter=run_tag_filter,
downsample=100,
)
def test_list_tensors_all(self):
provider = self.create_provider()
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
sample = result["lebesgue"]["uniform"]
self.assertIsInstance(sample, base_provider.TensorTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "very smooth")
def test_list_tensors_filters(self):
provider = self.create_provider()
# Quick check only, as scalars and tensors use the same underlying
# filtering implementation.
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
["lebesgue"], ["uniform"]
),
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform"])
def test_read_tensors(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["lebesgue"],
tags=["uniform", "bimodal"],
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
np.testing.assert_equal(
datum.numpy,
tensor_util.make_ndarray(event.tensor_proto),
)
def test_read_tensors_downsamples(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["lebesgue"]["uniform"], 3)
def test_list_blob_sequences(self):
provider = self.create_provider()
with self.subTest("finds all time series for a plugin"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertIsInstance(sample, base_provider.BlobSequenceTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(sample.max_length, 6 + 2)
self.assertEqual(sample.description, "bottom-left")
self.assertEqual(sample.display_name, "")
with self.subTest("filters by run/tag"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"],
base_provider.BlobSequenceTimeSeries,
)
def test_read_blob_sequences_and_read_blob(self):
provider = self.create_provider()
with self.subTest("reads all time series for a plugin"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
downsample=4,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertLen(sample, 4) # downsampled from 10
last = sample[-1]
self.assertIsInstance(last, base_provider.BlobSequenceDatum)
self.assertEqual(last.step, 10)
self.assertLen(last.values, 2 + 2)
blobs = [
provider.read_blob(self.ctx, blob_key=v.blob_key)
for v in last.values
]
self.assertEqual(blobs[0], b"10")
self.assertEqual(blobs[1], b"10")
self.assertStartsWith(blobs[2], b"\x89PNG")
self.assertStartsWith(blobs[3], b"\x89PNG")
blue1 = blobs[2]
blue2 = blobs[3]
red1 = provider.read_blob(
self.ctx,
blob_key=result["mondrian"]["red"][-1].values[2].blob_key,
)
self.assertEqual(blue1, blue2)
self.assertNotEqual(blue1, red1)
with self.subTest("filters by run/tag"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
downsample=1,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"][0],
base_provider.BlobSequenceDatum,
)
class DownsampleTest(tf.test.TestCase):
"""Tests for the `_downsample` private helper function."""
def test_deterministic(self):
xs = "abcdefg"
expected = data_provider._downsample(xs, k=4)
for _ in range(100):
actual = data_provider._downsample(xs, k=4)
self.assertEqual(actual, expected)
def test_underlong_ok(self):
xs = list("abcdefg")
actual = data_provider._downsample(xs, k=10)
expected = list("abcdefg")
self.assertIsNot(actual, xs)
self.assertEqual(actual, expected)
def test_inorder(self):
xs = list(range(10000))
actual = data_provider._downsample(xs, k=100)
self.assertEqual(actual, sorted(actual))
def test_zero(self):
xs = "abcdefg"
actual = data_provider._downsample(xs, k=0)
self.assertEqual(actual, [])
if __name__ == "__main__":
tf.test.main()
| [((1874, 1902), 'tensorflow.compat.v1.enable_eager_execution', 'tf1.enable_eager_execution', ([], {}), '()\n', (1900, 1902), True, 'import tensorflow.compat.v1 as tf1\n'), ((21426, 21440), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (21438, 21440), True, 'import tensorflow.compat.v2 as tf\n'), ((2097, 2121), 'tensorboard.context.RequestContext', 'context.RequestContext', ([], {}), '()\n', (2119, 2121), False, 'from tensorboard import context\n'), ((2140, 2180), 'os.path.join', 'os.path.join', (['self.logdir', '"""polynomials"""'], {}), "(self.logdir, 'polynomials')\n", (2152, 2180), False, 'import os\n'), ((2490, 2524), 'os.path.join', 'os.path.join', (['self.logdir', '"""waves"""'], {}), "(self.logdir, 'waves')\n", (2502, 2524), False, 'import os\n'), ((3604, 3641), 'os.path.join', 'os.path.join', (['self.logdir', '"""lebesgue"""'], {}), "(self.logdir, 'lebesgue')\n", (3616, 3641), False, 'import os\n'), ((4219, 4256), 'os.path.join', 'os.path.join', (['self.logdir', '"""mondrian"""'], {}), "(self.logdir, 'mondrian')\n", (4231, 4256), False, 'import os\n'), ((5301, 5337), 'tensorboard.backend.event_processing.plugin_event_multiplexer.EventMultiplexer', 'event_multiplexer.EventMultiplexer', ([], {}), '()\n', (5335, 5337), True, 'from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer\n'), ((5543, 5606), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (5580, 5606), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((7931, 7996), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', '"""fake_logdir"""'], {}), "(multiplexer, 'fake_logdir')\n", (7968, 7996), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((10987, 11050), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (11024, 11050), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((11099, 11221), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['waves', 'polynomials', 'unicorns']", 'tags': "['sine', 'square', 'cube', 'iridescence']"}), "(runs=['waves', 'polynomials', 'unicorns'], tags=\n ['sine', 'square', 'cube', 'iridescence'])\n", (11125, 11221), True, 'from tensorboard.data import provider as base_provider\n'), ((12523, 12586), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (12560, 12586), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((12963, 13009), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['waves']", "['bad']"], {}), "(['waves'], ['bad'])\n", (12989, 13009), True, 'from tensorboard.data import provider as base_provider\n'), ((15005, 15068), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (15042, 15068), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((15117, 15191), 'tensorboard.data.provider.RunTagFilter', 
'base_provider.RunTagFilter', ([], {'runs': "['lebesgue']", 'tags': "['uniform', 'bimodal']"}), "(runs=['lebesgue'], tags=['uniform', 'bimodal'])\n", (15143, 15191), True, 'from tensorboard.data import provider as base_provider\n'), ((16292, 16355), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (16329, 16355), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((20693, 20727), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(4)'}), '(xs, k=4)\n', (20718, 20727), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((20940, 20975), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(10)'}), '(xs, k=10)\n', (20965, 20975), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((21169, 21205), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(100)'}), '(xs, k=100)\n', (21194, 21205), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((21321, 21355), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(0)'}), '(xs, k=0)\n', (21346, 21355), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((2267, 2277), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (2273, 2277), False, 'from six.moves import xrange\n'), ((2611, 2621), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (2617, 2621), False, 'from six.moves import xrange\n'), ((6376, 6420), 'tensorflow.compat.v2.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['self.logdir'], {}), '(self.logdir)\n', (6407, 6420), True, 'import tensorflow.compat.v2 as tf\n'), ((13057, 13158), 'six.assertRaisesRegex', 'six.assertRaisesRegex', (['self', 'ValueError', '"""can only convert an array of size 1 to a Python scalar"""'], {}), "(self, ValueError,\n 'can only convert an array of size 1 to a Python scalar')\n", (13078, 13158), False, 'import six\n'), ((20778, 20812), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(4)'}), '(xs, k=4)\n', (20803, 20812), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((2295, 2367), 'tensorboard.plugins.scalar.summary_v2.scalar', 'scalar_summary.scalar', (['"""square"""', '(i ** 2)'], {'step': '(2 * i)', 'description': '"""boxen"""'}), "('square', i ** 2, step=2 * i, description='boxen')\n", (2316, 2367), True, 'from tensorboard.plugins.scalar import summary_v2 as scalar_summary\n'), ((2422, 2471), 'tensorboard.plugins.scalar.summary_v2.scalar', 'scalar_summary.scalar', (['"""cube"""', '(i ** 3)'], {'step': '(3 * i)'}), "('cube', i ** 3, step=3 * i)\n", (2443, 2471), True, 'from tensorboard.plugins.scalar import summary_v2 as scalar_summary\n'), ((2923, 2952), 'tensorboard.compat.proto.summary_pb2.SummaryMetadata', 'summary_pb2.SummaryMetadata', ([], {}), '()\n', (2950, 2952), False, 'from tensorboard.compat.proto import summary_pb2\n'), ((3101, 3167), 'tensorflow.compat.v2.summary.write', 'tf.summary.write', (['"""high_tide"""'], {'tensor': 'i', 'step': 'i', 'metadata': 'metadata'}), "('high_tide', tensor=i, step=i, metadata=metadata)\n", (3117, 3167), True, 'import tensorflow.compat.v2 as tf\n'), ((3305, 
3334), 'tensorboard.compat.proto.summary_pb2.SummaryMetadata', 'summary_pb2.SummaryMetadata', ([], {}), '()\n', (3332, 3334), False, 'from tensorboard.compat.proto import summary_pb2\n'), ((3482, 3547), 'tensorflow.compat.v2.summary.write', 'tf.summary.write', (['"""bad"""'], {'tensor': '[i, i]', 'step': 'i', 'metadata': 'metadata'}), "('bad', tensor=[i, i], step=i, metadata=metadata)\n", (3498, 3547), True, 'import tensorflow.compat.v2 as tf\n'), ((3970, 4015), 'tensorflow.compat.v2.constant', 'tf.constant', (['[distribution]'], {'dtype': 'tf.float64'}), '([distribution], dtype=tf.float64)\n', (3981, 4015), True, 'import tensorflow.compat.v2 as tf\n'), ((4041, 4054), 'six.moves.xrange', 'xrange', (['(1)', '(11)'], {}), '(1, 11)\n', (4047, 4054), False, 'from six.moves import xrange\n'), ((4606, 4646), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[color]]]'], {'dtype': 'tf.uint8'}), '([[[color]]], dtype=tf.uint8)\n', (4617, 4646), True, 'import tensorflow.compat.v2 as tf\n'), ((4672, 4685), 'six.moves.xrange', 'xrange', (['(1)', '(11)'], {}), '(1, 11)\n', (4678, 4685), False, 'from six.moves import xrange\n'), ((8170, 8236), 'tensorboard.data.provider.Run', 'base_provider.Run', ([], {'run_id': 'run', 'run_name': 'run', 'start_time': 'start_time'}), '(run_id=run, run_name=run, start_time=start_time)\n', (8187, 8236), True, 'from tensorboard.data import provider as base_provider\n'), ((9574, 9623), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['waves']", "['square']"], {}), "(['waves'], ['square'])\n", (9600, 9623), True, 'from tensorboard.data import provider as base_provider\n'), ((9936, 9990), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'tags': "['square', 'quartic']"}), "(tags=['square', 'quartic'])\n", (9962, 9990), True, 'from tensorboard.data import provider as base_provider\n'), ((10420, 10470), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['waves', 'hugs']"}), "(runs=['waves', 'hugs'])\n", (10446, 10470), True, 'from tensorboard.data import provider as base_provider\n'), ((10791, 10837), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['un']", "['likely']"], {}), "(['un'], ['likely'])\n", (10817, 10837), True, 'from tensorboard.data import provider as base_provider\n'), ((14680, 14733), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['lebesgue']", "['uniform']"], {}), "(['lebesgue'], ['uniform'])\n", (14706, 14733), True, 'from tensorboard.data import provider as base_provider\n'), ((2194, 2231), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (2223, 2231), True, 'import tensorflow.compat.v2 as tf\n'), ((2538, 2575), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (2567, 2575), True, 'import tensorflow.compat.v2 as tf\n'), ((3655, 3692), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (3684, 3692), True, 'import tensorflow.compat.v2 as tf\n'), ((4076, 4154), 'tensorboard.plugins.histogram.summary_v2.histogram', 'histogram_summary.histogram', (['name', '(tensor * i)'], {'step': 'i', 'description': 'description'}), '(name, tensor * i, step=i, description=description)\n', (4103, 4154), True, 'from tensorboard.plugins.histogram import summary_v2 as histogram_summary\n'), ((4270, 4307), 
'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (4299, 4307), True, 'import tensorflow.compat.v2 as tf\n'), ((4966, 4998), 'tensorflow.compat.v2.tile', 'tf.tile', (['image_1x1', '[k, i, i, 1]'], {}), '(image_1x1, [k, i, i, 1])\n', (4973, 4998), True, 'import tensorflow.compat.v2 as tf\n'), ((5019, 5104), 'tensorboard.plugins.image.summary_v2.image', 'image_summary.image', (['name', 'image'], {'step': 'i', 'description': 'description', 'max_outputs': '(99)'}), '(name, image, step=i, description=description,\n max_outputs=99)\n', (5038, 5104), True, 'from tensorboard.plugins.image import summary_v2 as image_summary\n'), ((6311, 6331), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (6329, 6331), True, 'import tensorflow.compat.v2 as tf\n'), ((8316, 8342), 'six.iteritems', 'six.iteritems', (['start_times'], {}), '(start_times)\n', (8329, 8342), False, 'import six\n'), ((17875, 17963), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['mondrian', 'picasso']", 'tags': '[\'yellow\', "green\'t"]'}), '(runs=[\'mondrian\', \'picasso\'], tags=[\'yellow\',\n "green\'t"])\n', (17901, 17963), True, 'from tensorboard.data import provider as base_provider\n'), ((20060, 20148), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['mondrian', 'picasso']", 'tags': '[\'yellow\', "green\'t"]'}), '(runs=[\'mondrian\', \'picasso\'], tags=[\'yellow\',\n "green\'t"])\n', (20086, 20148), True, 'from tensorboard.data import provider as base_provider\n'), ((16111, 16155), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['event.tensor_proto'], {}), '(event.tensor_proto)\n', (16135, 16155), False, 'from tensorboard.util import tensor_util\n'), ((12211, 12255), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['event.tensor_proto'], {}), '(event.tensor_proto)\n', (12235, 12255), False, 'from tensorboard.util import tensor_util\n')] |
luyang1210/tensorflow | extras/amld/cloud/quickdraw_rnn/task.py | 948324f4cafdc97ae51c0e44fc1c28677a6e2e8a | """Experiment wrapper for training on Cloud ML."""
import argparse, glob, os
import tensorflow as tf
# From this package.
import model
def generate_experiment_fn(data_dir, train_batch_size, eval_batch_size,
train_steps, eval_steps, cell_size, hidden,
**experiment_args):
"""Returns experiment_fn for a RNN classifier.
Args:
data_dir: Where {train,eval}-* tf.train.Example datasets can be found.
train_batch_size: Batch size during training.
    eval_batch_size: Batch size during evaluation.
train_steps: Number of training steps.
eval_steps: Number of evaluation steps.
cell_size: LSTM cell size.
hidden: Number of units in hidden layers (note that None means "use default"
      which is equivalent to [] -- see code in model).
experiment_args: Additional arguments when `tf.contrib.learn.Experiment`
is instantiated.
"""
classes = tf.gfile.Open('%s/labels.txt' % data_dir).read().splitlines()
n_classes = len(classes)
params = tf.contrib.training.HParams(
cell_size=cell_size,
hidden=hidden or None, # Default is empty list.
)
config = tf.contrib.learn.RunConfig()
def _experiment_fn(output_dir):
return tf.contrib.learn.Experiment(
model.build_estimator(output_dir, n_classes, params, config),
train_input_fn=model.make_input_fn_stroke(
files_pattern=os.path.join(data_dir, 'train-*'),
batch_size=train_batch_size),
eval_input_fn=model.make_input_fn_stroke(
files_pattern=os.path.join(data_dir, 'eval-*'),
batch_size=eval_batch_size),
export_strategies=[
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
model.serving_input_fn,
exports_to_keep=1)
],
train_steps=train_steps,
eval_steps=eval_steps,
**experiment_args
)
return _experiment_fn
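# Illustrative sketch (not part of the original script): the returned
# experiment_fn is a closure over everything except output_dir, so it can also
# be invoked directly. The data path and output directory below are
# hypothetical placeholders.
#
#   experiment_fn = generate_experiment_fn(
#       data_dir='gs://my-bucket/quickdraw',
#       train_batch_size=100, eval_batch_size=100,
#       train_steps=10000, eval_steps=100,
#       cell_size=256, hidden=[128],
#       min_eval_frequency=1)
#   experiment = experiment_fn('/tmp/quickdraw_output')
#   experiment.train_and_evaluate()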
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
help='GCS or local path to training data',
required=True
)
parser.add_argument(
'--train_batch_size',
help='Batch size for training steps',
type=int,
default=100
)
parser.add_argument(
'--eval_batch_size',
help='Batch size for evaluation steps',
type=int,
default=100
)
parser.add_argument(
'--train_steps',
help='Steps to run the training job for.',
type=int,
default=10000
)
parser.add_argument(
'--eval_steps',
help='Number of steps to run evalution for at each checkpoint',
default=100,
type=int
)
parser.add_argument(
'--output_dir',
help='GCS location to write checkpoints and export models',
required=True
)
parser.add_argument(
'--job-dir',
help='this model ignores this field, but it is required by gcloud',
default='junk'
)
parser.add_argument(
'--eval_delay_secs',
help='How long to wait before running first evaluation',
default=10,
type=int
)
parser.add_argument(
'--min_eval_frequency',
help='Minimum number of training steps between evaluations',
default=1,
type=int
)
# Hyper parameters.
parser.add_argument(
'--cell_size',
help='LSTM cell size.',
default=256,
type=int
)
parser.add_argument(
'--hidden',
help='Units in hidden layers.',
default=(),
nargs='+',
type=int
)
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
output_dir = arguments.pop('output_dir')
# Run the training job
tf.contrib.learn.learn_runner.run(
generate_experiment_fn(**arguments), output_dir)
| [((1043, 1114), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'cell_size': 'cell_size', 'hidden': '(hidden or None)'}), '(cell_size=cell_size, hidden=hidden or None)\n', (1070, 1114), True, 'import tensorflow as tf\n'), ((1169, 1197), 'tensorflow.contrib.learn.RunConfig', 'tf.contrib.learn.RunConfig', ([], {}), '()\n', (1195, 1197), True, 'import tensorflow as tf\n'), ((1991, 2032), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2015, 2032), True, 'import tensorflow as tf\n'), ((2045, 2070), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2068, 2070), False, 'import argparse, glob, os\n'), ((1281, 1341), 'model.build_estimator', 'model.build_estimator', (['output_dir', 'n_classes', 'params', 'config'], {}), '(output_dir, n_classes, params, config)\n', (1302, 1341), False, 'import model\n'), ((942, 983), 'tensorflow.gfile.Open', 'tf.gfile.Open', (["('%s/labels.txt' % data_dir)"], {}), "('%s/labels.txt' % data_dir)\n", (955, 983), True, 'import tensorflow as tf\n'), ((1680, 1796), 'tensorflow.contrib.learn.utils.saved_model_export_utils.make_export_strategy', 'tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy', (['model.serving_input_fn'], {'exports_to_keep': '(1)'}), '(model.\n serving_input_fn, exports_to_keep=1)\n', (1748, 1796), True, 'import tensorflow as tf\n'), ((1418, 1451), 'os.path.join', 'os.path.join', (['data_dir', '"""train-*"""'], {}), "(data_dir, 'train-*')\n", (1430, 1451), False, 'import argparse, glob, os\n'), ((1567, 1599), 'os.path.join', 'os.path.join', (['data_dir', '"""eval-*"""'], {}), "(data_dir, 'eval-*')\n", (1579, 1599), False, 'import argparse, glob, os\n')] |
johnggo/Codeforces-Solutions | A/116A.py | 4127ae6f72294b5781fb94c42b69cfef570aae42 | # Time: 310 ms
# Memory: 1664 KB
# Codeforces 116A: report the maximum number of passengers on the tram at any moment.
n = int(input())
e = 0  # maximum occupancy seen so far
s = 0  # current number of passengers
for _ in range(n):
    # Each line holds "a b" (passengers exiting and entering). Replacing the
    # space with '-' and eval'ing gives a - b, so subtracting it adds b - a.
    s = s - eval(input().replace(' ', '-'))
    e = max(e, s)
print(e)
| [] |
aferrall/redner | tests/test_serialize.py | be52e4105140f575f153d640ba889eb6e6015616 | import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
# Round-trip the scene through its state dict to exercise (de)serialization.
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
| [((1526, 1577), 'pyredner.Scene', 'pyredner.Scene', (['cam', 'shapes', 'materials', 'area_lights'], {}), '(cam, shapes, materials, area_lights)\n', (1540, 1577), False, 'import pyredner\n'), ((1625, 1673), 'pyredner.Scene.load_state_dict', 'pyredner.Scene.load_state_dict', (['scene_state_dict'], {}), '(scene_state_dict)\n', (1655, 1673), False, 'import pyredner\n'), ((1688, 1775), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(16)', 'max_bounces': '(1)'}), '(scene=scene, num_samples=16,\n max_bounces=1)\n', (1727, 1775), False, 'import pyredner\n'), ((82, 112), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, -5.0]'], {}), '([0.0, 0.0, -5.0])\n', (94, 112), False, 'import torch\n'), ((146, 175), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (158, 175), False, 'import torch\n'), ((204, 233), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (216, 233), False, 'import torch\n'), ((263, 283), 'torch.tensor', 'torch.tensor', (['[45.0]'], {}), '([45.0])\n', (275, 283), False, 'import torch\n'), ((1463, 1495), 'torch.tensor', 'torch.tensor', (['[20.0, 20.0, 20.0]'], {}), '([20.0, 20.0, 20.0])\n', (1475, 1495), False, 'import torch\n'), ((545, 566), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (564, 566), False, 'import pyredner\n'), ((728, 749), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (747, 749), False, 'import pyredner\n'), ((830, 851), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (849, 851), False, 'import pyredner\n'), ((1149, 1170), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (1168, 1170), False, 'import pyredner\n'), ((1261, 1282), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (1280, 1282), False, 'import pyredner\n')] |
Shoobx/zope.publisher | src/zope/publisher/tests/test_requestdataproperty.py | 790e82045d7ae06146bd8c5e27139555b9ec1641 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Request Data-Property Tests
"""
from unittest import TestCase, makeSuite
from zope.interface.common.tests.basemapping \
import testIEnumerableMapping, testIReadMapping
from zope.publisher.base \
import RequestDataProperty, RequestDataGetter, RequestDataMapper
class TestDataGettr(RequestDataGetter):
_gettrname = 'getSomething'
class TestDataMapper(RequestDataMapper):
_mapname = '_data'
_marker = object()
class Data(object):
def getSomething(self, name, default=_marker):
if name.startswith('Z'):
return "something %s" % name
if default is not _marker:
return default
raise KeyError(name)
something = RequestDataProperty(TestDataGettr)
somedata = RequestDataProperty(TestDataMapper)
class Test(TestCase):
def testRequestDataGettr(self):
testIReadMapping(self, Data().something,
{"Zope": "something Zope"}, ["spam"])
def testRequestDataMapper(self):
data = Data()
sample = {'foo': 'Foo', 'bar': 'Bar'}
data._data = sample
inst = data.somedata
testIReadMapping(self, inst, sample, ["spam"])
testIEnumerableMapping(self, inst, sample)
def testNoAssign(self):
data = Data()
try:
data.something = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
try:
data.somedata = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
def test_suite():
return makeSuite(Test)
| [((1334, 1368), 'zope.publisher.base.RequestDataProperty', 'RequestDataProperty', (['TestDataGettr'], {}), '(TestDataGettr)\n', (1353, 1368), False, 'from zope.publisher.base import RequestDataProperty, RequestDataGetter, RequestDataMapper\n'), ((1384, 1419), 'zope.publisher.base.RequestDataProperty', 'RequestDataProperty', (['TestDataMapper'], {}), '(TestDataMapper)\n', (1403, 1419), False, 'from zope.publisher.base import RequestDataProperty, RequestDataGetter, RequestDataMapper\n'), ((2286, 2301), 'unittest.makeSuite', 'makeSuite', (['Test'], {}), '(Test)\n', (2295, 2301), False, 'from unittest import TestCase, makeSuite\n'), ((1764, 1810), 'zope.interface.common.tests.basemapping.testIReadMapping', 'testIReadMapping', (['self', 'inst', 'sample', "['spam']"], {}), "(self, inst, sample, ['spam'])\n", (1780, 1810), False, 'from zope.interface.common.tests.basemapping import testIEnumerableMapping, testIReadMapping\n'), ((1819, 1861), 'zope.interface.common.tests.basemapping.testIEnumerableMapping', 'testIEnumerableMapping', (['self', 'inst', 'sample'], {}), '(self, inst, sample)\n', (1841, 1861), False, 'from zope.interface.common.tests.basemapping import testIEnumerableMapping, testIReadMapping\n')] |
ahemphill/digitalbuildings | tools/scoring/dimensions/__init__.py | 56a03b0055f9f771c3ed0a962f6bfb2b1d968947 | """ Enable import """
from os import path
import sys
sys.path.append(
path.abspath(path.join('tools', 'validators', 'instance_validator')))
| [((89, 143), 'os.path.join', 'path.join', (['"""tools"""', '"""validators"""', '"""instance_validator"""'], {}), "('tools', 'validators', 'instance_validator')\n", (98, 143), False, 'from os import path\n')] |
drorvinkler/thornfield | src/thornfield/caches/cache_compression_decorator.py | 3c5bb8afaa96097bc71cccb119394a0f351d828f | from typing import Callable, AnyStr, Optional
from zlib import compress as default_compress, decompress as default_decompress
from .cache import Cache
from ..constants import NOT_FOUND
class CacheCompressionDecorator(Cache):
def __init__(
self,
cache: Cache,
compress: Optional[Callable[[str], AnyStr]] = ...,
decompress: Optional[Callable[[AnyStr], str]] = ...,
) -> None:
super().__init__()
self._cache = cache
if compress is None:
self._compress = self._noop
elif compress is ...:
self._compress = self._default_compress
else:
self._compress = compress
if decompress is None:
self._decompress = self._noop
elif decompress is ...:
self._decompress = self._default_decompress
else:
self._decompress = decompress
def get(self, key):
value = self._cache.get(key)
return value if value is NOT_FOUND else self._decompress(value)
def set(self, key, value, expiration: int) -> None:
self._cache.set(key, self._compress(value), expiration)
@staticmethod
def _noop(x):
return x
@staticmethod
def _default_compress(obj: str) -> bytes:
return default_compress(obj.encode("UTF-8"))
@staticmethod
def _default_decompress(data: bytes) -> str:
return default_decompress(data).decode("UTF-8")
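# Usage sketch (illustrative only; ``InMemoryCache`` is a hypothetical Cache
# subclass, not part of this package):
#
#   cache = CacheCompressionDecorator(InMemoryCache())
#   cache.set('greeting', 'hello world', expiration=60)  # stored zlib-compressed
#   assert cache.get('greeting') == 'hello world'        # decompressed on read
#
# Passing compress=None / decompress=None turns compression off entirely,
# while custom callables allow other codecs (e.g. bz2, base64).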
| [((1403, 1427), 'zlib.decompress', 'default_decompress', (['data'], {}), '(data)\n', (1421, 1427), True, 'from zlib import compress as default_compress, decompress as default_decompress\n')] |
kdkasad/manim | manim/mobject/vector_field.py | 249b1dcab0f18a43e953b5fda517734084c0a941 | """Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
This class does by default not include any visible elements but provides
methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
"""
return lambda p: func(p - shift_vector)
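        # Sketch of assumed usage (mirrors the ``scale_func`` example below):
        #   func = lambda pos: pos / 3
        #   shifted = VectorField.shift_func(func, 2 * RIGHT)
        #   shifted(2 * RIGHT)  # equals func([0, 0, 0]), i.e. the zero vector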
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
            A scalar factor applied to the distance the mobject is moved along the vector field.
            The actual distance is based on the magnitude of the vector field.
        substeps
            The number of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
:class:`~.Mobject`. If `True` the vector field takes effect on the
points of the individual points of the :class:`~.Mobject`,
potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
def runge_kutta(self, p: Sequence[float], step_size: float) -> float:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
                The position of the point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
float
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
            A scalar factor applied to the distance the mobject is moved along the vector field.
            The actual distance is based on the magnitude of the vector field.
        substeps
            The number of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
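        # Sketch of assumed usage: attach the updater so a mobject drifts along
        # the field every frame (cf. the Nudging example above):
        #   dot = Dot()
        #   dot.add_updater(vector_field.get_nudge_updater(speed=2))
        #   self.add(dot)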
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
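        # Sketch of assumed usage inside a Scene: let the arrows of an
        # ArrowVectorField drift along their own field, then freeze them again:
        #   field = ArrowVectorField(func)
        #   field.start_submobject_movement(speed=0.5)
        #   self.wait(2)
        #   field.stop_submobject_movement()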
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
The color at each position is calculated by passing the positing through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
The stepsize at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
        y_array = y_array.repeat(pw, axis=1)
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
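        # Sketch of assumed usage: the returned PIL image is what StreamLines
        # feeds to ``color_using_background_image`` further below; it can also
        # be saved for inspection (the output path is hypothetical):
        #   img = field.get_colored_background_image(sampling_rate=3)
        #   img.save("field_colors.png")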
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
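        # Sketch of assumed usage (cf. the OpenGL branch of StreamLines below):
        #   to_rgba = field.get_vectorized_rgba_gradient_function(0, 2, [BLUE_E, RED])
        #   to_rgba([0.0, 1.0, 2.0], opacity=0.8)  # -> array of shape (3, 4)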
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
The values of this functions is displayed as a grid of vectors.
By default the color of each vector is determined by it's magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
length_func
        The function determining the displayed size of the vectors. The actual size
        of the vector is passed in, and the returned value is used as the display size of
        the vector. By default this caps the displayed size of vectors to reduce clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
        Notes
        -----
        Arguments for the :class:`~.Vector` constructor are taken from
        ``self.vector_config``.
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
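        # Sketch of assumed usage: single vectors can be created outside the
        # regular grid, e.g. to highlight one sample point:
        #   field = ArrowVectorField(func)
        #   field.add(field.get_vector(0.5 * UP + 0.5 * RIGHT))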
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
The values of this functions is displayed by moving many agents along the vector field
and showing their trace.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
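        # Editorial note on the loop above: ranges supplied as [min, max] get a
        # default step of 0.5, and each upper bound is shifted up by one step so
        # that np.arange further below also covers the original endpoint.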
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
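        # Each start point below is advanced with simple forward-Euler steps
        # (new_point = last_point + dt * func(last_point)) until the agent
        # leaves the padded generation box or max_steps is reached.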
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
        The stream lines will flow continuously.
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
            The proportion of the stream line shown while being animated.
        rate_func
            The rate function of each stream line flashing.
        line_animation_class
            The animation class being used.
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
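        # Each line keeps its own clock in line.time; the random offset above
        # desynchronises the flashes, and with warm_up the clock starts out
        # negative, which acts as a per-line delay before its first flash.
        # The updater below advances these clocks and wraps them around at
        # self.virtual_time.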
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
        creation_starting_speed = creation_rate_func(0.001) * 1000
        creation_run_time = (
            max_run_time / (1 + self.time_width) * creation_starting_speed
        )
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
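        # Editorial sketch of that reasoning (not in the original source): the
        # flash front sweeps a normalised length of roughly (1 + time_width) in
        # max_run_time, i.e. it starts at speed (1 + time_width) / max_run_time,
        # while Create(rate_func=ease_out_sine) starts at speed
        # ease_out_sine'(0) / creation_run_time, and creation_rate_func(0.001) * 1000
        # approximates ease_out_sine'(0) (about pi / 2). Equating the two speeds
        # gives the formula above.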
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
| [((13292, 13313), 'numpy.zeros', 'np.zeros', (['(ph, pw, 3)'], {}), '((ph, pw, 3))\n', (13300, 13313), True, 'import numpy as np\n'), ((13332, 13364), 'numpy.linspace', 'np.linspace', (['(-fw / 2)', '(fw / 2)', 'pw'], {}), '(-fw / 2, fw / 2, pw)\n', (13343, 13364), True, 'import numpy as np\n'), ((13383, 13415), 'numpy.linspace', 'np.linspace', (['(fh / 2)', '(-fh / 2)', 'ph'], {}), '(fh / 2, -fh / 2, ph)\n', (13394, 13415), True, 'import numpy as np\n'), ((13744, 13797), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.pos_to_rgb', '(2)', 'points_array'], {}), '(self.pos_to_rgb, 2, points_array)\n', (13763, 13797), True, 'import numpy as np\n'), ((20776, 20800), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (20785, 20800), True, 'import numpy as np\n'), ((20819, 20843), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (20828, 20843), True, 'import numpy as np\n'), ((20862, 20886), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (20871, 20886), True, 'import numpy as np\n'), ((20910, 20947), 'itertools.product', 'it.product', (['x_range', 'y_range', 'z_range'], {}), '(x_range, y_range, z_range)\n', (20920, 20947), True, 'import itertools as it\n'), ((21607, 21629), 'numpy.linalg.norm', 'np.linalg.norm', (['output'], {}), '(output)\n', (21621, 21629), True, 'import numpy as np\n'), ((27667, 27684), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (27681, 27684), True, 'import numpy as np\n'), ((32745, 32771), 'random.shuffle', 'random.shuffle', (['animations'], {}), '(animations)\n', (32759, 32771), False, 'import random\n'), ((14691, 14712), 'numpy.clip', 'np.clip', (['alphas', '(0)', '(1)'], {}), '(alphas, 0, 1)\n', (14698, 14712), True, 'import numpy as np\n'), ((28612, 28635), 'math.ceil', 'ceil', (['(virtual_time / dt)'], {}), '(virtual_time / dt)\n', (28616, 28635), False, 'from math import ceil, floor\n'), ((14652, 14668), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (14660, 14668), True, 'import numpy as np\n'), ((19628, 19661), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (19633, 19661), False, 'from math import ceil, floor\n'), ((19675, 19706), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (19679, 19706), False, 'from math import ceil, floor\n'), ((19766, 19800), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (19771, 19800), False, 'from math import ceil, floor\n'), ((19814, 19846), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (19818, 19846), False, 'from math import ceil, floor\n'), ((26366, 26399), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (26371, 26399), False, 'from math import ceil, floor\n'), ((26413, 26444), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (26417, 26444), False, 'from math import ceil, floor\n'), ((26504, 26538), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (26509, 26538), False, 'from math import ceil, floor\n'), ((26552, 26584), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (26556, 26584), False, 'from math import ceil, floor\n'), ((34762, 34777), 'random.random', 'random.random', ([], {}), '()\n', (34775, 34777), False, 'import random\n'), 
((2995, 3012), 'numpy.linalg.norm', 'np.linalg.norm', (['p'], {}), '(p)\n', (3009, 3012), True, 'import numpy as np\n'), ((27984, 28008), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (27993, 28008), True, 'import numpy as np\n'), ((28034, 28058), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (28043, 28058), True, 'import numpy as np\n'), ((28084, 28108), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (28093, 28108), True, 'import numpy as np\n'), ((35152, 35197), 'numpy.clip', 'np.clip', (['(line.time / line.anim.run_time)', '(0)', '(1)'], {}), '(line.time / line.anim.run_time, 0, 1)\n', (35159, 35197), True, 'import numpy as np\n'), ((27892, 27911), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (27908, 27911), True, 'import numpy as np\n'), ((30366, 30389), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (30374, 30389), True, 'import numpy as np\n')] |
dan-starkware/marshmallow_dataclass | marshmallow_dataclass/__init__.py | 25c3e041d8c6a87d740984e57a5bd29b768afbf8 | """
This library allows the conversion of python 3.7's :mod:`dataclasses`
to :mod:`marshmallow` schemas.
It takes a python class, and generates a marshmallow schema for it.
Simple example::
from marshmallow import Schema
from marshmallow_dataclass import dataclass
@dataclass
class Point:
x:float
y:float
point = Point(x=0, y=0)
point_json = Point.Schema().dumps(point)
Full example::
from marshmallow import Schema
from dataclasses import field
from marshmallow_dataclass import dataclass
    from typing import ClassVar, Type
    import datetime
    import marshmallow
@dataclass
class User:
birth: datetime.date = field(metadata= {
"required": True # A parameter to pass to marshmallow's field
})
website:str = field(metadata = {
"marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
})
Schema: ClassVar[Type[Schema]] = Schema # For the type checker
"""
import inspect
from enum import EnumMeta
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import dataclasses
import marshmallow
import typing_inspect
__all__ = ["dataclass", "add_schema", "class_schema", "field_for_schema", "NewType"]
NoneType = type(None)
_U = TypeVar("_U")
# Whitelist of dataclass members that will be copied to generated schema.
MEMBERS_WHITELIST: Set[str] = {"Meta"}
# Max number of generated schemas that class_schema keeps of generated schemas. Removes duplicates.
MAX_CLASS_SCHEMA_CACHE_SIZE = 1024
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls: Type[_U] = None,
*,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
):
"""
This decorator does the same as dataclasses.dataclass, but also applies :func:`add_schema`.
It adds a `.Schema` attribute to the class object
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> @dataclass
... class Artist:
... name: str
>>> Artist.Schema
<class 'marshmallow.schema.Artist'>
>>> from typing import ClassVar
>>> from marshmallow import Schema
>>> @dataclass(order=True) # preserve field order
... class Point:
... x:float
... y:float
... Schema: ClassVar[Type[Schema]] = Schema # For the type checker
...
>>> Point.Schema().load({'x':0, 'y':0}) # This line can be statically type checked
Point(x=0.0, y=0.0)
"""
# dataclass's typing doesn't expect it to be called as a function, so ignore type check
dc = dataclasses.dataclass( # type: ignore
_cls, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
if _cls is None:
return lambda cls: add_schema(dc(cls), base_schema)
return add_schema(dc, base_schema)
@overload
def add_schema(_cls: Type[_U]) -> Type[_U]:
...
@overload
def add_schema(
base_schema: Type[marshmallow.Schema] = None,
) -> Callable[[Type[_U]], Type[_U]]:
...
@overload
def add_schema(
_cls: Type[_U], base_schema: Type[marshmallow.Schema] = None
) -> Type[_U]:
...
def add_schema(_cls=None, base_schema=None):
"""
This decorator adds a marshmallow schema as the 'Schema' attribute in a dataclass.
It uses :func:`class_schema` internally.
:param type cls: The dataclass to which a Schema should be added
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> class BaseSchema(marshmallow.Schema):
... def on_bind_field(self, field_name, field_obj):
... field_obj.data_key = (field_obj.data_key or field_name).upper()
>>> @add_schema(base_schema=BaseSchema)
... @dataclasses.dataclass
... class Artist:
... names: Tuple[str, str]
>>> artist = Artist.Schema().loads('{"NAMES": ["Martin", "Ramirez"]}')
>>> artist
Artist(names=('Martin', 'Ramirez'))
"""
def decorator(clazz: Type[_U]) -> Type[_U]:
clazz.Schema = class_schema(clazz, base_schema) # type: ignore
return clazz
return decorator(_cls) if _cls else decorator
def class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Convert a class to a marshmallow schema
:param clazz: A python class (may be a dataclass)
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
:return: A marshmallow Schema corresponding to the dataclass
.. note::
All the arguments supported by marshmallow field classes can
be passed in the `metadata` dictionary of a field.
If you want to use a custom marshmallow field
(one that has no equivalent python type), you can pass it as the
``marshmallow_field`` key in the metadata dictionary.
>>> import typing
>>> Meters = typing.NewType('Meters', float)
>>> @dataclasses.dataclass()
... class Building:
... height: Optional[Meters]
... name: str = dataclasses.field(default="anonymous")
... class Meta:
... ordered = True
...
>>> class_schema(Building) # Returns a marshmallow schema class (not an instance)
<class 'marshmallow.schema.Building'>
>>> @dataclasses.dataclass()
... class City:
... name: str = dataclasses.field(metadata={'required':True})
... best_building: Building # Reference to another dataclasses. A schema will be created for it too.
... other_buildings: List[Building] = dataclasses.field(default_factory=lambda: [])
...
>>> citySchema = class_schema(City)()
>>> city = citySchema.load({"name":"Paris", "best_building": {"name": "Eiffel Tower"}})
>>> city
City(name='Paris', best_building=Building(height=None, name='Eiffel Tower'), other_buildings=[])
>>> citySchema.load({"name":"Paris"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'best_building': ['Missing data for required field.']}
>>> city_json = citySchema.dump(city)
>>> city_json['best_building'] # We get an OrderedDict because we specified order = True in the Meta class
OrderedDict([('height', None), ('name', 'Eiffel Tower')])
>>> @dataclasses.dataclass()
... class Person:
... name: str = dataclasses.field(default="Anonymous")
... friends: List['Person'] = dataclasses.field(default_factory=lambda:[]) # Recursive field
...
>>> person = class_schema(Person)().load({
... "friends": [{"name": "Roger Boucher"}]
... })
>>> person
Person(name='Anonymous', friends=[Person(name='Roger Boucher', friends=[])])
>>> @dataclasses.dataclass()
... class C:
... important: int = dataclasses.field(init=True, default=0)
... # Only fields that are in the __init__ method will be added:
... unimportant: int = dataclasses.field(init=False, default=0)
...
>>> c = class_schema(C)().load({
... "important": 9, # This field will be imported
... "unimportant": 9 # This field will NOT be imported
... }, unknown=marshmallow.EXCLUDE)
>>> c
C(important=9, unimportant=0)
>>> @dataclasses.dataclass
... class Website:
... url:str = dataclasses.field(metadata = {
... "marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
... })
...
>>> class_schema(Website)().load({"url": "I am not a good URL !"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'url': ['Not a valid URL.']}
>>> @dataclasses.dataclass
... class NeverValid:
... @marshmallow.validates_schema
... def validate(self, data, **_):
... raise marshmallow.ValidationError('never valid')
...
>>> class_schema(NeverValid)().load({})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'_schema': ['never valid']}
>>> # noinspection PyTypeChecker
>>> class_schema(None) # unsupported type
Traceback (most recent call last):
...
TypeError: None is not a dataclass and cannot be turned into one.
>>> @dataclasses.dataclass
... class Anything:
... name: str
... @marshmallow.validates('name')
... def validates(self, value):
... if len(value) > 5: raise marshmallow.ValidationError("Name too long")
>>> class_schema(Anything)().load({"name": "aaaaaargh"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'name': ['Name too long']}
"""
return _proxied_class_schema(clazz, base_schema)
@lru_cache(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)
def _proxied_class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
try:
# noinspection PyDataclass
fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz)
except TypeError: # Not a dataclass
try:
return class_schema(dataclasses.dataclass(clazz), base_schema)
except Exception:
raise TypeError(
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
)
# Copy all marshmallow hooks and whitelisted members of the dataclass to the schema.
attributes = {
k: v
for k, v in inspect.getmembers(clazz)
if hasattr(v, "__marshmallow_hook__") or k in MEMBERS_WHITELIST
}
# Update the schema members to contain marshmallow fields instead of dataclass fields
attributes.update(
(
field.name,
field_for_schema(
field.type, _get_field_default(field), field.metadata, base_schema
),
)
for field in fields
if field.init
)
schema_class = type(clazz.__name__, (_base_schema(clazz, base_schema),), attributes)
return cast(Type[marshmallow.Schema], schema_class)
def _field_by_type(
typ: Union[type, Any], base_schema: Optional[Type[marshmallow.Schema]]
) -> Optional[Type[marshmallow.fields.Field]]:
return (
base_schema and base_schema.TYPE_MAPPING.get(typ)
) or marshmallow.Schema.TYPE_MAPPING.get(typ)
def _field_by_supertype(
typ: Type,
default: marshmallow.missing,
newtype_supertype: Type,
metadata: dict,
base_schema: Optional[Type[marshmallow.Schema]],
) -> marshmallow.fields.Field:
"""
Return a new field for fields based on a super field. (Usually spawned from NewType)
"""
# Add the information coming our custom NewType implementation
typ_args = getattr(typ, "_marshmallow_args", {})
# Handle multiple validators from both `typ` and `metadata`.
# See https://github.com/lovasoa/marshmallow_dataclass/issues/91
new_validators: List[Callable] = []
for meta_dict in (typ_args, metadata):
if "validate" in meta_dict:
if marshmallow.utils.is_iterable_but_not_string(meta_dict["validate"]):
new_validators.extend(meta_dict["validate"])
elif callable(meta_dict["validate"]):
new_validators.append(meta_dict["validate"])
metadata["validate"] = new_validators if new_validators else None
metadata = {"description": typ.__name__, **typ_args, **metadata}
field = getattr(typ, "_marshmallow_field", None)
if field:
return field(**metadata)
else:
return field_for_schema(
newtype_supertype,
metadata=metadata,
default=default,
base_schema=base_schema,
)
def field_for_schema(
typ: type,
default=marshmallow.missing,
metadata: Mapping[str, Any] = None,
base_schema: Optional[Type[marshmallow.Schema]] = None,
) -> marshmallow.fields.Field:
"""
Get a marshmallow Field corresponding to the given python type.
The metadata of the dataclass field is used as arguments to the marshmallow Field.
:param typ: The type for which a field should be generated
:param default: value to use for (de)serialization when the field is missing
:param metadata: Additional parameters to pass to the marshmallow field constructor
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> int_field = field_for_schema(int, default=9, metadata=dict(required=True))
>>> int_field.__class__
<class 'marshmallow.fields.Integer'>
>>> int_field.default
9
>>> field_for_schema(str, metadata={"marshmallow_field": marshmallow.fields.Url()}).__class__
<class 'marshmallow.fields.Url'>
"""
metadata = {} if metadata is None else dict(metadata)
if default is not marshmallow.missing:
metadata.setdefault("default", default)
# 'missing' must not be set for required fields.
if not metadata.get("required"):
metadata.setdefault("missing", default)
else:
metadata.setdefault("required", True)
# If the field was already defined by the user
predefined_field = metadata.get("marshmallow_field")
if predefined_field:
return predefined_field
# Generic types specified without type arguments
if typ is list:
typ = List[Any]
elif typ is dict:
typ = Dict[Any, Any]
# Base types
field = _field_by_type(typ, base_schema)
if field:
return field(**metadata)
if typ is Any:
metadata.setdefault("allow_none", True)
return marshmallow.fields.Raw(**metadata)
# Generic types
origin = typing_inspect.get_origin(typ)
if origin:
arguments = typing_inspect.get_args(typ, True)
# Override base_schema.TYPE_MAPPING to change the class used for generic types below
type_mapping = base_schema.TYPE_MAPPING if base_schema else {}
if origin in (list, List):
child_type = field_for_schema(arguments[0], base_schema=base_schema)
list_type = type_mapping.get(List, marshmallow.fields.List)
return list_type(child_type, **metadata)
if origin in (tuple, Tuple):
children = tuple(
field_for_schema(arg, base_schema=base_schema) for arg in arguments
)
tuple_type = type_mapping.get(Tuple, marshmallow.fields.Tuple)
return tuple_type(children, **metadata)
elif origin in (dict, Dict):
dict_type = type_mapping.get(Dict, marshmallow.fields.Dict)
return dict_type(
keys=field_for_schema(arguments[0], base_schema=base_schema),
values=field_for_schema(arguments[1], base_schema=base_schema),
**metadata,
)
elif typing_inspect.is_optional_type(typ):
subtyp = next(t for t in arguments if t is not NoneType) # type: ignore
# Treat optional types as types with a None default
metadata["default"] = metadata.get("default", None)
metadata["missing"] = metadata.get("missing", None)
metadata["required"] = False
return field_for_schema(subtyp, metadata=metadata, base_schema=base_schema)
elif typing_inspect.is_union_type(typ):
from . import union_field
return union_field.Union(
[
(
subtyp,
field_for_schema(
subtyp, metadata=metadata, base_schema=base_schema
),
)
for subtyp in arguments
],
**metadata,
)
# typing.NewType returns a function with a __supertype__ attribute
newtype_supertype = getattr(typ, "__supertype__", None)
if newtype_supertype and inspect.isfunction(typ):
return _field_by_supertype(
typ=typ,
default=default,
newtype_supertype=newtype_supertype,
metadata=metadata,
base_schema=base_schema,
)
# enumerations
if isinstance(typ, EnumMeta):
import marshmallow_enum
return marshmallow_enum.EnumField(typ, **metadata)
# Nested marshmallow dataclass
nested_schema = getattr(typ, "Schema", None)
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
nested = (
nested_schema or forward_reference or class_schema(typ, base_schema=base_schema)
)
return marshmallow.fields.Nested(nested, **metadata)
def _base_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Base schema factory that creates a schema for `clazz` derived either from `base_schema`
or `BaseSchema`
"""
# Remove `type: ignore` when mypy handles dynamic base classes
# https://github.com/python/mypy/issues/2813
class BaseSchema(base_schema or marshmallow.Schema): # type: ignore
def load(self, data: Mapping, *, many: bool = None, **kwargs):
all_loaded = super().load(data, many=many, **kwargs)
many = self.many if many is None else bool(many)
if many:
return [clazz(**loaded) for loaded in all_loaded]
else:
return clazz(**all_loaded)
return BaseSchema
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default
def NewType(
name: str,
typ: Type[_U],
field: Optional[Type[marshmallow.fields.Field]] = None,
**kwargs,
) -> Callable[[_U], _U]:
"""NewType creates simple unique types
to which you can attach custom marshmallow attributes.
All the keyword arguments passed to this function will be transmitted
to the marshmallow field constructor.
>>> import marshmallow.validate
>>> IPv4 = NewType('IPv4', str, validate=marshmallow.validate.Regexp(r'^([0-9]{1,3}\\.){3}[0-9]{1,3}$'))
>>> @dataclass
... class MyIps:
... ips: List[IPv4]
>>> MyIps.Schema().load({"ips": ["0.0.0.0", "grumble grumble"]})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'ips': {1: ['String does not match expected pattern.']}}
>>> MyIps.Schema().load({"ips": ["127.0.0.1"]})
MyIps(ips=['127.0.0.1'])
>>> Email = NewType('Email', str, field=marshmallow.fields.Email)
>>> @dataclass
... class ContactInfo:
... mail: Email = dataclasses.field(default="[email protected]")
>>> ContactInfo.Schema().load({})
ContactInfo(mail='[email protected]')
>>> ContactInfo.Schema().load({"mail": "grumble grumble"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'mail': ['Not a valid email address.']}
"""
def new_type(x: _U):
return x
new_type.__name__ = name
new_type.__supertype__ = typ # type: ignore
new_type._marshmallow_field = field # type: ignore
new_type._marshmallow_args = kwargs # type: ignore
return new_type
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| [((1344, 1357), 'typing.TypeVar', 'TypeVar', (['"""_U"""'], {}), "('_U')\n", (1351, 1357), False, 'from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((8970, 9016), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'MAX_CLASS_SCHEMA_CACHE_SIZE'}), '(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)\n', (8979, 9016), False, 'from functools import lru_cache\n'), ((2894, 2997), 'dataclasses.dataclass', 'dataclasses.dataclass', (['_cls'], {'repr': 'repr', 'eq': 'eq', 'order': 'order', 'unsafe_hash': 'unsafe_hash', 'frozen': 'frozen'}), '(_cls, repr=repr, eq=eq, order=order, unsafe_hash=\n unsafe_hash, frozen=frozen)\n', (2915, 2997), False, 'import dataclasses\n'), ((10263, 10307), 'typing.cast', 'cast', (['Type[marshmallow.Schema]', 'schema_class'], {}), '(Type[marshmallow.Schema], schema_class)\n', (10267, 10307), False, 'from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((13897, 13927), 'typing_inspect.get_origin', 'typing_inspect.get_origin', (['typ'], {}), '(typ)\n', (13922, 13927), False, 'import typing_inspect\n'), ((16804, 16849), 'marshmallow.fields.Nested', 'marshmallow.fields.Nested', (['nested'], {}), '(nested, **metadata)\n', (16829, 16849), False, 'import marshmallow\n'), ((19870, 19899), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(True)'}), '(verbose=True)\n', (19885, 19899), False, 'import doctest\n'), ((9240, 9265), 'dataclasses.fields', 'dataclasses.fields', (['clazz'], {}), '(clazz)\n', (9258, 9265), False, 'import dataclasses\n'), ((10532, 10572), 'marshmallow.Schema.TYPE_MAPPING.get', 'marshmallow.Schema.TYPE_MAPPING.get', (['typ'], {}), '(typ)\n', (10567, 10572), False, 'import marshmallow\n'), ((13828, 13862), 'marshmallow.fields.Raw', 'marshmallow.fields.Raw', ([], {}), '(**metadata)\n', (13850, 13862), False, 'import marshmallow\n'), ((13963, 13997), 'typing_inspect.get_args', 'typing_inspect.get_args', (['typ', '(True)'], {}), '(typ, True)\n', (13986, 13997), False, 'import typing_inspect\n'), ((16125, 16148), 'inspect.isfunction', 'inspect.isfunction', (['typ'], {}), '(typ)\n', (16143, 16148), False, 'import inspect\n'), ((16465, 16508), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['typ'], {}), '(typ, **metadata)\n', (16491, 16508), False, 'import marshmallow_enum\n'), ((9717, 9742), 'inspect.getmembers', 'inspect.getmembers', (['clazz'], {}), '(clazz)\n', (9735, 9742), False, 'import inspect\n'), ((11277, 11344), 'marshmallow.utils.is_iterable_but_not_string', 'marshmallow.utils.is_iterable_but_not_string', (["meta_dict['validate']"], {}), "(meta_dict['validate'])\n", (11321, 11344), False, 'import marshmallow\n'), ((15048, 15084), 'typing_inspect.is_optional_type', 'typing_inspect.is_optional_type', (['typ'], {}), '(typ)\n', (15079, 15084), False, 'import typing_inspect\n'), ((9352, 9380), 'dataclasses.dataclass', 'dataclasses.dataclass', (['clazz'], {}), '(clazz)\n', (9373, 9380), False, 'import dataclasses\n'), ((15505, 15538), 'typing_inspect.is_union_type', 'typing_inspect.is_union_type', (['typ'], {}), '(typ)\n', (15533, 15538), False, 'import typing_inspect\n')] |
TheSin-/electrum-trc | electrum_trc/scripts/txradar.py | d2f5b15fd4399a9248cce0d63e20128f3f54e69c | #!/usr/bin/env python3
import sys
import asyncio
from electrum_trc.network import filter_protocol, Network
from electrum_trc.util import create_and_start_event_loop, log_exceptions
try:
txid = sys.argv[1]
except IndexError:
print("usage: txradar txid")
sys.exit(1)
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()
@log_exceptions
async def f():
try:
peers = await network.get_peers()
peers = filter_protocol(peers, 's')
results = await network.send_multiple_requests(peers, 'blockchain.transaction.get', [txid])
r1, r2 = [], []
for k, v in results.items():
(r1 if not isinstance(v, Exception) else r2).append(k)
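        # r1 collects the servers that returned the transaction, r2 those that
        # answered with an error, so the ratio below estimates how widely the
        # transaction has propagated among the sampled peers.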
print(f"Received {len(results)} answers")
        try:
            propagation = len(r1) * 100. / (len(r1) + len(r2))
        except ZeroDivisionError:
            propagation = 0
print(f"Propagation rate: {propagation:.1f} percent")
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
| [((305, 334), 'electrum_trc.util.create_and_start_event_loop', 'create_and_start_event_loop', ([], {}), '()\n', (332, 334), False, 'from electrum_trc.util import create_and_start_event_loop, log_exceptions\n'), ((345, 354), 'electrum_trc.network.Network', 'Network', ([], {}), '()\n', (352, 354), False, 'from electrum_trc.network import filter_protocol, Network\n'), ((257, 268), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (265, 268), False, 'import sys\n'), ((470, 497), 'electrum_trc.network.filter_protocol', 'filter_protocol', (['peers', '"""s"""'], {}), "(peers, 's')\n", (485, 497), False, 'from electrum_trc.network import filter_protocol, Network\n')] |
kagemeka/atcoder-submissions | jp.atcoder/dp/dp_g/24586988.py | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | import sys
import typing
import numpy as np
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
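  # g is sorted by source vertex above, so g[i[u]:i[u + 1]] holds exactly the
  # edges leaving u (a CSR-style row pointer built with searchsorted). The
  # loop below processes vertices in Kahn's topological order and keeps
  # dist[v] = length of the longest path ending at v.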
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
signature = (i8, i8[:, :])
cc.export(
fn.__name__,
signature,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
| [((115, 142), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (123, 142), True, 'import numpy as np\n'), ((347, 374), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (355, 374), True, 'import numpy as np\n'), ((791, 802), 'my_module.solve', 'solve', (['n', 'g'], {}), '(n, g)\n', (796, 802), False, 'from my_module import solve\n'), ((912, 927), 'numba.pycc.CC', 'CC', (['"""my_module"""'], {}), "('my_module')\n", (914, 927), False, 'from numba.pycc import CC\n'), ((261, 277), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (270, 277), True, 'import numpy as np\n'), ((721, 737), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (735, 737), False, 'import sys\n')] |
jkerpe/TroubleBubble | starteMessung.py | 813ad797398b9f338f136bcb96c6c92186d92ebf | from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time
programmstart = time.time()
# Parse arguments (e.g. run 'starteMessung.py -n 100' from the terminal)
ap = argparse.ArgumentParser(description="""Skript zum Aufnehmen von Bildern der Teststrecke und der
Volumenbestimmung von Luftblasen""")
ap.add_argument("-n", "--number", default=400, type=int, help="Anzahl an Frames die aufgenommen werden sollen. Default: 400 Bilder")
ap.add_argument("-fr", "--framerate", default=100, type=int, help="Framerate in fps. Richtwerte: <Flow 3 ml/s:50 fps, 3-6ml/s:100 fps, >6ml/s:200 fps; Default: 100 fps")
args = vars(ap.parse_args())
# Extract the parsed arguments
numberOfImagesToGrab = args['number']
framerate = args['framerate']
if __name__ == '__main__':
startzeit = time.time()
    # Check whether a camera is connected
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
if len(devices) == 0:
print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
        exit()
    # Check whether the pressure sensor is connected
try:
bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2) # receive 2 bytes
except OSError:
print("Kein Drucksensor angeschlossen")
exit()
    # Generate a unique directory name from the current time and the parameters
dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname) # create the directory
print(f"Ordnername: {dirname}")
beginn = time.time()-programmstart
    # Start the capture and processing threads
t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
t_aufnahme.start()
t_tracke.start()
t_aufnahme.join()
t_tracke.join()
| [((193, 204), 'time.time', 'time.time', ([], {}), '()\n', (202, 204), False, 'import time\n'), ((294, 481), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Skript zum Aufnehmen von Bildern der Teststrecke und der \n Volumenbestimmung von Luftblasen"""'}), '(description=\n """Skript zum Aufnehmen von Bildern der Teststrecke und der \n Volumenbestimmung von Luftblasen"""\n )\n', (317, 481), False, 'import argparse\n'), ((956, 967), 'time.time', 'time.time', ([], {}), '()\n', (965, 967), False, 'import time\n'), ((1588, 1605), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (1596, 1605), False, 'import os\n'), ((1770, 1863), 'threading.Thread', 'Thread', ([], {'target': 'nimmAuf.starte', 'args': '(dirname, numberOfImagesToGrab, framerate, startzeit)'}), '(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab,\n framerate, startzeit))\n', (1776, 1863), False, 'from threading import Thread\n'), ((1875, 1950), 'threading.Thread', 'Thread', ([], {'target': 'bestimmeVolumen.tracke', 'args': '(dirname, numberOfImagesToGrab)'}), '(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))\n', (1881, 1950), False, 'from threading import Thread\n'), ((1264, 1279), 'smbus2.SMBus', 'smbus2.SMBus', (['(0)'], {}), '(0)\n', (1276, 1279), False, 'import smbus2\n'), ((1674, 1685), 'time.time', 'time.time', ([], {}), '()\n', (1683, 1685), False, 'import time\n'), ((1025, 1054), 'pypylon.pylon.TlFactory.GetInstance', 'pylon.TlFactory.GetInstance', ([], {}), '()\n', (1052, 1054), False, 'from pypylon import pylon\n'), ((1536, 1550), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1548, 1550), False, 'from datetime import datetime\n')] |
Sapfir0/web-premier-eye | application/services/decart.py | f060b01e98a923374ea60360ba133caaa654b6c7 | import os
import tempfile
def hasOnePointInside(bigRect, minRect): # at least one corner point lies inside
minY, minX, maxY, maxX = bigRect
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
    return (a and b) or (c and d)  # a corner counts as inside only if both of its coordinates are in range
def isCompletelyInside(bigRect, minRect): # the object is completely inside the rectangle
y1, x1, y2, x2 = bigRect
minX = x1
    minY = y1 # seems right
maxX = x2
maxY = y2
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
    return a and b and c and d # if True, the object is completely inside the big rectangle
def isPartiallyInside(bigRect, minRect, innerPercent=0.5): # the object is partially inside the rectangle
bigLUy, bigLUx, bigRDy, bigRDx = bigRect
minLUy, minLUx, minRDy, minRDx = minRect
    fullSquare = (minLUy - minRDy) * (minRDx - minLUx) # not sure this is correct
    # Not sure about the ifs below
if bigLUy < minLUy:
minLUy = bigLUy
if bigRDy < minRDy:
minRDy = bigRDy
if bigLUx > minLUx:
minLUx = bigLUx
if bigRDx > minRDx:
minRDx = bigRDx
inObjSquare = (minLUy - minRDy) * (minRDx - minLUx)
return inObjSquare / fullSquare >= innerPercent
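# A quick, illustrative sanity check of the three helpers above (editorial
# example, not from the original source); rectangles are (minY, minX, maxY, maxX):
#     big = (0, 0, 10, 10)
#     hasOnePointInside(big, (2, 3, 4, 5))     # True  -- both corners inside
#     isCompletelyInside(big, (2, 3, 4, 5))    # True
#     hasOnePointInside(big, (8, 8, 12, 12))   # True  -- only the (8, 8) corner is inside
#     isCompletelyInside(big, (8, 8, 12, 12))  # False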
def createGraphic(imagePath: str, searchRect: list, objectsListRect: list):
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import matplotlib.patches as patches
im = np.array(Image.open(imagePath), dtype=np.uint8)
fig, ax = plt.subplots(1)
ax.imshow(im)
bigRect = Rectangle(searchRect)
minRects = [Rectangle(i) for i in objectsListRect]
rect = patches.Rectangle(*bigRect.getMTparam(), linewidth=1, edgecolor='g', facecolor='None')
ax.add_patch(rect)
for i in minRects:
rect = patches.Rectangle(*i.getMTparam(), linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
temp = tempfile.NamedTemporaryFile()
path = os.path.join(os.getcwd(), temp.name)
plt.savefig(path)
return os.path.split(temp.name + ".png")
class Rectangle:
LDx = 0
LDy = 0
RUx = 0
RUy = 0
def __init__(self, coordinates: list):
if len(coordinates) != 4:
raise ValueError("Нужно подавать координаты(х,у) двух противоложных вершин")
if coordinates[0] >= coordinates[2] or coordinates[1] >= coordinates[3]:
raise ValueError(
"Неверно заданы вершины, сначала подаются 2 координаты нижнего левого угла, потом верхнего правого")
self.LDx, self.LDy, self.RUx, self.RUy = coordinates
def getWidth(self):
return self.RUx - self.LDx
def getHeight(self):
return self.RUy - self.LDy
def getLUx(self):
return self.LDx
def getLUy(self):
return self.RUy
def getMTparam(self):
        return ((self.getLUy(), self.getLUx()),  # why the minus sign? I don't know
                -self.getHeight(), self.getWidth())  # everything is in a completely different order than it should be? how strange
def getCenterOfDown(self):
return [(self.LDx + self.RUx) / 2, self.LDy]
| [((1618, 1633), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1630, 1633), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2054), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2052, 2054), False, 'import tempfile\n'), ((2107, 2124), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2118, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2137, 2170), 'os.path.split', 'os.path.split', (["(temp.name + '.png')"], {}), "(temp.name + '.png')\n", (2150, 2170), False, 'import os\n'), ((1565, 1586), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (1575, 1586), False, 'from PIL import Image\n'), ((2079, 2090), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2088, 2090), False, 'import os\n')] |
agustinhenze/mibs.snmplabs.com | pysnmp/EXTREME-RTSTATS-MIB.py | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | #
# PySNMP MIB module EXTREME-RTSTATS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EXTREME-BASE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:53:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
extremeAgent, = mibBuilder.importSymbols("EXTREME-BASE-MIB", "extremeAgent")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Bits, MibIdentifier, ModuleIdentity, Counter64, Counter32, NotificationType, Integer32, IpAddress, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Bits", "MibIdentifier", "ModuleIdentity", "Counter64", "Counter32", "NotificationType", "Integer32", "IpAddress", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
extremeRtStats = ModuleIdentity((1, 3, 6, 1, 4, 1, 1916, 1, 11))
if mibBuilder.loadTexts: extremeRtStats.setLastUpdated('9906240000Z')
if mibBuilder.loadTexts: extremeRtStats.setOrganization('Extreme Networks, Inc.')
extremeRtStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1), )
if mibBuilder.loadTexts: extremeRtStatsTable.setStatus('current')
extremeRtStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1), ).setIndexNames((0, "EXTREME-RTSTATS-MIB", "extremeRtStatsIndex"))
if mibBuilder.loadTexts: extremeRtStatsEntry.setStatus('current')
extremeRtStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsIndex.setStatus('current')
extremeRtStatsIntervalStart = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsIntervalStart.setStatus('current')
extremeRtStatsCRCAlignErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsCRCAlignErrors.setStatus('current')
extremeRtStatsUndersizePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsUndersizePkts.setStatus('current')
extremeRtStatsOversizePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsOversizePkts.setStatus('current')
extremeRtStatsFragments = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsFragments.setStatus('current')
extremeRtStatsJabbers = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsJabbers.setStatus('current')
extremeRtStatsCollisions = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsCollisions.setStatus('current')
extremeRtStatsTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsTotalErrors.setStatus('current')
extremeRtStatsUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeRtStatsUtilization.setStatus('current')
mibBuilder.exportSymbols("EXTREME-RTSTATS-MIB", extremeRtStatsEntry=extremeRtStatsEntry, extremeRtStatsOversizePkts=extremeRtStatsOversizePkts, extremeRtStatsUndersizePkts=extremeRtStatsUndersizePkts, extremeRtStatsTable=extremeRtStatsTable, extremeRtStatsTotalErrors=extremeRtStatsTotalErrors, extremeRtStats=extremeRtStats, PYSNMP_MODULE_ID=extremeRtStats, extremeRtStatsCollisions=extremeRtStatsCollisions, extremeRtStatsCRCAlignErrors=extremeRtStatsCRCAlignErrors, extremeRtStatsJabbers=extremeRtStatsJabbers, extremeRtStatsIndex=extremeRtStatsIndex, extremeRtStatsUtilization=extremeRtStatsUtilization, extremeRtStatsIntervalStart=extremeRtStatsIntervalStart, extremeRtStatsFragments=extremeRtStatsFragments)
| [] |
BhavyeMathur/goopylib | goopylib/objects/_BBox.py | f9eb1458e9218a8dd4add6693ce70b804624bf91 | from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
class BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
resizing_objects = []
def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0,
tag=None):
self.p1 = p1
self.p2 = p2
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag)
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.min_width = None
self.min_height = None
self.max_width = None
self.max_height = None
self.resizing_bounds = {}
self.is_resizing = {}
self.bounds_thickness = 0
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}")
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}")
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
def __repr__(self):
return "_BBox"
def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None,
thickness=10):
"""Override in subclasses"""
pass
def _move(self, dx, dy):
self.p1[0] += dx
self.p1[1] += dy
self.p2[0] += dx
self.p2[1] += dy
self.anchor[0] += dx
self.anchor[1] += dy
def is_clicked(self, mouse_pos):
if self.bounds is None:
if mouse_pos is None:
return False
else:
if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \
(self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]):
return True
else:
return False
else:
return self.bounds.is_clicked(mouse_pos)
def get_p1(self):
return self.p1.copy()
def get_p2(self):
return self.p2.copy()
def get_top_right(self):
return self.p1.copy()
def get_top_left(self):
return [self.p2[0], self.p1[1]]
def get_bottom_left(self):
return [self.p1[0], self.p2[1]]
def get_bottom_right(self):
return self.p2.copy()
def get_top(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]]
def get_bottom(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]]
def get_left(self):
return [self.p1[0], (self.p1[1] + self.p2[1]) / 2]
def get_right(self):
return [self.p2[0], (self.p1[1] + self.p2[1]) / 2]
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_fill(self):
return self.fill
def get_outline(self):
return self.outline
def get_outline_width(self):
return self.outline_width
def get_anchor(self):
return self.anchor
def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"):
self.set_width(width, horizontal_align)
self.set_height(height, vertical_align)
return self
def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40,
bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None):
if min_width < 1 or min_height < 1:
raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than "
f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}")
self.min_width = min_width
self.min_height = min_height
self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right}
self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds,
left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width)
if top is False and bottom is False and left is False and right is False:
if self in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.remove(self)
elif self not in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.add(self)
self.bounds_thickness = bounds_width
return self
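    # Illustrative usage (editorial example, not from the original source): a
    # box that can only be resized by dragging its bottom or right edge and
    # never shrinks below 60x40 pixels:
    #     box.set_resizable(bottom=True, right=True, min_width=60, min_height=40)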
def set_coords(self, p1, p2):
self.p1 = p1.copy()
self.p2 = p2.copy()
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
self._update_layer()
return self
def set_width(self, width, center="center"):
if center not in {"center", "right", "left"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_outline_width) needs to be one of "
f'{["center", "right", "left"]}')
if center == "left":
self.set_coords(self.p1, self.p2.add_x(width - self.width))
elif center == "right":
self.set_coords(self.p1.add_x(-(width - self.width)), self.p2)
else:
self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width))
return self
def set_height(self, height, center="center"):
if center not in {"center", "top", "bottom"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_height) needs to be one of "
f'{["center", "top", "bottom"]}')
if center == "top":
self.set_coords(self.p1, self.p2.add_y(height - self.height))
elif center == "bottom":
self.set_coords(self.p1.add_y(-(height - self.height)), self.p2)
else:
self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height))
return self
def set_fill(self, fill):
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}")
self._update_layer()
return self
def set_outline(self, outline):
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}")
self._update_layer()
return self
def set_outline_width(self, outline_width):
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(
f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
self._update_layer()
return self
| [((1010, 1107), 'goopylib.objects.GraphicsObject.GraphicsObject.__init__', 'GraphicsObject.__init__', (['self'], {'options': '()', 'cursor': 'cursor', 'layer': 'layer', 'bounds': 'bounds', 'tag': 'tag'}), '(self, options=(), cursor=cursor, layer=layer,\n bounds=bounds, tag=tag)\n', (1033, 1107), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n'), ((5878, 5922), 'goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.remove', 'GraphicsObject.resizing_objects.remove', (['self'], {}), '(self)\n', (5916, 5922), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n'), ((5994, 6035), 'goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.add', 'GraphicsObject.resizing_objects.add', (['self'], {}), '(self)\n', (6029, 6035), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n')] |
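The trickiest part of the Rectangle code above is the corner normalisation in set_coords (swapping values so p1 is always the top-left and p2 the bottom-right corner). A standalone sketch of just that step, assuming nothing beyond plain [x, y] lists:

def normalize_corners(p1, p2):
    """Return top-left, bottom-right, width, height and anchor for any corner order."""
    p1, p2 = list(p1), list(p2)
    if p1[0] > p2[0]:  # swap x values so p2 holds the larger x
        p1[0], p2[0] = p2[0], p1[0]
    if p1[1] > p2[1]:  # swap y values so p2 holds the larger y
        p1[1], p2[1] = p2[1], p1[1]
    width, height = p2[0] - p1[0], p2[1] - p1[1]  # guaranteed non-negative
    anchor = [(p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2]
    return p1, p2, width, height, anchor

# Any corner order gives the same result:
print(normalize_corners([100, 50], [20, 200]))  # ([20, 50], [100, 200], 80, 150, [60, 125])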
Mayner0220/Programmers | Graph/DFS&BFS.py | 42e4783a526506fb7d8208841a76201909ed5c5c | # https://www.acmicpc.net/problem/1260
n, m, v = map(int, input().split())
graph = [[0] * (n+1) for _ in range(n+1)]
visit = [False] * (n+1)
for _ in range(m):
R, C = map(int, input().split())
graph[R][C] = 1
graph[C][R] = 1
def dfs(v):
visit[v] = True
print(v, end=" ")
for i in range(1, n+1):
if not visit[i] and graph[v][i]==1:
dfs(i)
def bfs(v):
    # Reuses the visit array that dfs() has already filled in: every node
    # reachable from v is still True, so True here means "not yet printed by BFS".
    queue = [v]
    visit[v] = False
while queue:
v = queue.pop(0)
print(v, end=" ")
for i in range(1, n+1):
if visit[i] and graph[v][i]==1:
queue.append(i)
visit[i] = False
dfs(v)
print()
bfs(v) | [] |
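Note that bfs above deliberately reuses the visit array that dfs leaves behind and pops from the front of a plain list, which is O(n) per pop. For comparison only, a self-contained version with separate visited arrays and an O(1) deque, included purely as an illustration of the more conventional pattern, not as the submitted solution:

from collections import deque

def dfs_order(adj, start, n):
    visited, order, stack = [False] * (n + 1), [], [start]
    while stack:
        v = stack.pop()
        if visited[v]:
            continue
        visited[v] = True
        order.append(v)
        # push larger neighbours first so the smallest unvisited one is popped next
        stack.extend(w for w in range(n, 0, -1) if adj[v][w] and not visited[w])
    return order

def bfs_order(adj, start, n):
    visited, order = [False] * (n + 1), []
    visited[start] = True
    queue = deque([start])
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in range(1, n + 1):
            if adj[v][w] and not visited[w]:
                visited[w] = True
                queue.append(w)
    return order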
Jahidul007/Python-Bootcamp | coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py | 3c870587465ff66c2c1871c8d3c4eea72463abda | class Solution:
def modifyString(self, s: str) -> str:
s = list(s)
for i in range(len(s)):
if s[i] == "?":
for c in "abc":
if (i == 0 or s[i-1] != c) and (i+1 == len(s) or s[i+1] != c):
s[i] = c
break
return "".join(s)
| [] |
Fangyh09/pysteps | pysteps/tests/helpers.py | 9eb7f4ead0a946d98b7504d1bd66b18dc405ed51 | """
Testing helper functions
========================
Collection of helper functions for the testing suite.
"""
from datetime import datetime
import numpy as np
import pytest
import pysteps as stp
from pysteps import io, rcparams
def get_precipitation_fields(num_prev_files=0):
"""Get a precipitation field from the archive to be used as reference."""
# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
# Find the input files from the archive
fns = io.archive.find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext,
timestep=5, num_prev_files=num_prev_files)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
reference_field, quality, metadata = io.read_timeseries(fns, importer,
**importer_kwargs)
del quality # Not used
if num_prev_files == 0:
reference_field = np.squeeze(reference_field) # Remove time dimension
# Convert to mm/h
reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata)
# Mask invalid values
reference_field = np.ma.masked_invalid(reference_field)
# Log-transform the data [dBR]
reference_field, metadata = stp.utils.dB_transform(reference_field,
metadata,
threshold=0.1,
zerovalue=-15.0)
return reference_field
def smart_assert(actual_value, expected, tolerance=None):
"""
Assert by equality for non-numeric values, or by approximation otherwise.
    If the tolerance keyword is None, assert by equality.
    When the tolerance is not None, assert that two numeric values
(or two sets of numbers) are equal to each other within the tolerance.
"""
if tolerance is None:
assert actual_value == expected
else:
# Compare numbers up to a certain precision
assert actual_value == pytest.approx(expected, 1e-6)
| [((392, 439), 'datetime.datetime.strptime', 'datetime.strptime', (['"""201505151630"""', '"""%Y%m%d%H%M"""'], {}), "('201505151630', '%Y%m%d%H%M')\n", (409, 439), False, 'from datetime import datetime\n'), ((798, 915), 'pysteps.io.archive.find_by_date', 'io.archive.find_by_date', (['date', 'root_path', 'path_fmt', 'fn_pattern', 'fn_ext'], {'timestep': '(5)', 'num_prev_files': 'num_prev_files'}), '(date, root_path, path_fmt, fn_pattern, fn_ext,\n timestep=5, num_prev_files=num_prev_files)\n', (821, 915), False, 'from pysteps import io, rcparams\n'), ((994, 1034), 'pysteps.io.get_method', 'io.get_method', (['importer_name', '"""importer"""'], {}), "(importer_name, 'importer')\n", (1007, 1034), False, 'from pysteps import io, rcparams\n'), ((1076, 1128), 'pysteps.io.read_timeseries', 'io.read_timeseries', (['fns', 'importer'], {}), '(fns, importer, **importer_kwargs)\n', (1094, 1128), False, 'from pysteps import io, rcparams\n'), ((1381, 1429), 'pysteps.utils.to_rainrate', 'stp.utils.to_rainrate', (['reference_field', 'metadata'], {}), '(reference_field, metadata)\n', (1402, 1429), True, 'import pysteps as stp\n'), ((1479, 1516), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['reference_field'], {}), '(reference_field)\n', (1499, 1516), True, 'import numpy as np\n'), ((1585, 1671), 'pysteps.utils.dB_transform', 'stp.utils.dB_transform', (['reference_field', 'metadata'], {'threshold': '(0.1)', 'zerovalue': '(-15.0)'}), '(reference_field, metadata, threshold=0.1, zerovalue=\n -15.0)\n', (1607, 1671), True, 'import pysteps as stp\n'), ((1273, 1300), 'numpy.squeeze', 'np.squeeze', (['reference_field'], {}), '(reference_field)\n', (1283, 1300), True, 'import numpy as np\n'), ((2374, 2404), 'pytest.approx', 'pytest.approx', (['expected', '(1e-06)'], {}), '(expected, 1e-06)\n', (2387, 2404), False, 'import pytest\n')] |
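smart_assert switches between exact equality and approximate comparison depending on whether a tolerance is supplied. A minimal re-sketch of that behaviour (the smart_compare name is mine, not part of pysteps; only pytest is needed):

import pytest

def smart_compare(actual, expected, tolerance=None):
    # Same branching as smart_assert above: exact match without a tolerance,
    # otherwise pytest.approx with the tolerance used as the relative error.
    if tolerance is None:
        return actual == expected
    return actual == pytest.approx(expected, tolerance)

assert smart_compare("importer", "importer")          # non-numeric: exact match
assert smart_compare(0.1 + 0.2, 0.3, tolerance=1e-6)   # numeric: approximate match
assert not smart_compare(0.1 + 0.2, 0.3)               # exact float comparison fails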
ehiller/mobilecsp-v18 | modules/courses/courses.py | a59801c44c616d30f5e916d6771e479c8a9e88f7 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov ([email protected])'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'
SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'
custom_module = None
def can_pick_all_locales(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, All_LOCALES_PERMISSION)
def can_see_drafts(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, SEE_DRAFTS_PERMISSION)
def register_module():
"""Registers this module in the registry."""
def on_module_enabled():
roles.Roles.register_permissions(custom_module, permissions_callback)
resource.Registry.register(resources_display.ResourceCourseSettings)
resource.Registry.register(resources_display.ResourceUnit)
resource.Registry.register(resources_display.ResourceAssessment)
resource.Registry.register(resources_display.ResourceLink)
resource.Registry.register(resources_display.ResourceLesson)
resource.Registry.register(utils.ResourceHtmlHook)
def permissions_callback(unused_app_context):
return [
roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION),
roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)
]
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [
('/', lessons.CourseHandler),
('/activity', lessons.UnitHandler),
('/answer', assessments.AnswerHandler),
('/assessment', lessons.AssessmentHandler),
('/course', lessons.CourseHandler),
('/forum', utils.ForumHandler),
('/preview', utils.PreviewHandler),
('/register', utils.RegisterHandler),
('/resources', utils.ResourcesHandler),
('/rest/locale', utils.StudentLocaleRESTHandler),
('/review', lessons.ReviewHandler),
('/reviewdashboard', lessons.ReviewDashboardHandler),
('/student/editstudent', utils.StudentEditStudentHandler),
('/student/settracks', utils.StudentSetTracksHandler),
('/student/home', utils.StudentProfileHandler),
('/student/unenroll', utils.StudentUnenrollHandler),
('/unit', lessons.UnitHandler)]
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Course',
'A set of pages for delivering an online course.',
[], courses_routes,
notify_module_enabled=on_module_enabled)
return custom_module
| [((1277, 1356), 'models.roles.Roles.is_user_allowed', 'roles.Roles.is_user_allowed', (['app_context', 'custom_module', 'All_LOCALES_PERMISSION'], {}), '(app_context, custom_module, All_LOCALES_PERMISSION)\n', (1304, 1356), False, 'from models import roles\n'), ((1412, 1490), 'models.roles.Roles.is_user_allowed', 'roles.Roles.is_user_allowed', (['app_context', 'custom_module', 'SEE_DRAFTS_PERMISSION'], {}), '(app_context, custom_module, SEE_DRAFTS_PERMISSION)\n', (1439, 1490), False, 'from models import roles\n'), ((3396, 3547), 'models.custom_modules.Module', 'custom_modules.Module', (['"""Course"""', '"""A set of pages for delivering an online course."""', '[]', 'courses_routes'], {'notify_module_enabled': 'on_module_enabled'}), "('Course',\n 'A set of pages for delivering an online course.', [], courses_routes,\n notify_module_enabled=on_module_enabled)\n", (3417, 3547), False, 'from models import custom_modules\n'), ((1612, 1681), 'models.roles.Roles.register_permissions', 'roles.Roles.register_permissions', (['custom_module', 'permissions_callback'], {}), '(custom_module, permissions_callback)\n', (1644, 1681), False, 'from models import roles\n'), ((1690, 1758), 'common.resource.Registry.register', 'resource.Registry.register', (['resources_display.ResourceCourseSettings'], {}), '(resources_display.ResourceCourseSettings)\n', (1716, 1758), False, 'from common import resource\n'), ((1767, 1825), 'common.resource.Registry.register', 'resource.Registry.register', (['resources_display.ResourceUnit'], {}), '(resources_display.ResourceUnit)\n', (1793, 1825), False, 'from common import resource\n'), ((1834, 1898), 'common.resource.Registry.register', 'resource.Registry.register', (['resources_display.ResourceAssessment'], {}), '(resources_display.ResourceAssessment)\n', (1860, 1898), False, 'from common import resource\n'), ((1907, 1965), 'common.resource.Registry.register', 'resource.Registry.register', (['resources_display.ResourceLink'], {}), '(resources_display.ResourceLink)\n', (1933, 1965), False, 'from common import resource\n'), ((1974, 2034), 'common.resource.Registry.register', 'resource.Registry.register', (['resources_display.ResourceLesson'], {}), '(resources_display.ResourceLesson)\n', (2000, 2034), False, 'from common import resource\n'), ((2043, 2093), 'common.resource.Registry.register', 'resource.Registry.register', (['utils.ResourceHtmlHook'], {}), '(utils.ResourceHtmlHook)\n', (2069, 2093), False, 'from common import resource\n'), ((2174, 2239), 'models.roles.Permission', 'roles.Permission', (['All_LOCALES_PERMISSION', 'All_LOCALES_DESCRIPTION'], {}), '(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION)\n', (2190, 2239), False, 'from models import roles\n'), ((2253, 2316), 'models.roles.Permission', 'roles.Permission', (['SEE_DRAFTS_PERMISSION', 'SEE_DRAFTS_DESCRIPTION'], {}), '(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)\n', (2269, 2316), False, 'from models import roles\n')] |
pyre/pyre | packages/merlin/protocols/PrefixLayout.py | 0f903836f52450bf81216c5dfdfdfebb16090177 | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# (c) 1998-2021 all rights reserved
# support
import merlin
# the manager of intermediate and final build products
class PrefixLayout(merlin.protocol, family="merlin.layouts.prefix"):
"""
    The manager of all build products, both final and intermediate disposables
"""
# required state
bin = merlin.properties.path()
bin.doc = "the location of executables"
config = merlin.properties.path()
config.doc = "global package configuration files"
doc = merlin.properties.path()
doc.doc = "package documentation"
etc = merlin.properties.path()
etc.doc = "host specific files"
include = merlin.properties.path()
include.doc = "library header files"
lib = merlin.properties.path()
lib.doc = "libraries"
libexec = merlin.properties.path()
libexec.doc = "binaries that are meant to be used by other packages"
share = merlin.properties.path()
share.doc = "architecture independent package files"
var = merlin.properties.path()
var.doc = "runtime files"
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Specify the default implementation
"""
# choose the default implementer
return merlin.components.fhs
# end of file
| [((400, 424), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (422, 424), False, 'import merlin\n'), ((483, 507), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (505, 507), False, 'import merlin\n'), ((573, 597), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (595, 597), False, 'import merlin\n'), ((647, 671), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (669, 671), False, 'import merlin\n'), ((723, 747), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (745, 747), False, 'import merlin\n'), ((800, 824), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (822, 824), False, 'import merlin\n'), ((866, 890), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (888, 890), False, 'import merlin\n'), ((977, 1001), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (999, 1001), False, 'import merlin\n'), ((1070, 1094), 'merlin.properties.path', 'merlin.properties.path', ([], {}), '()\n', (1092, 1094), False, 'import merlin\n')] |
bradleyhenke/cortex | test/IECoreMaya/ImageConverterTest.py | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
class ImageConverterTest( IECoreMaya.TestCase ) :
def test( self ) :
imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read()
toMaya = IECoreMaya.ToMayaImageConverter( imageA )
mImage = maya.OpenMaya.MImage()
toMaya.convert( mImage )
fromMaya = IECoreMaya.FromMayaImageConverter( mImage )
imageB = fromMaya.convert()
self.assertFalse(
IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value
)
if __name__ == "__main__":
IECoreMaya.TestProgram()
| [((2364, 2388), 'IECoreMaya.TestProgram', 'IECoreMaya.TestProgram', ([], {}), '()\n', (2386, 2388), False, 'import IECoreMaya\n'), ((2031, 2070), 'IECoreMaya.ToMayaImageConverter', 'IECoreMaya.ToMayaImageConverter', (['imageA'], {}), '(imageA)\n', (2062, 2070), False, 'import IECoreMaya\n'), ((2149, 2190), 'IECoreMaya.FromMayaImageConverter', 'IECoreMaya.FromMayaImageConverter', (['mImage'], {}), '(mImage)\n', (2182, 2190), False, 'import IECoreMaya\n'), ((1937, 2009), 'IECore.Reader.create', 'IECore.Reader.create', (['"""test/IECoreImage/data/exr/colorBarsWithAlpha.exr"""'], {}), "('test/IECoreImage/data/exr/colorBarsWithAlpha.exr')\n", (1957, 2009), False, 'import IECore\n'), ((2248, 2273), 'IECoreImage.ImageDiffOp', 'IECoreImage.ImageDiffOp', ([], {}), '()\n', (2271, 2273), False, 'import IECoreImage\n')] |
PatrykNeubauer/NeMo | tests/core_ptl/check_for_ranks.py | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.distributed import rank_zero_only
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
class OnesDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.ones(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg, trainer=kwargs.get('trainer', None))
# dummy parameter in order to allow DDP to execute
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
def train_dataloader(self):
return None
def val_dataloader(self):
return None
def predict_dataloader(self):
dataset = OnesDataset(2)
return torch.utils.data.DataLoader(dataset, batch_size=2)
def forward(self, batch):
return batch.mean()
def validation_step(self, batch, batch_idx):
return self(batch)
def training_step(self, batch, batch_idx):
return self(batch)
def list_available_models(self):
pass
def setup_training_data(self):
pass
def setup_validation_data(self):
pass
def validation_epoch_end(self, loss):
self.log("val_loss", torch.stack(loss).mean())
def instantiate_multinode_ddp_if_possible():
num_gpus = torch.cuda.device_count()
trainer = Trainer(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None)
exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="")
exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
return trainer
def setup_model(trainer: Trainer):
model = ExampleModel(trainer=trainer)
logging.info(f"M.Global Rank:{model.global_rank}")
logging.info(f"M.Local Rank:{model.local_rank}")
logging.info(f"M.World Size:{model.trainer.world_size}")
trainer.predict(model)
return model
def get_rank_info(texts: list, rank_key: str) -> int:
for line in texts:
if rank_key in line:
rank_value = line.split(":")[-1]
rank_value = int(rank_value)
return rank_value
print("Could not find the correct rank key !")
exit(1)
@rank_zero_only
def check_model_ranks(model: ExampleModel):
basedir = os.path.join('./ddp_check/', 'default', 'version_0')
file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt"
world_size = torch.cuda.device_count()
for rank in range(world_size):
filename = file_template.format(rank=rank)
filepath = os.path.join(basedir, filename)
with open(filepath, 'r') as f:
texts = f.readlines()
texts = [t.replace("\n", "") for t in texts]
log_global_rank = get_rank_info(texts, rank_key='M.Global Rank')
log_world_size = get_rank_info(texts, rank_key='M.World Size')
if log_global_rank != rank:
print("Logged global rank is not equal to trainer.global_rank !")
exit(1)
if log_world_size != world_size:
print("Logged world size if not equal to trainer.world_size !")
exit(1)
@rank_zero_only
def cleanup():
if os.path.exists('./ddp_check'):
shutil.rmtree('./ddp_check', ignore_errors=True)
def run_checks():
cleanup()
trainer = instantiate_multinode_ddp_if_possible()
model = setup_model(trainer)
check_model_ranks(model)
print("DDP checks passed !")
cleanup()
if __name__ == '__main__':
run_checks()
| [((2257, 2282), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2280, 2282), False, 'import torch\n'), ((2297, 2382), 'pytorch_lightning.Trainer', 'Trainer', ([], {'gpus': 'num_gpus', 'accelerator': '"""ddp"""', 'logger': 'None', 'checkpoint_callback': 'None'}), "(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None\n )\n", (2304, 2382), False, 'from pytorch_lightning import Trainer\n'), ((2401, 2486), 'nemo.utils.exp_manager.ExpManagerConfig', 'ExpManagerConfig', ([], {'exp_dir': '"""./ddp_check/"""', 'use_datetime_version': '(False)', 'version': '""""""'}), "(exp_dir='./ddp_check/', use_datetime_version=False, version=''\n )\n", (2417, 2486), False, 'from nemo.utils.exp_manager import ExpManagerConfig, exp_manager\n'), ((2653, 2703), 'nemo.utils.logging.info', 'logging.info', (['f"""M.Global Rank:{model.global_rank}"""'], {}), "(f'M.Global Rank:{model.global_rank}')\n", (2665, 2703), False, 'from nemo.utils import logging\n'), ((2708, 2756), 'nemo.utils.logging.info', 'logging.info', (['f"""M.Local Rank:{model.local_rank}"""'], {}), "(f'M.Local Rank:{model.local_rank}')\n", (2720, 2756), False, 'from nemo.utils import logging\n'), ((2761, 2817), 'nemo.utils.logging.info', 'logging.info', (['f"""M.World Size:{model.trainer.world_size}"""'], {}), "(f'M.World Size:{model.trainer.world_size}')\n", (2773, 2817), False, 'from nemo.utils import logging\n'), ((3227, 3279), 'os.path.join', 'os.path.join', (['"""./ddp_check/"""', '"""default"""', '"""version_0"""'], {}), "('./ddp_check/', 'default', 'version_0')\n", (3239, 3279), False, 'import os\n'), ((3368, 3393), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3391, 3393), False, 'import torch\n'), ((4120, 4149), 'os.path.exists', 'os.path.exists', (['"""./ddp_check"""'], {}), "('./ddp_check')\n", (4134, 4149), False, 'import os\n'), ((1115, 1128), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1125, 1128), False, 'import torch\n'), ((1273, 1297), 'omegaconf.OmegaConf.structured', 'OmegaConf.structured', (['{}'], {}), '({})\n', (1293, 1297), False, 'from omegaconf import OmegaConf\n'), ((1442, 1496), 'torch.nn.modules.Linear', 'torch.nn.modules.Linear', ([], {'in_features': '(2)', 'out_features': '(1)'}), '(in_features=2, out_features=1)\n', (1465, 1496), False, 'import torch\n'), ((1684, 1734), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(2)'}), '(dataset, batch_size=2)\n', (1711, 1734), False, 'import torch\n'), ((3499, 3530), 'os.path.join', 'os.path.join', (['basedir', 'filename'], {}), '(basedir, filename)\n', (3511, 3530), False, 'import os\n'), ((4159, 4207), 'shutil.rmtree', 'shutil.rmtree', (['"""./ddp_check"""'], {'ignore_errors': '(True)'}), "('./ddp_check', ignore_errors=True)\n", (4172, 4207), False, 'import shutil\n'), ((2511, 2548), 'omegaconf.OmegaConf.structured', 'OmegaConf.structured', (['exp_manager_cfg'], {}), '(exp_manager_cfg)\n', (2531, 2548), False, 'from omegaconf import OmegaConf\n'), ((2169, 2186), 'torch.stack', 'torch.stack', (['loss'], {}), '(loss)\n', (2180, 2186), False, 'import torch\n')] |
Lofi-Lemonade/Python-Discord-Bot-Template | helpers/json_manager.py | 4cb79197c751c88100ad396adb38e88bf2a4d1ed | """"
Copyright © Krypton 2022 - https://github.com/kkrypt0nn (https://krypton.ninja)
Description:
This is a template to create your own discord bot in python.
Version: 4.1
"""
import json
def add_user_to_blacklist(user_id: int) -> None:
"""
This function will add a user based on its ID in the blacklist.json file.
:param user_id: The ID of the user that should be added into the blacklist.json file.
"""
with open("blacklist.json", "r+") as file:
file_data = json.load(file)
file_data["ids"].append(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
def remove_user_from_blacklist(user_id: int) -> None:
"""
This function will remove a user based on its ID from the blacklist.json file.
:param user_id: The ID of the user that should be removed from the blacklist.json file.
"""
with open("blacklist.json", "r") as file:
file_data = json.load(file)
file_data["ids"].remove(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
| [((492, 507), 'json.load', 'json.load', (['file'], {}), '(file)\n', (501, 507), False, 'import json\n'), ((624, 660), 'json.dump', 'json.dump', (['file_data', 'file'], {'indent': '(4)'}), '(file_data, file, indent=4)\n', (633, 660), False, 'import json\n'), ((974, 989), 'json.load', 'json.load', (['file'], {}), '(file)\n', (983, 989), False, 'import json\n'), ((1106, 1142), 'json.dump', 'json.dump', (['file_data', 'file'], {'indent': '(4)'}), '(file_data, file, indent=4)\n', (1115, 1142), False, 'import json\n')] |
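Both helpers assume a blacklist.json file that already exists and stores an "ids" list. A small demo of that contract, run alongside the functions above (the file is created first only so the example is self-contained, and the ID is a made-up example value):

import json

with open("blacklist.json", "w") as file:
    json.dump({"ids": []}, file, indent=4)   # the shape the helpers expect

add_user_to_blacklist(123456789012345678)        # file now holds {"ids": [123456789012345678]}
remove_user_from_blacklist(123456789012345678)   # back to {"ids": []}

with open("blacklist.json") as file:
    print(json.load(file))   # {'ids': []}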
ColinKennedy/ways | tests/test_common.py | 1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Make sure that generic functions work exactly as we expect.'''
# IMPORT STANDARD LIBRARIES
import unittest
# IMPORT WAYS LIBRARIES
from ways import common
class ParseTestCase(unittest.TestCase):
'''Test generic parsing-related functions.'''
def test_working_0001(self):
'''Test that correct input for expand_string works as expected.'''
pattern = '/jobs/{JOB}/some_kind/{THING}/real_folders'
text = '/jobs/some_job_here/some_kind/of/real_folders'
expected_output = {'JOB': 'some_job_here', 'THING': 'of'}
self.assertEqual(expected_output, common.expand_string(pattern, text))
def test_working_0002(self):
'''Test that correct input for expand_string works as expected.'''
shot = 'NAME_010'
format_string = '{SHOT}_{ID}'
expected_output = {'SHOT': 'NAME', 'ID': '010'}
self.assertEqual(expected_output, common.expand_string(format_string, shot))
def test_expand_string_failure_0001(self):
        '''Force expand_string to fail to prevent a bad match from occurring.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/of/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
def test_expand_string_failure_0002(self):
        '''Force expand_string to fail to prevent a bad match from occurring.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/{SHOTNAME}/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
| [((644, 679), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (664, 679), False, 'from ways import common\n'), ((953, 994), 'ways.common.expand_string', 'common.expand_string', (['format_string', 'shot'], {}), '(format_string, shot)\n', (973, 994), False, 'from ways import common\n'), ((1271, 1306), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (1291, 1306), False, 'from ways import common\n'), ((1591, 1626), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (1611, 1626), False, 'from ways import common\n')] |
glibin/natasha | setup.py | 4f5c153f754759c189779f9879decd8d218356af | from setuptools import setup, find_packages
setup(
name='natasha',
version='0.2.0',
description='Named-entity recognition for russian language',
url='https://github.com/bureaucratic-labs/natasha',
author='Dmitry Veselov',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='natural language processing, russian morphology, named entity recognition, tomita',
packages=find_packages(),
install_requires=[
'yargy==0.3.0'
],
extras_require={
'web': [
'ujson',
'aiohttp',
],
},
)
| [((770, 785), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (783, 785), False, 'from setuptools import setup, find_packages\n')] |
OneScreenfulOfPython/screenfuls | GeneratePassword/generate_password_v2.py | ea4e378c8d9e530edadd4a3315fe9e8acc98460b | import os, sys
import random
import string
try:
# Make Python2 work like Python3
input = raw_input
except NameError:
# On Python3; already using input
pass
letters = string.ascii_letters
numbers = string.digits
punctuation = string.punctuation
def generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation):
"""Generate a password by include enough random
characters to meet the password length restriction.
In addition, the user can specify that at least one
of the each of the classes of character be used.
"""
#
# Any combination of characters is valid
#
valid_characters = ""
if at_least_one_letter:
valid_characters += letters
if at_least_one_number:
valid_characters += numbers
if at_least_one_punctuation:
valid_characters += punctuation
#
# Start with a blank password and then go round enough
# times to make a password of the required length.
#
password = ""
for i in range(password_length):
#
# Each time around, ensure that one of each of the selected
# groups is chosen, and then just choose randomly from all
# groups.
#
if at_least_one_letter:
character = random.choice(letters)
at_least_one_letter = False
elif at_least_one_number:
character = random.choice(numbers)
at_least_one_number = False
elif at_least_one_punctuation:
character = random.choice(punctuation)
at_least_one_punctuation = False
else:
character = random.choice(valid_characters)
password += character
#
# Finally, shuffle the password so we don't always get a
# letter at the beginning, with a number after and some
# punctuation.
#
characters = list(password)
#
# random.shuffle shuffles a list *in place*
#
random.shuffle(characters)
#
# X.join(...) means: return all the strings in (...) joined by X
# ", ".join(['Eggs', 'Bacon', 'Beans']) => "Eggs, Bacon, Beans"
# But if you want to generate *real* .csv files, use the csv module
# because there are lots of corner-cases.
#
password = "".join(characters)
return password
if __name__ == '__main__':
password_length = int(input("How many letters? "))
at_least_one_letter = "Y" == (input("At least one letter [Y/n]? ").upper() or "Y")
at_least_one_number = "Y" == (input("At least one number [Y/n]? ").upper() or "Y")
at_least_one_punctuation = "Y" == (input("At least one punctuation [Y/n]? ").upper() or "Y")
password = generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation)
print("Your password is: {}".format(password))
| [((1951, 1977), 'random.shuffle', 'random.shuffle', (['characters'], {}), '(characters)\n', (1965, 1977), False, 'import random\n'), ((1283, 1305), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1296, 1305), False, 'import random\n'), ((1404, 1426), 'random.choice', 'random.choice', (['numbers'], {}), '(numbers)\n', (1417, 1426), False, 'import random\n'), ((1530, 1556), 'random.choice', 'random.choice', (['punctuation'], {}), '(punctuation)\n', (1543, 1556), False, 'import random\n'), ((1640, 1671), 'random.choice', 'random.choice', (['valid_characters'], {}), '(valid_characters)\n', (1653, 1671), False, 'import random\n')] |
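The two comments above (shuffle works in place, join stitches an iterable of strings together) are easy to verify directly; a quick check:

import random

chars = list("abc123!?")
print(random.shuffle(chars))                    # None -- the list itself was reordered in place
print("".join(chars))                         # e.g. '1?b2ac3!' -- varies per run
print(", ".join(['Eggs', 'Bacon', 'Beans']))  # Eggs, Bacon, Beans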
block42-blockchain-company/thornode-telegram-bot | bot/jobs/thorchain_node_jobs.py | 6478b1eb41e36c5fdd327b963b55343de1ce5337 | from constants.messages import get_node_health_warning_message, get_node_healthy_again_message
from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users
from packaging import version
from service.utils import *
def check_thornodes(context):
chat_id = context.job.context['chat_id']
chat_data = context.job.context['chat_data']
inactive_nodes = []
for node_address, local_node in chat_data.get('nodes', {}).items():
try:
remote_node = get_thornode_object_or_none(address=node_address)
except HTTPError as e:
logger.exception(e)
continue
if remote_node is None:
text = 'THORNode ' + local_node['alias'] + ' is not active anymore! 💀' + '\n' + \
'Address: ' + node_address + '\n\n' + \
'Please enter another THORNode address.'
inactive_nodes.append(node_address)
try_message_with_home_menu(context=context,
chat_id=chat_id,
text=text)
continue
is_not_blocked = float(local_node['last_notification_timestamp']) < \
datetime.timestamp(
datetime.now() - timedelta(seconds=local_node['notification_timeout_in_seconds']))
if is_not_blocked:
message = build_notification_message_for_active_node(local_node, remote_node, context)
if message:
# Update data
local_node['status'] = remote_node['status']
local_node['bond'] = remote_node['bond']
local_node['slash_points'] = remote_node['slash_points']
local_node['ip_address'] = remote_node['ip_address']
local_node['last_notification_timestamp'] = datetime.timestamp(datetime.now())
local_node['notification_timeout_in_seconds'] *= NOTIFICATION_TIMEOUT_MULTIPLIER
try_message_with_home_menu(context=context,
chat_id=chat_id,
text=message)
else:
local_node['notification_timeout_in_seconds'] = INITIAL_NOTIFICATION_TIMEOUT
if local_node['status'].upper() in MONITORED_STATUSES and is_thornode_healthy(context, node_address):
check_thorchain_block_height(context, node_address=node_address)
check_thorchain_catch_up_status(context, node_address=node_address)
check_thorchain_midgard_api(context, node_address=node_address)
for node_address in inactive_nodes:
del chat_data['nodes'][node_address]
def build_notification_message_for_active_node(local_node, remote_node, context) -> [str, None]:
changed_fields = [
field for field in ['status', 'bond', 'slash_points']
if local_node[field] != remote_node[field]
]
threshold = get_slash_points_threshold(context)
slash_point_change = abs(int(local_node['slash_points']) - int(remote_node['slash_points']))
if (len(changed_fields) <= 1) and ('slash_points' in changed_fields) and (slash_point_change <= threshold):
return None
if len(changed_fields) > 0:
text = f"THORNode: {local_node['alias']}\n" \
f"Address: {local_node['node_address']}\n" \
f"Status: {local_node['status'].capitalize()}"
if 'status' in changed_fields:
text += f' ➡️ {remote_node["status"].capitalize()}'
text += f"\nBond: {tor_to_rune(int(local_node['bond']))}"
if 'bond' in changed_fields:
text += f" ➡️ {tor_to_rune(int(remote_node['bond']))}"
text += '\nSlash Points: ' + '{:,}'.format(int(local_node['slash_points']))
if 'slash_points' in changed_fields:
text += ' ➡️ ' + '{:,}'.format(int(remote_node['slash_points']))
return text
else:
return None
def check_versions_status(context):
chat_data = context.job.context['chat_data']
try:
node_accounts = get_node_accounts()
except Exception as e:
logger.exception(e)
logger.error("I couldn't get the node accounts while checking version status.")
return
highest_version = max(map(lambda n: n['version'], node_accounts),
key=lambda v: version.parse(v))
last_newest_version = chat_data.get('newest_software_version', None)
if last_newest_version is None or version.parse(
highest_version) > version.parse(last_newest_version):
chat_data['newest_software_version'] = highest_version
for node in chat_data.get('nodes', {}).values():
if version.parse(node['version']) < version.parse(highest_version):
message = f"Consider updating the software on your node: *{node['alias']}* ‼️\n" \
f"Your software version is *{node['version']}* " \
f"but one of the nodes already runs on *{highest_version}*"
try_message_with_home_menu(
context,
chat_id=context.job.context['chat_id'],
text=message)
def check_churning(context):
try:
validators = get_node_accounts()
except Exception as e:
logger.exception(e)
logger.error("I couldn't get the node accounts while checking if churning occurred.")
return
if 'node_statuses' not in context.bot_data:
context.bot_data['node_statuses'] = {}
for validator in validators:
context.bot_data['node_statuses'][
validator['node_address']] = validator['status']
return
local_node_statuses = context.bot_data['node_statuses']
churned_in = []
churned_out = []
highest_churn_status_since = 0
for validator in validators:
if did_churn_happen(validator, local_node_statuses, highest_churn_status_since):
highest_churn_status_since = int(validator['status_since'])
for validator in validators:
remote_status = validator['status']
local_status = local_node_statuses[
validator['node_address']] if validator[
'node_address'] in local_node_statuses else "unknown"
if remote_status != local_status:
if 'active' == remote_status:
churned_in.append({
"address": validator['node_address'],
"bond": validator['bond']
})
elif 'active' == local_status:
churned_out.append({
"address": validator['node_address'],
"bond": validator['bond']
})
if len(churned_in) or len(churned_out):
text = "🔄 CHURN SUMMARY\n" \
"THORChain has successfully churned:\n\n"
text += "Nodes Added:\n" if len(churned_in) else ""
for node in churned_in:
text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
text += "\nNodes Removed:\n" if len(churned_out) else ""
for node in churned_out:
text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
text += "\nSystem:\n"
try:
network = get_network_data()
text += f"📡 Network Security: *{network_security_ratio_to_string(get_network_security_ratio(network))}*\n\n" \
f"💚 Total Active Bond: *{tor_to_rune(network['bondMetrics']['totalActiveBond'])}* (total)\n\n" \
"⚖️ Bonded/Staked Ratio: *" + '{:.2f}'.format(
int(get_network_security_ratio(network) * 100)) + " %*\n\n" \
"↩️ Bonding ROI: *" + '{:.2f}'.format(
float(network['bondingAPY']) * 100) + " %* APY\n\n" \
"↩️ Liquidity ROI: *" + '{:.2f}'.format(
float(network['liquidityAPY']) * 100) + " %* APY"
context.bot_data.setdefault("vault_addresses", {})
current_chains = get_pool_addresses_from_any_node()
for chain in current_chains:
if chain['chain'] in context.bot_data['vault_addresses']:
if chain['address'] != context.bot_data['vault_addresses'][chain['chain']]:
text += f"\n\n🔐 Vault Addresses:" if "Vault Addresses" not in text else ""
text += f"\n*{chain['chain']}*: \n" \
f"Old Vault address: {context.bot_data['vault_addresses'][chain['chain']]}\n"\
f"⬇️\n" \
f"New Vault address: {chain['address']}\n"
else:
text += "\n\n⚠️ 🚨 CHURNING BUT THE VAULT ADDRESSES DID NOT CHANGE 🚨\n"
context.bot_data['vault_addresses'][chain['chain']] = chain['address']
except Exception as e:
logger.exception(e)
try_message_to_all_users(context, text=text)
for validator in validators:
context.bot_data['node_statuses'][
validator['node_address']] = validator['status']
def did_churn_happen(validator, local_node_statuses, highest_churn_status_since) -> bool:
remote_status = validator['status']
local_status = local_node_statuses[validator['node_address']] if validator[
'node_address'] in local_node_statuses else "unknown"
if int(validator['status_since']) > highest_churn_status_since and \
((local_status == 'ready' and remote_status == 'active') or (
local_status == 'active' and remote_status == 'standby')):
return True
return False
def is_thornode_healthy(context, node_address) -> bool:
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
# If not initialized assuming node was healhty.
if "healthy" not in context.job.context['chat_data']['nodes'][node_address]:
context.job.context['chat_data']['nodes'][node_address]["healthy"] = True
was_healthy = node_data["healthy"]
try:
# Check whether node answers. If it doesn't we get an Exception.
get_latest_block_height(node_data['ip_address'])
if not was_healthy:
try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_healthy_again_message(node_data))
context.job.context['chat_data']['nodes'][node_address]["healthy"] = True
return True
except (Timeout, ConnectionError, BadStatusException, Exception):
if was_healthy:
try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_health_warning_message(node_data))
context.job.context['chat_data']['nodes'][node_address]["healthy"] = False
return False
def check_thorchain_block_height(context, node_address):
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
try:
block_height = get_latest_block_height(node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
return
is_stuck = block_height <= node_data.setdefault('block_height', 0)
block_height_stuck_count = node_data.setdefault("block_height_stuck_count", 0)
if is_stuck:
block_height_stuck_count += 1
if block_height_stuck_count == 1:
text = 'Block height is not increasing anymore! 💀' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Block height stuck at: ' + block_height + '\n\n' + \
'Please check your Thornode immediately!'
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
else:
if block_height_stuck_count >= 1:
text = f"Block height is increasing again! 👌\n" + \
f"IP: {node_data['ip_address']}\n" + \
f"THORNode: {node_data['alias']}\n" + \
f"Node address: {node_address}\n" + \
f"Block height now at: {block_height}\n"
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
block_height_stuck_count = 0
node_data['block_height'] = block_height
node_data["block_height_stuck_count"] = block_height_stuck_count
def check_solvency_job(context):
message = check_solvency(context)
if message:
try_message_to_all_users(context, text=message)
def check_solvency(context) -> [str, None]:
try:
asgard_solvency = asgard_solvency_check()
yggdrasil_solvency = yggdrasil_solvency_check()
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error while querying Asgard and Yggdrasil.")
return None
except Exception as e:
logger.exception(e)
return None
is_solvent = asgard_solvency['is_solvent'] and yggdrasil_solvency['is_solvent']
insolvency_count = context.bot_data.setdefault("insolvency_count", 0)
message = None
if not is_solvent:
insolvency_count += 1
if insolvency_count == MISSING_FUNDS_THRESHOLD:
message = 'THORChain is *missing funds*! 💀\n\n'
message += get_insolvent_balances_message(asgard_solvency, yggdrasil_solvency)
else:
if insolvency_count >= MISSING_FUNDS_THRESHOLD:
message = 'THORChain is *100% solvent* again! 👌\n'
insolvency_count = 0
context.bot_data["insolvency_count"] = insolvency_count
return message
def check_thorchain_catch_up_status(context, node_address):
"""
Check if node is some blocks behind with catch up status
"""
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
if 'is_catching_up' not in node_data:
node_data['is_catching_up'] = False
try:
is_currently_catching_up = is_thorchain_catching_up(
node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
return
if node_data['is_catching_up'] != is_currently_catching_up:
try:
block_height = get_latest_block_height(node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
block_height = "currently unavailable"
if is_currently_catching_up:
node_data['is_catching_up'] = True
text = 'The Node is behind the latest block height and catching up! 💀 ' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Current block height: ' + block_height + '\n\n' + \
'Please check your Thornode immediately!'
else:
node_data['is_catching_up'] = False
text = 'The node caught up to the latest block height again! 👌' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Current block height: ' + block_height
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
def check_thorchain_midgard_api(context, node_address):
"""
Check that Midgard API is ok
"""
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
was_healthy = node_data.setdefault('is_midgard_healthy', True)
is_midgard_healthy = is_midgard_api_healthy(node_data['ip_address'])
if was_healthy != is_midgard_healthy:
if is_midgard_healthy:
text = 'Midgard API is healthy again! 👌' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address
try_message_with_home_menu(context, chat_id=chat_id, text=text)
else:
text = 'Midgard API is not healthy anymore! 💀' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n\n' + \
'Please check your Thornode immediately!'
try_message_with_home_menu(context, chat_id=chat_id, text=text)
node_data['is_midgard_healthy'] = is_midgard_healthy
| [((9092, 9136), 'handlers.chat_helpers.try_message_to_all_users', 'try_message_to_all_users', (['context'], {'text': 'text'}), '(context, text=text)\n', (9116, 9136), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((12838, 12885), 'handlers.chat_helpers.try_message_to_all_users', 'try_message_to_all_users', (['context'], {'text': 'message'}), '(context, text=message)\n', (12862, 12885), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((15788, 15859), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', ([], {'context': 'context', 'chat_id': 'chat_id', 'text': 'text'}), '(context=context, chat_id=chat_id, text=text)\n', (15814, 15859), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((947, 1018), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', ([], {'context': 'context', 'chat_id': 'chat_id', 'text': 'text'}), '(context=context, chat_id=chat_id, text=text)\n', (973, 1018), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((4512, 4542), 'packaging.version.parse', 'version.parse', (['highest_version'], {}), '(highest_version)\n', (4525, 4542), False, 'from packaging import version\n'), ((4558, 4592), 'packaging.version.parse', 'version.parse', (['last_newest_version'], {}), '(last_newest_version)\n', (4571, 4592), False, 'from packaging import version\n'), ((12083, 12154), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', ([], {'context': 'context', 'chat_id': 'chat_id', 'text': 'text'}), '(context=context, chat_id=chat_id, text=text)\n', (12109, 12154), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((12517, 12588), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', ([], {'context': 'context', 'chat_id': 'chat_id', 'text': 'text'}), '(context=context, chat_id=chat_id, text=text)\n', (12543, 12588), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((16554, 16617), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', (['context'], {'chat_id': 'chat_id', 'text': 'text'}), '(context, chat_id=chat_id, text=text)\n', (16580, 16617), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((16966, 17029), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', (['context'], {'chat_id': 'chat_id', 'text': 'text'}), '(context, chat_id=chat_id, text=text)\n', (16992, 17029), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((2004, 2078), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', ([], {'context': 'context', 'chat_id': 'chat_id', 'text': 'message'}), '(context=context, chat_id=chat_id, text=message)\n', (2030, 2078), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((4382, 4398), 'packaging.version.parse', 'version.parse', (['v'], {}), '(v)\n', (4395, 4398), False, 'from packaging import version\n'), ((4729, 4759), 'packaging.version.parse', 'version.parse', (["node['version']"], {}), "(node['version'])\n", (4742, 4759), False, 'from packaging import version\n'), ((4762, 4792), 'packaging.version.parse', 'version.parse', 
(['highest_version'], {}), '(highest_version)\n', (4775, 4792), False, 'from packaging import version\n'), ((5074, 5167), 'handlers.chat_helpers.try_message_with_home_menu', 'try_message_with_home_menu', (['context'], {'chat_id': "context.job.context['chat_id']", 'text': 'message'}), "(context, chat_id=context.job.context['chat_id'],\n text=message)\n", (5100, 5167), False, 'from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users\n'), ((10555, 10596), 'constants.messages.get_node_healthy_again_message', 'get_node_healthy_again_message', (['node_data'], {}), '(node_data)\n', (10585, 10596), False, 'from constants.messages import get_node_health_warning_message, get_node_healthy_again_message\n'), ((10874, 10916), 'constants.messages.get_node_health_warning_message', 'get_node_health_warning_message', (['node_data'], {}), '(node_data)\n', (10905, 10916), False, 'from constants.messages import get_node_health_warning_message, get_node_healthy_again_message\n')] |
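check_versions_status leans on packaging.version.parse both for the max() and for the per-node comparison; a standalone illustration of why plain string comparison would not be safe here:

from packaging import version

candidates = ["0.9.0", "0.10.1", "0.10.0"]
print(max(candidates))                                  # '0.9.0'  -- lexicographic, wrong
print(max(candidates, key=lambda v: version.parse(v)))  # '0.10.1' -- semantic, as in the code above
print(version.parse("0.9.0") < version.parse("0.10.0"))  # True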
jjhenkel/dockerizeme | hard-gists/7578539/snippet.py | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | from pylab import *
from numpy import *
from numpy.linalg import solve
from scipy.integrate import odeint
from scipy.stats import norm, uniform, beta
from scipy.special import jacobi
a = 0.0
b = 3.0
theta=1.0
sigma=sqrt(theta/(2*(a+b+2)))
tscale = 0.05
invariant_distribution = poly1d( [-1 for x in range(int(a))], True)*poly1d( [1 for x in range(int(b))], True)
def eigenvalue(n):
return theta*n*(n+a+b+1)/(a+b+2)
gaussian_var = norm()
def dW(dt):
return norm.rvs() / sqrt(dt)
def random_walk(y0, tmax, dt, times = None):
dt = dt * tscale
def rhs(y,t):
return -theta*(y-(a-b)/(a+b+2)) + sqrt(2*theta*(1-y*y)/(a+b+2))*dW(dt/tscale)
if (times is None):
times = arange(0,tmax,dt)
y = zeros(shape=times.shape, dtype=float)
y[0] = y0
for i in range(1,y.shape[0]):
y[i] = y[i-1] + rhs(y[i-1], times[i])*dt
if abs(y[i]) > 1:
y[i] = y[i] / abs(y[i])
return (times, y)
def beta_prior(s, f):
return poly1d(ones(shape=(s,)), True)*poly1d(-1*ones(shape=(f,)), True)
def poly_to_jacobi(x):
"""x is a poly1d object"""
xc = x.coeffs
N = x.order+1
matrix = zeros(shape=(N,N), dtype=float)
for i in range(N):
matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs
return solve(matrix, xc)
def jacobi_to_poly(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + (jacobi(i,a,b)*invariant_distribution)*x[i]
return result
def jacobi_to_poly_no_invariant(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + jacobi(i,a,b)*x[i]
return result
def propagate_jacobi(pc, t):
"""Takes jacobi coefficients and propagates them"""
n = arange(pc.shape[0], dtype=float)
l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale
return exp(-l*t)*pc
def truncate_unnecessary_jacobi(p):
p_normalized = p / (abs(p).sum())
cs = cumsum(abs(p_normalized[::-1]))[::-1]
return p_normalized[where(abs(cs) > 1e-4)]
def pde_solve(prior, t):
result = zeros(shape=(t.shape[0], prior.shape[0]), dtype=float)
result[0,:] = prior
for i in range(1,t.shape[0]):
result[i,:] = propagate_jacobi(result[i-1,:], t[i]-t[i-1])
return result
def transform_to_x(pdf, x):
result = zeros(shape=(pdf.shape[0], x.shape[0]), dtype=float)
for i in range(0, pdf.shape[0]):
p = jacobi_to_poly(pdf[i,:])
result[i,:] = p(x)
result[i,:] /= result[i,:].sum()
return result
tmax = 4
prior = beta_prior(40, 20)
prior_in_jacobi = poly_to_jacobi(prior)
dt = 0.1
times = arange(0,tmax,dt)
x = arange(-1,1,0.01)
rw_dt = 0.01
t, y = random_walk(0.35*2-1, tmax, rw_dt)
solution_as_x = zeros(shape=(times.size, x.size), dtype=float)
solution_as_jacobi = None
empirical_ctr = zeros(shape=(4,), dtype=float)
for i in range(0,4):
nt = int(1.0/dt)
prior = prior_in_jacobi
rnd = uniform(0,1)
if (i > 0):
nsamples = 40
r = rnd.rvs(nsamples)
        ctr = (y[int(i/rw_dt)]+1)/2.0
        print("CTR: " + str(ctr))
        success = (r < ctr).sum()
        print("Empirical: " + str(success / float(nsamples)))
evidence = beta_prior( nsamples - success, success)
prior = None
j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1])
prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j))
empirical_ctr[i] = success / float(nsamples)
solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt])
solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x)
plot(arange(0,4), empirical_ctr, 'go')
plot(t, (y+1)/2.0, 'k')
imshow(solution_as_x.transpose(), origin='lower', extent=[0,tmax,0,1])
xlabel("time")
ylabel("CTR")
title("Bayesian Estimate of CTR")
colorbar()
show()
| [] |
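The pieces above form a small spectral solver for a Jacobi diffusion; written out, this is only a restatement of what rhs, eigenvalue and propagate_jacobi encode (tscale is the extra time rescaling used throughout):

    dY_t = -\theta\left(Y_t - \tfrac{a-b}{a+b+2}\right)dt + \sqrt{\tfrac{2\theta\,(1-Y_t^2)}{a+b+2}}\,dW_t

    \lambda_n = \frac{\theta\,n\,(n+a+b+1)}{a+b+2}, \qquad c_n(t) = e^{-\lambda_n\,\mathrm{tscale}\,t}\,c_n(0)

A density expanded as p(y, t) = w(y) \sum_n c_n(t) P_n^{(a,b)}(y), with w the invariant weight built by invariant_distribution and P_n^{(a,b)} the Jacobi polynomials from scipy.special.jacobi, is therefore propagated simply by decaying each coefficient, which is exactly what pde_solve does row by row.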
lennykioko/Flask-social-network | forms.py | 15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65 | # forms are not just about display, instead they are more of validation
# wtf forms protect our site against csrf attacks
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
Length, EqualTo)
from models import User
def name_exists(form, field):
if User.select().where(User.username == field.data).exists():
raise ValidationError('User with this name already exists.')
def email_exists(form, field):
if User.select().where(User.email == field.data).exists():
raise ValidationError('User with this email already exists.')
class RegisterForm(FlaskForm):
username = StringField(
'Username', # is the label
validators=[
DataRequired(),
Regexp(
r'^[a-zA-Z0-9_]+$',
message = ("Username should be one word, letters, numbers and underscores only.")
),
name_exists
])
email = StringField(
'Email',
validators=[
DataRequired(),
Email(),
email_exists
])
password = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=8),
EqualTo('password2', message = 'Passwords must match')
])
password2 = PasswordField(
'Confirm Password',
validators=[DataRequired()
])
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
class PostForm(FlaskForm):
content = TextAreaField("What's Up?", validators = [DataRequired()])
| [((450, 504), 'wtforms.validators.ValidationError', 'ValidationError', (['"""User with this name already exists."""'], {}), "('User with this name already exists.')\n", (465, 504), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((605, 660), 'wtforms.validators.ValidationError', 'ValidationError', (['"""User with this email already exists."""'], {}), "('User with this email already exists.')\n", (620, 660), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((765, 779), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (777, 779), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((784, 893), 'wtforms.validators.Regexp', 'Regexp', (['"""^[a-zA-Z0-9_]+$"""'], {'message': '"""Username should be one word, letters, numbers and underscores only."""'}), "('^[a-zA-Z0-9_]+$', message=\n 'Username should be one word, letters, numbers and underscores only.')\n", (790, 893), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((981, 995), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (993, 995), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1000, 1007), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1005, 1007), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1090, 1104), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1102, 1104), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1109, 1122), 'wtforms.validators.Length', 'Length', ([], {'min': '(8)'}), '(min=8)\n', (1115, 1122), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1127, 1179), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password2"""'], {'message': '"""Passwords must match"""'}), "('password2', message='Passwords must match')\n", (1134, 1179), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1252, 1266), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1264, 1266), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1344, 1358), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1356, 1358), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1360, 1367), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1365, 1367), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1420, 1434), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1432, 1434), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1518, 1532), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1530, 1532), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((383, 396), 'models.User.select', 'User.select', ([], {}), '()\n', (394, 396), False, 'from models import User\n'), ((541, 554), 'models.User.select', 'User.select', ([], {}), '()\n', (552, 554), False, 'from models import User\n')] |
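A hedged sketch of how a form such as RegisterForm is typically consumed in a Flask view; the route, template name, secret key and redirect target are assumptions, not taken from the original project:

from flask import Flask, redirect, render_template
from forms import RegisterForm  # the module defined above

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # Flask-WTF needs this for CSRF tokens

@app.route("/register", methods=["GET", "POST"])
def register():
    form = RegisterForm()
    # validate_on_submit() runs every validator, including the custom
    # name_exists / email_exists checks defined above.
    if form.validate_on_submit():
        return redirect("/")
    return render_template("register.html", form=form)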
flmnt/pantam | pantam_cli/utils/messages.py | da47d977e69ec410d0642b5ade1f2323c1b6b350 | from sys import stderr, stdout
from enum import Enum
from colored import fg, attr
PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset")
colour_msg = lambda msg, colour: fg(colour) + attr("bold") + msg + attr("reset")
info_msg = lambda msg: colour_msg(msg, "blue")
success_msg = lambda msg: colour_msg(msg, "green")
error_msg = lambda msg: colour_msg(msg, "red")
class NewLine(Enum):
before = 1
after = 2
both = 3
def write_msg(msg: str, spacing: NewLine = None) -> None:
"""Write message to stdout"""
prefix: str = "\n" if spacing in (NewLine.before, NewLine.both) else ""
suffix: str = "\n" if spacing in (NewLine.after, NewLine.both) else ""
stdout.write("%s%s%s" % (prefix, msg, suffix))
def write_error(msg: str) -> None:
"""Write message to stderr"""
stderr.write("\n%s\n" % msg)
welcome_msg = (
lambda: PANTAM
+ """
The microframework for microservices.
Let's build your app...
"""
)
name_index_file_msg = lambda: "What is the name of your main script?"
name_actions_folder_msg = lambda: "What is the name of your actions folder?"
def create_actions_file_msg(second_run: bool):
"""Actions File Message"""
article = "another" if second_run else "an"
return "Do you want to create %s action file?" % article
name_actions_file_msg = lambda: "What is the name of your actions file?"
confirm_structure_msg = (
lambda structure: """Your application will look like this:
%s
Happy to proceed?"""
% structure
)
| [((138, 151), 'colored.attr', 'attr', (['"""reset"""'], {}), "('reset')\n", (142, 151), False, 'from colored import fg, attr\n'), ((693, 739), 'sys.stdout.write', 'stdout.write', (["('%s%s%s' % (prefix, msg, suffix))"], {}), "('%s%s%s' % (prefix, msg, suffix))\n", (705, 739), False, 'from sys import stderr, stdout\n'), ((815, 843), 'sys.stderr.write', 'stderr.write', (["('\\n%s\\n' % msg)"], {}), "('\\n%s\\n' % msg)\n", (827, 843), False, 'from sys import stderr, stdout\n'), ((220, 233), 'colored.attr', 'attr', (['"""reset"""'], {}), "('reset')\n", (224, 233), False, 'from colored import fg, attr\n'), ((97, 109), 'colored.fg', 'fg', (['"""yellow"""'], {}), "('yellow')\n", (99, 109), False, 'from colored import fg, attr\n'), ((112, 124), 'colored.attr', 'attr', (['"""bold"""'], {}), "('bold')\n", (116, 124), False, 'from colored import fg, attr\n'), ((186, 196), 'colored.fg', 'fg', (['colour'], {}), '(colour)\n', (188, 196), False, 'from colored import fg, attr\n'), ((199, 211), 'colored.attr', 'attr', (['"""bold"""'], {}), "('bold')\n", (203, 211), False, 'from colored import fg, attr\n')] |
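A hedged usage sketch of the message helpers above; the message strings are made up:

from pantam_cli.utils.messages import (
    NewLine, error_msg, success_msg, welcome_msg, write_error, write_msg,
)

write_msg(welcome_msg(), NewLine.after)
write_msg(success_msg("Created actions folder"), NewLine.both)
write_error(error_msg("Could not write index file"))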
zmc/ocs-ci | tests/manage/test_remove_mon_from_cluster.py | fcf51f3637f657689ba5a8ac869f2b14ac04b0cf | """
A Testcase to remove a mon from the cluster
when I/O's are happening.
Polarion-ID- OCS-355
"""
import logging
import pytest
from ocs_ci.ocs import ocp, constants
from ocs_ci.framework.testlib import tier4, ManageTest
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from tests.helpers import run_io_with_rados_bench, delete_cephblockpool
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.exceptions import CephHealthException
log = logging.getLogger(__name__)
@retry(CephHealthException, 8, 3, 1)
def verify_mon_pod_up(ceph_cluster, pods):
"""
Verify mon pods are in Running state.
Returns:
bool: True for wait for the resource, False otherwise
"""
log.info(f"Verifying all mons pods are up and Running")
ceph_cluster.cluster_health_check(timeout=3)
ret = pods.wait_for_resource(
condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mon',
resource_count=3, timeout=700)
log.info(f"waited for all mon pod to come up and running {ret}")
return ret
def run_io_on_pool():
"""
Runs the I/O on the pool and delete the pool
Returns: A thread of I/O
"""
tools_pod = pod.get_ceph_tools_pod()
tools_pod.add_role(role='client')
return run_io_with_rados_bench(
ceph_pods=[tools_pod],
config={'time': 45, 'cleanup': False,
'pool': 'test-pool'
}
)
@tier4
@pytest.mark.polarion_id("OCS-355")
class TestRemoveMonFromCluster(ManageTest):
def test_remove_mon_pod_from_cluster(self):
"""
To remove mon pod from the cluster
after the I/O is performed on the pool
and waiting for the operator to create a
new mon pod on its own
"""
ceph_cluster = CephCluster()
pods = ocp.OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
list_mons = ceph_cluster.get_mons_from_cluster()
assert len(list_mons) > 1, pytest.skip(
"INVALID: Mon count should be more than one to delete."
)
assert run_io_on_pool(), 'Failed to run I/O on the pool'
assert delete_cephblockpool('test-pool'), 'Failed to delete pool'
ceph_cluster.cluster_health_check(timeout=0)
ceph_cluster.remove_mon_from_cluster()
assert verify_mon_pod_up(ceph_cluster, pods), f"Mon pods are not up and running state"
ceph_cluster.cluster_health_check(timeout=60)
| [((499, 526), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (516, 526), False, 'import logging\n'), ((530, 565), 'ocs_ci.utility.retry.retry', 'retry', (['CephHealthException', '(8)', '(3)', '(1)'], {}), '(CephHealthException, 8, 3, 1)\n', (535, 565), False, 'from ocs_ci.utility.retry import retry\n'), ((1466, 1500), 'pytest.mark.polarion_id', 'pytest.mark.polarion_id', (['"""OCS-355"""'], {}), "('OCS-355')\n", (1489, 1500), False, 'import pytest\n'), ((1219, 1243), 'ocs_ci.ocs.resources.pod.get_ceph_tools_pod', 'pod.get_ceph_tools_pod', ([], {}), '()\n', (1241, 1243), False, 'from ocs_ci.ocs.resources import pod\n'), ((1294, 1404), 'tests.helpers.run_io_with_rados_bench', 'run_io_with_rados_bench', ([], {'ceph_pods': '[tools_pod]', 'config': "{'time': 45, 'cleanup': False, 'pool': 'test-pool'}"}), "(ceph_pods=[tools_pod], config={'time': 45,\n 'cleanup': False, 'pool': 'test-pool'})\n", (1317, 1404), False, 'from tests.helpers import run_io_with_rados_bench, delete_cephblockpool\n'), ((1812, 1825), 'ocs_ci.ocs.cluster.CephCluster', 'CephCluster', ([], {}), '()\n', (1823, 1825), False, 'from ocs_ci.ocs.cluster import CephCluster\n'), ((1841, 1916), 'ocs_ci.ocs.ocp.OCP', 'ocp.OCP', ([], {'kind': 'constants.POD', 'namespace': "config.ENV_DATA['cluster_namespace']"}), "(kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace'])\n", (1848, 1916), False, 'from ocs_ci.ocs import ocp, constants\n'), ((2031, 2099), 'pytest.skip', 'pytest.skip', (['"""INVALID: Mon count should be more than one to delete."""'], {}), "('INVALID: Mon count should be more than one to delete.')\n", (2042, 2099), False, 'import pytest\n'), ((2202, 2235), 'tests.helpers.delete_cephblockpool', 'delete_cephblockpool', (['"""test-pool"""'], {}), "('test-pool')\n", (2222, 2235), False, 'from tests.helpers import run_io_with_rados_bench, delete_cephblockpool\n')] |
Caaz/smartystreets-python-sdk | smartystreets_python_sdk/us_autocomplete_pro/client.py | f56cd00d29861bde297143c128f79a4b1d89541c | from smartystreets_python_sdk import Request
from smartystreets_python_sdk.exceptions import SmartyException
from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type
class Client:
def __init__(self, sender, serializer):
"""
It is recommended to instantiate this class using ClientBuilder.build_us_autocomplete_pro_api_client()
"""
self.sender = sender
self.serializer = serializer
def send(self, lookup):
"""
Sends a Lookup object to the US Autocomplete Pro API and stores the result in the Lookup's result field.
"""
if not lookup or not lookup.search:
raise SmartyException('Send() must be passed a Lookup with the search field set.')
request = self.build_request(lookup)
response = self.sender.send(request)
if response.error:
raise response.error
result = self.serializer.deserialize(response.payload)
suggestions = self.convert_suggestions(result.get('suggestions') or [])
lookup.result = suggestions
return suggestions
def build_request(self, lookup):
request = Request()
self.add_parameter(request, 'search', lookup.search)
self.add_parameter(request, 'max_results', lookup.max_results)
self.add_parameter(request, 'include_only_cities', self.build_filter_string(lookup.city_filter))
self.add_parameter(request, 'include_only_states', self.build_filter_string(lookup.state_filter))
self.add_parameter(request, 'include_only_zip_codes', self.build_filter_string(lookup.zip_filter))
self.add_parameter(request, 'exclude_states', self.build_filter_string(lookup.exclude))
self.add_parameter(request, 'prefer_cities', self.build_filter_string(lookup.prefer_cities))
self.add_parameter(request, 'prefer_states', self.build_filter_string(lookup.prefer_states))
self.add_parameter(request, 'prefer_zip_codes', self.build_filter_string(lookup.prefer_zips))
self.add_parameter(request, 'prefer_ratio', lookup.prefer_ratio)
self.add_parameter(request, 'prefer_geolocation', lookup.prefer_geo)
self.add_parameter(request, 'selected', lookup.selected)
return request
@staticmethod
def build_filter_string(filter_list):
return ','.join(filter_list or []) or None
@staticmethod
def convert_suggestions(suggestion_dictionaries):
return [Suggestion(suggestion) for suggestion in suggestion_dictionaries]
@staticmethod
def add_parameter(request, key, value):
if value and value != 'none':
request.parameters[key] = value
| [((1178, 1187), 'smartystreets_python_sdk.Request', 'Request', ([], {}), '()\n', (1185, 1187), False, 'from smartystreets_python_sdk import Request\n'), ((684, 760), 'smartystreets_python_sdk.exceptions.SmartyException', 'SmartyException', (['"""Send() must be passed a Lookup with the search field set."""'], {}), "('Send() must be passed a Lookup with the search field set.')\n", (699, 760), False, 'from smartystreets_python_sdk.exceptions import SmartyException\n'), ((2479, 2501), 'smartystreets_python_sdk.us_autocomplete_pro.Suggestion', 'Suggestion', (['suggestion'], {}), '(suggestion)\n', (2489, 2501), False, 'from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type\n')] |
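A hedged usage sketch of the client above. The builder call comes from the class docstring; the credential class, credential values and the Lookup import path are assumptions:

from smartystreets_python_sdk import ClientBuilder, StaticCredentials
from smartystreets_python_sdk.us_autocomplete_pro import Lookup

credentials = StaticCredentials("auth-id", "auth-token")
client = ClientBuilder(credentials).build_us_autocomplete_pro_api_client()

lookup = Lookup()
lookup.search = "1600 Amphitheatre"
for suggestion in client.send(lookup):  # suggestions are also stored on lookup.result
    print(suggestion)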
Saritasa/mssqlvc | mssqlvc.py | 836caeea59cc0ed23234687b94062e007707c603 | # -*- coding: utf-8 -*-
"""
mssqlvc
~~~~~~~
Database version control utility for Microsoft SQL Server. See README.md for more information.
Licensed under the BSD license. See LICENSE file in the project root for full license information.
"""
import argparse
import datetime
import io
import logging
import os
import re
import sys
import urlparse
try:
import clr
except ImportError:
    print('Cannot import clr module, make sure you run this script using IronPython')
exit(2)
import System
clr.AddReference('Microsoft.SqlServer.Smo')
clr.AddReference('Microsoft.SqlServer.SqlEnum')
clr.AddReference('Microsoft.SqlServer.ConnectionInfo')
import Microsoft.SqlServer.Management.Smo as Smo
import Microsoft.SqlServer.Management.Common as Common
__author__ = 'Ivan Kozhin'
__copyright__ = 'Copyright (c) 2015-2016, Saritasa'
__license__ = 'BSD'
__version__ = '1.4.5'
__all__ = ['MsSqlVersion']
class ScriptExecutionError(Exception):
pass
class MsSqlVersion(object):
"""
SQL Server patch migration class.
"""
class bcolors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
def __init__(self, connection_string, patch_dir='.', exclude_pattern=None, logger=None,
stop_on_error=False, noexecute=False, case_insensitive=False, record_files_only=False):
"""
Initialize instance with connection and database objects.
:param connection_string: Connection string in rfc1738 url format
:param patch_dir: Patch directory with .sql files
:param exclude_pattern: String with regular expression the patch files should match
:param logger: Logger that is used for logging
:param stop_on_error: Stop execution on error, default behavior is to continue
:param case_insensitive: Use case insensitive to compare patch files
:param record_files_only: Only file names will be stored to patch table without folder paths
"""
url = urlparse.urlparse(connection_string)
is_local_login = not url.username
self.connection = Common.ServerConnection(LoginSecure=is_local_login, ServerInstance=url.hostname,
DatabaseName=url.path.replace('/', ''))
if not is_local_login:
self.connection.Login = url.username
self.connection.Password = url.password
self.server = Smo.Server(self.connection)
self.database = self.server.Databases[self.connection.DatabaseName]
self.server.ConnectionContext.ConnectTimeout = 90
self.exclude_pattern = exclude_pattern
self.patch_dir = patch_dir
self.stop_on_error = stop_on_error
self.case_insensitive = case_insensitive
self.record_files_only = record_files_only
self.executed_count = 0
self.logger = logging.NullHandler() if not logger else logger
if not os.path.exists(patch_dir):
raise Exception('Patch folder does not exist')
if 'mssql' not in connection_string:
raise Exception('Wrong connection string, it should contain mssql word')
exists = self._create_patch_table_if_not_exists(self.database)
if not exists:
self.logger.info('[%s] created _patch_history table' % (self.database.Name,))
def __del__(self):
if self.server:
self.server.ConnectionContext.Disconnect()
def update(self):
"""Executes database update process"""
patches = self.get_pending_patches()
self.logger.debug('Files to execute %s' % (patches,))
for patch in patches:
success = self.execute_file(patch)
if success:
self.executed_count += 1
self.put_patch(patch)
if not success and self.stop_on_error:
self.logger.critical(MsSqlVersion.bcolors.WARNING + 'Execution stopped. Please fix errors and try again.'
+ MsSqlVersion.bcolors.ENDC)
raise ScriptExecutionError()
self.logger.info('[%s] Executed %d patch(-es)' % (self.database.Name, self.executed_count))
def fill(self):
"""Skip scripts execution but add them to patches table"""
patches = self.get_pending_patches()
for patch in patches:
self.logger.info('Add file %s' % (patch,))
self.put_patch(patch)
def get_pending_patches(self):
applied_patches = self.get_applied_patches()
if self.record_files_only:
applied_patches = [os.path.basename(f) for f in applied_patches]
patches = self._get_sql_files_from_dir(applied_patches)
patches.sort()
return patches
def execute_file(self, file):
"""Executes file against database in transaction, returns True if success"""
ret = True
try:
full_name = os.path.join(os.path.normpath(self.patch_dir), file)
with io.open(full_name, 'r', encoding='utf8') as sql_file:
sql = sql_file.read()
self.logger.info('[%s] Executing %s...' % (self.database.Name, file))
self.connection.BeginTransaction()
self.database.ExecuteNonQuery(sql)
self.connection.CommitTransaction()
except Exception as e:
self.connection.RollBackTransaction()
self.logger.error('Exception on %s' % (file,))
message = e.message or e
if e.clsException.InnerException is not None and e.clsException.InnerException.InnerException is not None:
message += ' ' + e.clsException.InnerException.InnerException.Message
self.logger.error('[%s] %s (%s)' % (self.database.Name, full_name, message))
ret = False
return ret
def put_patch(self, file):
"""Write record that file has been executed"""
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if self.record_files_only:
file = os.path.basename(file)
sql = 'insert [_patch_history] (name, applied_at) values(\'%s\', \'%s\');' % (file, now)
self.database.ExecuteNonQuery(sql)
def get_applied_patches(self):
rows = self.database.ExecuteWithResults('select name from [_patch_history];').Tables[0].Rows
return set([row['name'] for row in rows])
def _get_sql_files_from_dir(self, exclude_list=[]):
"""Get all script files from directory"""
_exclude_list = set(exclude_list) if not self.case_insensitive else [f.lower() for f in exclude_list]
prevdir = os.getcwd()
os.chdir(self.patch_dir)
sql_files = []
for root, dirs, files in os.walk('.'):
for file in files:
file = os.path.normpath(os.path.join(root, file))
_file = file
if self.case_insensitive:
_file = _file.lower()
if self.record_files_only:
_file = os.path.basename(_file)
if (_file in _exclude_list or not _file.lower().endswith('.sql') or
(self.exclude_pattern and re.search(self.exclude_pattern, file))):
continue
sql_files.append(file)
os.chdir(prevdir)
return sql_files
@staticmethod
def _create_patch_table_if_not_exists(database):
"""Create patch table in database if not exists"""
sql = 'select * from sys.objects where object_id = object_id(\'_patch_history\') AND type in (\'U\');'
exists = database.ExecuteWithResults(sql).Tables[0].Rows.Count > 0
if not exists:
sql = """
create table [_patch_history] (id int not null identity(1, 1), name varchar(100) not null,
applied_at datetime not null);
alter table [_patch_history] add constraint _patch_history_PK primary key clustered (id);
"""
database.ExecuteNonQuery(sql)
return exists
def get_cmd_line_parser():
"""Get initialized argparse.ArgumentParser object"""
parser = argparse.ArgumentParser(
description='MSSQL database patch history tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='''Example: %(prog)s -c "mssql://sa:123@host\instance/database" -d "D:/1/project/patch"''')
parser.add_argument('--connection', '-c',
required=True,
dest='connection',
action='store',
help='connection string in rfc1738 url format, required')
parser.add_argument('--directory', '-d',
dest='directory',
action='store',
default='.',
help='directory with patch files')
parser.add_argument('--log', '-l',
dest='log',
action='store',
help='log file')
parser.add_argument('--noexecute', '-n',
action='store_true',
dest='noexecute',
default=False,
help='displays pending script files with no execution')
parser.add_argument('--noexecute-fill', '-nf',
action='store_true',
dest='noexecute_fill',
default=False,
help='displays pending script files with no execution and fills patch table')
parser.add_argument('--stop-on-error', '-soe',
action='store_true',
dest='stop_on_error',
default=False,
help='stops execution if any script fails')
parser.add_argument('--exclude-pattern', '-ep',
dest='exclude_pattern',
help='skips files match to regular expression')
parser.add_argument('--record-files-only', '-rfo',
action='store_true',
dest='record_files_only',
default=False,
help='only file names will be stored to patch table without folder paths')
parser.add_argument('--case-insensitive', '-ci',
action='store_true',
dest='case_insensitive',
default=False,
help='use case insensitive to compare patch files so "PatchName.sql" and "patchname.sql" is the same')
parser.add_argument('--debug',
action='store_true',
dest='debug',
default=False,
help='enables debug output')
parser.add_argument('--version', '-v',
action='version',
version='%(prog)s ' + __version__)
return parser
if __name__ == '__main__':
# parser
parser = get_cmd_line_parser()
parser_args = parser.parse_args()
if parser_args.connection is None or parser_args.directory is None:
parser.print_help()
exit(1)
# logging
logger = logging.getLogger('mssql')
if parser_args.log:
fh = logging.FileHandler(parser_args.log)
fh.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
logger.setLevel(logging.DEBUG if parser_args.debug else logging.INFO)
logger.addHandler(ch)
# database handle
sqlvc = MsSqlVersion(parser_args.connection, parser_args.directory, exclude_pattern=parser_args.exclude_pattern,
stop_on_error=parser_args.stop_on_error, case_insensitive=parser_args.case_insensitive,
record_files_only=parser_args.record_files_only, logger=logger)
if parser_args.noexecute:
for patch in sqlvc.get_pending_patches():
logger.info(' ' + patch)
elif parser_args.noexecute_fill:
sqlvc.fill()
else:
sqlvc.update()
| [((519, 562), 'clr.AddReference', 'clr.AddReference', (['"""Microsoft.SqlServer.Smo"""'], {}), "('Microsoft.SqlServer.Smo')\n", (535, 562), False, 'import clr\n'), ((563, 610), 'clr.AddReference', 'clr.AddReference', (['"""Microsoft.SqlServer.SqlEnum"""'], {}), "('Microsoft.SqlServer.SqlEnum')\n", (579, 610), False, 'import clr\n'), ((611, 665), 'clr.AddReference', 'clr.AddReference', (['"""Microsoft.SqlServer.ConnectionInfo"""'], {}), "('Microsoft.SqlServer.ConnectionInfo')\n", (627, 665), False, 'import clr\n'), ((8187, 8423), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MSSQL database patch history tool"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'epilog': '"""Example: %(prog)s -c "mssql://sa:123@host\\\\instance/database" -d "D:/1/project/patch\\""""'}), '(description=\'MSSQL database patch history tool\',\n formatter_class=argparse.RawDescriptionHelpFormatter, epilog=\n \'Example: %(prog)s -c "mssql://sa:123@host\\\\instance/database" -d "D:/1/project/patch"\'\n )\n', (8210, 8423), False, 'import argparse\n'), ((10631, 10657), 'logging.getLogger', 'logging.getLogger', (['"""mssql"""'], {}), "('mssql')\n", (10648, 10657), False, 'import logging\n'), ((10857, 10880), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (10878, 10880), False, 'import logging\n'), ((2072, 2108), 'urlparse.urlparse', 'urlparse.urlparse', (['connection_string'], {}), '(connection_string)\n', (2089, 2108), False, 'import urlparse\n'), ((2465, 2492), 'Microsoft.SqlServer.Management.Smo.Server', 'Smo.Server', (['self.connection'], {}), '(self.connection)\n', (2475, 2492), True, 'import Microsoft.SqlServer.Management.Smo as Smo\n'), ((6667, 6678), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6676, 6678), False, 'import os\n'), ((6688, 6712), 'os.chdir', 'os.chdir', (['self.patch_dir'], {}), '(self.patch_dir)\n', (6696, 6712), False, 'import os\n'), ((6769, 6781), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (6776, 6781), False, 'import os\n'), ((7335, 7352), 'os.chdir', 'os.chdir', (['prevdir'], {}), '(prevdir)\n', (7343, 7352), False, 'import os\n'), ((10695, 10731), 'logging.FileHandler', 'logging.FileHandler', (['parser_args.log'], {}), '(parser_args.log)\n', (10714, 10731), False, 'import logging\n'), ((10901, 10961), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)s] %(message)s"""'], {}), "('%(asctime)s [%(levelname)s] %(message)s')\n", (10918, 10961), False, 'import logging\n'), ((2907, 2928), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (2926, 2928), False, 'import logging\n'), ((2971, 2996), 'os.path.exists', 'os.path.exists', (['patch_dir'], {}), '(patch_dir)\n', (2985, 2996), False, 'import os\n'), ((6082, 6104), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (6098, 6104), False, 'import os\n'), ((10756, 10816), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)s] %(message)s"""'], {}), "('%(asctime)s [%(levelname)s] %(message)s')\n", (10773, 10816), False, 'import logging\n'), ((4606, 4625), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (4622, 4625), False, 'import os\n'), ((4951, 4983), 'os.path.normpath', 'os.path.normpath', (['self.patch_dir'], {}), '(self.patch_dir)\n', (4967, 4983), False, 'import os\n'), ((5008, 5048), 'io.open', 'io.open', (['full_name', '"""r"""'], {'encoding': '"""utf8"""'}), "(full_name, 'r', encoding='utf8')\n", (5015, 5048), False, 'import io\n'), ((5974, 5997), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (5995, 5997), False, 'import datetime\n'), ((6854, 6878), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (6866, 6878), False, 'import os\n'), ((7064, 7087), 'os.path.basename', 'os.path.basename', (['_file'], {}), '(_file)\n', (7080, 7087), False, 'import os\n'), ((7218, 7255), 're.search', 're.search', (['self.exclude_pattern', 'file'], {}), '(self.exclude_pattern, file)\n', (7227, 7255), False, 'import re\n')] |
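A hedged usage sketch of the MsSqlVersion class above, mirroring the CLI epilog; the connection string and patch directory are placeholders, and the script must run under IronPython because it loads SMO via clr:

import logging
from mssqlvc import MsSqlVersion

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mssql")

sqlvc = MsSqlVersion("mssql://sa:secret@localhost/mydb", "patches",
                     stop_on_error=True, logger=logger)
for patch in sqlvc.get_pending_patches():
    logger.info("pending: %s", patch)
sqlvc.update()  # executes each pending .sql file in its own transaction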
KshitizSharmaV/Quant_Platform_Python | lib/python3.6/site-packages/statsmodels/iolib/tests/test_table_econpy.py | d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39 | '''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class TestCell(object):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i % 2)
for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
assert_equal(cell.data, datum)
class TestSimpleTable(object):
def test_txt_fmt1(self):
# Limited test of custom txt_fmt
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
assert_equal(actual, desired)
def test_ltx_fmt1(self):
# Limited test of custom ltx_fmt
desired = r"""
\begin{center}
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
\end{center}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
assert_equal(actual, desired)
def test_html_fmt1(self):
# Limited test of custom html_fmt
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
#the previous has significant trailing whitespace that got removed
#desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
actual = '\n%s\n' % tbl.as_html()
actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
#print(actual)
#print(desired)
#print len(actual), len(desired)
assert_equal(actual, desired)
def test_customlabel(self):
# Limited test of custom custom labeling
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
#print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
assert_equal(actual, desired)
| [((619, 643), 'statsmodels.iolib.table.default_latex_fmt.copy', 'default_latex_fmt.copy', ([], {}), '()\n', (641, 643), False, 'from statsmodels.iolib.table import default_latex_fmt\n'), ((656, 679), 'statsmodels.iolib.table.default_html_fmt.copy', 'default_html_fmt.copy', ([], {}), '()\n', (677, 679), False, 'from statsmodels.iolib.table import default_html_fmt\n'), ((1318, 1427), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1', 'ltx_fmt': 'ltx_fmt1', 'html_fmt': 'html_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1, ltx_fmt=\n ltx_fmt1, html_fmt=html_fmt1)\n', (1329, 1427), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1756, 1776), 'statsmodels.compat.python.zip', 'zip', (['cells', 'celldata'], {}), '(cells, celldata)\n', (1759, 1776), False, 'from statsmodels.compat.python import zip\n'), ((2278, 2307), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2290, 2307), False, 'from numpy.testing import assert_equal\n'), ((2775, 2804), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2787, 2804), False, 'from numpy.testing import assert_equal\n'), ((3673, 3702), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (3685, 3702), False, 'from numpy.testing import assert_equal\n'), ((3799, 3865), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)\n', (3810, 3865), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((4266, 4295), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (4278, 4295), False, 'from numpy.testing import assert_equal\n'), ((1647, 1674), 'statsmodels.iolib.table.Cell', 'Cell', (['datum'], {'datatype': '(i % 2)'}), '(datum, datatype=i % 2)\n', (1651, 1674), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1790, 1820), 'numpy.testing.assert_equal', 'assert_equal', (['cell.data', 'datum'], {}), '(cell.data, datum)\n', (1802, 1820), False, 'from numpy.testing import assert_equal\n')] |
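A hedged sketch of the SimpleTable API exercised by the tests above; the data values are made up:

from statsmodels.iolib.table import SimpleTable

data = [[0.0, 1], [2, 3.333]]
table = SimpleTable(data, ['header1', 'header2'], ['stub1', 'stub2'])
print(table.as_text())           # plain-text rendering
print(table.as_latex_tabular())  # LaTeX tabular rendering
print(table.as_html())           # HTML rendering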
MrDelik/core | homeassistant/components/todoist/types.py | 93a66cc357b226389967668441000498a10453bb | """Types for the Todoist component."""
from __future__ import annotations
from typing import TypedDict
class DueDate(TypedDict):
"""Dict representing a due date in a todoist api response."""
date: str
is_recurring: bool
lang: str
string: str
timezone: str | None
| [] |
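A hedged example of populating the DueDate TypedDict defined above; the field values are made up:

due: DueDate = {
    "date": "2024-05-01",
    "is_recurring": False,
    "lang": "en",
    "string": "every day",
    "timezone": None,
}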
corneliusroemer/pyzstd | src/c/c_pyzstd.py | 06f14ad29735d9ae85c188703dcb64c24686c4f2 | from collections import namedtuple
from enum import IntEnum
from ._zstd import *
from . import _zstd
__all__ = (# From this file
'compressionLevel_values', 'get_frame_info',
'CParameter', 'DParameter', 'Strategy',
# From _zstd
'ZstdCompressor', 'RichMemZstdCompressor',
'ZstdDecompressor', 'EndlessZstdDecompressor',
'ZstdDict', 'ZstdError', 'decompress', 'get_frame_size',
'compress_stream', 'decompress_stream',
'zstd_version', 'zstd_version_info', 'zstd_support_multithread')
# Used in __init__.py
_ZSTD_DStreamInSize = _zstd._ZSTD_DStreamInSize
_train_dict = _zstd._train_dict
_finalize_dict = _zstd._finalize_dict
# compressionLevel_values
_nt_values = namedtuple('values', ['default', 'min', 'max'])
compressionLevel_values = _nt_values(_zstd._ZSTD_defaultCLevel,
_zstd._ZSTD_minCLevel,
_zstd._ZSTD_maxCLevel)
_nt_frame_info = namedtuple('frame_info',
['decompressed_size', 'dictionary_id'])
def get_frame_info(frame_buffer):
"""Get zstd frame infomation from a frame header.
Argument
    frame_buffer: A bytes-like object. It should start from the beginning of
a frame, and needs to include at least the frame header (6 to
18 bytes).
Return a two-items namedtuple: (decompressed_size, dictionary_id)
If decompressed_size is None, decompressed size is unknown.
dictionary_id is a 32-bit unsigned integer value. 0 means dictionary ID was
not recorded in the frame header, the frame may or may not need a dictionary
to be decoded, and the ID of such a dictionary is not specified.
It's possible to append more items to the namedtuple in the future."""
ret_tuple = _zstd._get_frame_info(frame_buffer)
return _nt_frame_info(*ret_tuple)
class CParameter(IntEnum):
"""Compression parameters"""
compressionLevel = _zstd._ZSTD_c_compressionLevel
windowLog = _zstd._ZSTD_c_windowLog
hashLog = _zstd._ZSTD_c_hashLog
chainLog = _zstd._ZSTD_c_chainLog
searchLog = _zstd._ZSTD_c_searchLog
minMatch = _zstd._ZSTD_c_minMatch
targetLength = _zstd._ZSTD_c_targetLength
strategy = _zstd._ZSTD_c_strategy
enableLongDistanceMatching = _zstd._ZSTD_c_enableLongDistanceMatching
ldmHashLog = _zstd._ZSTD_c_ldmHashLog
ldmMinMatch = _zstd._ZSTD_c_ldmMinMatch
ldmBucketSizeLog = _zstd._ZSTD_c_ldmBucketSizeLog
ldmHashRateLog = _zstd._ZSTD_c_ldmHashRateLog
contentSizeFlag = _zstd._ZSTD_c_contentSizeFlag
checksumFlag = _zstd._ZSTD_c_checksumFlag
dictIDFlag = _zstd._ZSTD_c_dictIDFlag
nbWorkers = _zstd._ZSTD_c_nbWorkers
jobSize = _zstd._ZSTD_c_jobSize
overlapLog = _zstd._ZSTD_c_overlapLog
def bounds(self):
"""Return lower and upper bounds of a parameter, both inclusive."""
# 1 means compression parameter
return _zstd._get_param_bounds(1, self.value)
class DParameter(IntEnum):
"""Decompression parameters"""
windowLogMax = _zstd._ZSTD_d_windowLogMax
def bounds(self):
"""Return lower and upper bounds of a parameter, both inclusive."""
# 0 means decompression parameter
return _zstd._get_param_bounds(0, self.value)
class Strategy(IntEnum):
"""Compression strategies, listed from fastest to strongest.
Note : new strategies _might_ be added in the future, only the order
(from fast to strong) is guaranteed.
"""
fast = _zstd._ZSTD_fast
dfast = _zstd._ZSTD_dfast
greedy = _zstd._ZSTD_greedy
lazy = _zstd._ZSTD_lazy
lazy2 = _zstd._ZSTD_lazy2
btlazy2 = _zstd._ZSTD_btlazy2
btopt = _zstd._ZSTD_btopt
btultra = _zstd._ZSTD_btultra
btultra2 = _zstd._ZSTD_btultra2
# Set CParameter/DParameter types for validity check
_zstd._set_parameter_types(CParameter, DParameter) | [((752, 799), 'collections.namedtuple', 'namedtuple', (['"""values"""', "['default', 'min', 'max']"], {}), "('values', ['default', 'min', 'max'])\n", (762, 799), False, 'from collections import namedtuple\n'), ((1003, 1067), 'collections.namedtuple', 'namedtuple', (['"""frame_info"""', "['decompressed_size', 'dictionary_id']"], {}), "('frame_info', ['decompressed_size', 'dictionary_id'])\n", (1013, 1067), False, 'from collections import namedtuple\n')] |
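A hedged usage sketch of the helpers above; the module-level compress() is assumed to be part of pyzstd's public API, and the sample payload is made up:

from pyzstd import CParameter, compress, decompress, get_frame_info

frame = compress(b"hello " * 100, 10)                   # compression level 10
info = get_frame_info(frame)
print(info.decompressed_size, info.dictionary_id)   # e.g. 600 0
print(CParameter.compressionLevel.bounds())           # inclusive (lower, upper)
assert decompress(frame) == b"hello " * 100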
quacksawbones/galaxy-1 | test/unit/data/model/mapping/common.py | 65f7259b29d3886e526d9be670c60d9da9fbe038 | from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
@pytest.fixture
def cls_(self):
"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""
prefix = len("Test")
class_name = self.__class__.__name__[prefix:]
return getattr(self.get_model(), class_name)
@abstractmethod
def get_model(self):
pass
def dbcleanup_wrapper(session, obj, where_clause=None):
with dbcleanup(session, obj, where_clause):
yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""
return_id = where_clause is None
try:
obj_id = persist(session, obj, return_id)
yield obj_id
finally:
table = obj.__table__
if where_clause is None:
where_clause = _get_default_where_clause(type(obj), obj_id)
stmt = delete(table).where(where_clause)
session.execute(stmt)
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
def delete_from_database(session, objects):
"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""
# Ensure we have a list of objects (check for list explicitly: a model can be iterable)
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
table = obj.__table__
stmt = delete(table).where(table.c.id == obj.id)
session.execute(stmt)
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
# Either obj_id or where_clause must be provided, but not both
assert bool(obj_id) ^ (where_clause is not None)
if where_clause is None:
where_clause = _get_default_where_clause(cls, obj_id)
stmt = select(cls).where(where_clause)
result = session.execute(stmt)
# unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
if unique:
result = result.unique()
return result.scalar_one()
def has_unique_constraint(table, fields):
for constraint in table.constraints:
if isinstance(constraint, UniqueConstraint):
col_names = {c.name for c in constraint.columns}
if set(fields) == col_names:
return True
def has_index(table, fields):
for index in table.indexes:
col_names = {c.name for c in index.columns}
if set(fields) == col_names:
return True
def collection_consists_of_objects(collection, *objects):
"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""
if len(collection) != len(objects): # False if lengths are different
return False
if not collection: # True if both are empty
return True
# Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=lambda item: item.id)
objects_l = list(objects)
objects_l.sort(key=lambda item: item.id)
for item1, item2 in zip(collection, objects_l):
if item1.id is None or item2.id is None or item1.id != item2.id:
return False
return True
def get_unique_value():
"""Generate unique values to accommodate unique constraints."""
return uuid4().hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
| [((4284, 4291), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4289, 4291), False, 'from uuid import uuid4\n'), ((2669, 2680), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (2675, 2680), False, 'from sqlalchemy import delete, select, UniqueConstraint\n'), ((1366, 1379), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (1372, 1379), False, 'from sqlalchemy import delete, select, UniqueConstraint\n'), ((2293, 2306), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (2299, 2306), False, 'from sqlalchemy import delete, select, UniqueConstraint\n')] |
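A hedged sketch of how these helpers are intended to be used in a mapping test; Foo is a stand-in for a real mapped model class and session is assumed to be a SQLAlchemy session fixture:

def test_foo_is_stored(session):
    obj = Foo()
    # dbcleanup() persists obj, yields its id, and deletes the row on exit,
    # bypassing the session, so the test starts and ends with a clean table.
    with dbcleanup(session, obj) as obj_id:
        stored = get_stored_obj(session, Foo, obj_id)
        assert stored.id == obj_id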
chrisBrookes93/django-events-management | django_events/users/management/commands/create_default_su.py | 93886448a7bb85c8758324977ff67bcacc80bbec | from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
class Command(BaseCommand):
help = "Creates a default super user if one doesn't already exist. " \
"This is designed to be used in the docker-compose.yml to create an initial super user on deployment."
def handle(self, *args, **kwargs):
"""
Checks whether any super users exist and creates a default one if not
:param args: Unused
:param kwargs: Unused
"""
super_users = get_user_model().objects.filter(is_superuser=True)
if super_users.exists():
self.stdout.write('A superuser already exists, not creating one')
else:
get_user_model().objects.create_superuser(email="[email protected]", password="EventsEvents")
self.stdout.write('Created default superuser "[email protected]"')
self.stdout.write('Make sure you change the password immediately!')
| [((556, 572), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (570, 572), False, 'from django.contrib.auth import get_user_model\n'), ((750, 766), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (764, 766), False, 'from django.contrib.auth import get_user_model\n')] |
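A hedged sketch of invoking the management command above from code; the command name follows the module name, which is how Django resolves it:

from django.core.management import call_command

call_command("create_default_su")  # only creates [email protected] if no superuser exists yet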
zeroxoneb/antlir | antlir/bzl/image_layer.bzl | 811d88965610d16a5c85d831d317f087797ca732 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
An `image.layer` is a set of `feature` with some additional parameters. Its
purpose to materialize those `feature`s as a btrfs subvolume in the
per-repo `buck-image/out/volume/targets`.
We call the subvolume a "layer" because it can be built on top of a snapshot
of its `parent_layer`, and thus can be represented as a btrfs send-stream for
more efficient storage & distribution.
The Buck output of an `image.layer` target is a JSON file with information
on how to find the resulting layer in the per-repo
`buck-image/out/volume/targets`. See `SubvolumeOnDisk.to_json_file`.
## Implementation notes
The implementation of this converter deliberately minimizes the amount of
business logic in its command. The converter must include **only** our
interactions with the buck target graph. Everything else should be
delegated to subcommands.
### Command
In composing the `bash` command, our core maxim is: make it a hermetic
function of the converter's inputs -- do not read data from disk, do not
insert disk paths into the command, do not do anything that might cause the
bytes of the command to vary between machines or between runs. To achieve
this, we use Buck macros to resolve all paths, including those to helper
scripts. We rely on environment variables or pipes to pass data between the
helper scripts.
Another reason to keep this converter minimal is that `buck test` cannot
make assertions about targets that fail to build. Since we only have the
ability to test the "good" targets, it behooves us to put most logic in
external scripts, so that we can unit-test its successes **and** failures
thoroughly.
### Output
We mark `image.layer` uncacheable, because there's no easy way to teach Buck
to serialize a btrfs subvolume (for that, we have `package.new`).
That said, we should still follow best practices to avoid problems if e.g.
the user renames their repo, or similar. These practices include:
- The output JSON must store no absolute paths.
- Store Buck target paths instead of paths into the output directory.
### Dependency resolution
An `image.layer` consumes a set of `feature` outputs to decide what to put into
the btrfs subvolume. These outputs are actually just JSON files that
reference other targets, and do not contain the data to be written into the
image.
Therefore, `image.layer` has to explicitly tell buck that it needs all
direct dependencies of its `feature`s to be present on disk -- see our
`attrfilter` queries below. Without this, Buck would merrily fetch the just
the `feature` JSONs from its cache, and not provide us with any of the
buid artifacts that comprise the image.
We do NOT need the direct dependencies of the parent layer's features,
because we treat the parent layer as a black box -- whatever it has laid
down in the image, that's what it provides (and we don't care about how).
The consequences of this information hiding are:
- Better Buck cache efficiency -- we don't have to download
the dependencies of the ancestor layers' features. Doing that would be
wasteful, since those bits are redundant with what's in the parent.
- Ability to use genrule image layers / apply non-pure post-processing to
a layer. In terms of engineering, both of these non-pure approaches are
a terrible idea and a maintainability headache, but they do provide a
useful bridge for transitioning to Buck image builds from legacy
imperative systems.
 - The image compiler needs a little extra code to walk the parent layer and
determine what it provides.
- We cannot have "unobservable" dependencies between features. Since
feature dependencies are expected to routinely cross layer boundaries,
feature implementations are forced only to depend on data that can be
inferred from the filesystem -- since this is all that the parent layer
implementation can do. NB: This is easy to relax in the future by
writing a manifest with additional metadata into each layer, and using
that metadata during compilation.
"""
load(":compile_image_features.bzl", "compile_image_features")
load(":image_layer_utils.bzl", "image_layer_utils")
load(":image_utils.bzl", "image_utils")
def image_layer(
name,
parent_layer = None,
features = None,
flavor = None,
flavor_config_override = None,
antlir_rule = "user-internal",
**image_layer_kwargs):
"""
Arguments
- `parent_layer`: The name of another `image_layer` target, on
top of which the current layer will install its features.
- `features`: List of `feature` target paths and/or
nameless structs from `feature.new`.
- `flavor`: Picks default build options for the layer, including
`build_appliance`, RPM installer, and others. See `flavor_helpers.bzl`
for details.
- `flavor_config_override`: A struct that can override the default
values fetched from `REPO_CFG[flavor].flavor_to_config`.
- `mount_config`: Specifies how this layer is mounted in the
`mounts` field of a `feature` of a parent layer. See
the field in `_image_layer_impl` in `image_layer_utils.bzl`
- `runtime`: A list of desired helper buck targets to be emitted.
`container` is always included in the list by default.
See the field in `_image_layer_impl` in `image_layer_utils.bzl` and the
[docs](/docs/tutorials/helper-buck-targets#imagelayer) for the list of
possible helpers, their respective behaviours, and how to invoke them.
"""
image_layer_utils.image_layer_impl(
_rule_type = "image_layer",
_layer_name = name,
# Build a new layer. It may be empty.
_make_subvol_cmd = compile_image_features(
name = name,
current_target = image_utils.current_target(name),
parent_layer = parent_layer,
features = features,
flavor = flavor,
flavor_config_override = flavor_config_override,
),
antlir_rule = antlir_rule,
**image_layer_kwargs
)
| [] |
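A hedged sketch of a BUCK/TARGETS entry using the macro documented above; the load path, target names, feature target and flavor value are assumptions:

load("//antlir/bzl:image.bzl", "image")

image.layer(
    name = "base-plus-tools",
    parent_layer = "//images:base",
    features = ["//images/features:tools"],
    flavor = "centos8",
)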
jnthn/intellij-community | python/testData/debug/test_ignore_lib.py | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | from calendar import setfirstweekday
stopped_in_user_file = True
setfirstweekday(15) | [((65, 84), 'calendar.setfirstweekday', 'setfirstweekday', (['(15)'], {}), '(15)\n', (80, 84), False, 'from calendar import setfirstweekday\n')] |
ffreemt/promt-tr-free | promt_tr/__main__.py | ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee | ''' __main__, to run:
python -m promt_tr
'''
import sys
from random import randint
from promt_tr import promt_tr, LANG_CODES
# pragma: no cover
def main():
'''main'''
from_lang = 'auto'
to_lang = 'zh'
text = 'test ' + str(randint(0, 10000))
if not sys.argv[1:]:
print('Provide some English text, with an optional to_lang')
print('E.g., python -m promt_tr test this and that de')
print('Testing with some random text\n')
else:
argv = sys.argv[1:]
len_ = len(argv)
if len_ == 1:
if argv[0] in LANG_CODES:
to_lang = argv[0]
else:
text = argv[0]
elif argv[-1] in LANG_CODES:
to_lang = argv[-1]
text = ' '.join(argv[:-1])
else:
text = ' '.join(argv)
for to_lang in ['zh', 'de', 'fr', 'it', 'es']:
resu = promt_tr(text, from_lang, to_lang)
print(f'[{text}] translated to [{to_lang}]: [{resu}]')
if __name__ == '__main__':
main()
| [((898, 932), 'promt_tr.promt_tr', 'promt_tr', (['text', 'from_lang', 'to_lang'], {}), '(text, from_lang, to_lang)\n', (906, 932), False, 'from promt_tr import promt_tr, LANG_CODES\n'), ((242, 259), 'random.randint', 'randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (249, 259), False, 'from random import randint\n')] |
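A hedged sketch of calling the translation helper that this __main__ module drives; the text and language code mirror the help text above:

from promt_tr import LANG_CODES, promt_tr

assert "de" in LANG_CODES
print(promt_tr("test this and that", "auto", "de"))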
askoki/nfl_dpi_prediction | src/features/v3/proc_v3_n1_calc_distance.py | dc3256f24ddc0b6725eace2081d1fb1a7e5ce805 | import os
import sys
import pandas as pd
from datetime import datetime
from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, \
normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.get_step1_checkpoint_path(week_num)
try:
clean_df = pd.read_csv(save_file_path)
save_file_exists = True
except FileNotFoundError:
save_file_exists = False
if not save_file_exists:
print("Started loading data")
play_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
games_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
week_and_games = games_df[games_df.week == week_num]
tracking_df = pd.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))
print("Data loaded. Start processing timestamps")
tracking_df = add_missing_timestamp_values(tracking_df)
games_n_plays_df = play_df.merge(week_and_games, how='inner', on='gameId')
m_grouped = games_n_plays_df.groupby(['gameId', 'playId'])
df_t = tracking_df.merge(games_n_plays_df, how='left', on=['gameId', 'playId'])
# Remove all events without 'pass_forward'
df_t_grouped = df_t.groupby(['gameId', 'playId'])
df_t_v3 = df_t.copy().sort_index()
for name, group in df_t_grouped:
game_id, play_id = name
# if group does not contain pass forward, drop it
if all(group.event != 'pass_forward'):
df_t_v3 = df_t_v3[(df_t_v3.gameId != game_id) | (df_t_v3.playId != play_id)]
df_t_v3_s = df_t_v3.sort_values(by=['gameId', 'playId', 'time', 'event'])
df_t_v3_s = df_t_v3_s.reset_index(drop=True)
df_t_grouped = df_t_v3_s.groupby(['gameId', 'playId'])
# remove all values before 'pass_forward'
print("Removing all values before pass forward event...")
for name, group in df_t_grouped:
game_id, play_id = name
pass_forward_frame_id = group[group.event == 'pass_forward'].index.min() - 1
remove_start = group.index.min()
df_t_v3_s = df_t_v3_s.drop(df_t_v3_s.loc[remove_start:pass_forward_frame_id].index)
pd.options.mode.chained_assignment = None
gb = df_t_v3_s.groupby(['gameId', 'playId'])
print('Getting closest players...')
keep_indices = []
for name, group in gb:
game_id, play_id = name
try:
event_3rd = group.event.unique()[2]
except IndexError:
print('Number of events is < 3, skipping...')
continue
situation_df = group[group.event == event_3rd]
# convert dataframe into series
ball_row = situation_df[situation_df.team == 'football'].head(1)
# remove ball
player_situation_df = situation_df[situation_df.team != 'football']
try:
p1, p2 = get_closest_players(player_situation_df, ball_row.x.item(), ball_row.y.item())
except ValueError:
print('Value Error raised. This group will be skipped.')
continue
p_n_b_indices = get_players_and_ball_indices(group, p1, p2)
if p_n_b_indices:
keep_indices.extend(p_n_b_indices)
clean_df = df_t_v3_s[df_t_v3_s.index.isin(keep_indices)]
clean_df.to_csv(
save_file_path,
index=False
)
print('Normalize...')
clean_df = normalize_according_to_play_direction(clean_df)
clean_df['homeHasPossession'] = clean_df.apply(
lambda row: home_has_possession(row), axis=1
)
clean_df['teamSituation'] = clean_df.apply(
lambda row: calculate_team_sitation(row), axis=1
)
print('Creating features...')
min_df = clean_df[[
'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
'gameId', 'playId', 'frameId', 'isDefensivePI'
]]
gb_2 = clean_df.groupby(['gameId', 'playId', 'frameId'])
# ball direction and orientation are NaN
calc_df = pd.DataFrame(
columns=[
'time',
'att_def_d', 'att_ball_d', 'def_ball_d',
'att_s', 'def_s', 'ball_s',
'att_o', 'def_o',
'att_dir', 'def_dir',
'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
]
)
GROUP_SIZE_MINIMUM = 3
for name, group in gb_2:
game_id, play_id, frameId = name
if len(group) < GROUP_SIZE_MINIMUM:
continue
ball = group[group.teamSituation == 'football'].head(1).squeeze()
p_att = group[group.teamSituation == 'attacking'].head(1).squeeze()
p_def = group[group.teamSituation == 'defending'].head(1).squeeze()
group_row = group.head(1).squeeze()
group_events = group.event.unique().tolist()
dict_to_append = {
'time': group_row.time,
'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
'att_ball_d': calculate_distance(p_att.x, p_att.y, ball.x, ball.y),
'def_ball_d': calculate_distance(p_def.x, p_def.y, ball.x, ball.y),
'att_s': p_att.s, 'def_s': p_def.s, 'ball_s': ball.s,
'att_a': p_att.a, 'def_a': p_def.a, 'ball_a': ball.a,
'att_o': p_att.o, 'def_o': p_def.o,
'att_dir': p_att.dir, 'def_dir': p_def.dir,
'event': group_row.event,
'pass_arrived': check_group_event(group_events, 'pass_arrived'),
'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
'tackle': check_group_event(group_events, 'tackle'),
'first_contact': check_group_event(group_events, 'first_contact'),
'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
'week': week_num,
'gameId': group_row.gameId,
'playId': group_row.playId,
'frameId': group_row.frameId,
'isDefensivePI': group_row.isDefensivePI
}
calc_df = calc_df.append(
dict_to_append,
ignore_index=True
)
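# Note on the helpers used above (an assumption for illustration only, not the
# actual implementation): `calculate_distance(x1, y1, x2, y2)` from
# processing_v3 is presumed to be a plain Euclidean distance between two field
# positions, roughly:
#
#   def calculate_distance(x1, y1, x2, y2):
#       return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
#
# and `check_group_event(events, name)` is presumed to return 1 if `name`
# appears in the frame's event list and 0 otherwise.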
print("Saving data...")
calc_df.to_csv(
data_v3.get_step1_end_path(week_num),
index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
| [((519, 545), 'settings.DataV3', 'DataV3', (['DATA_V3_SUBVERSION'], {}), '(DATA_V3_SUBVERSION)\n', (525, 545), False, 'from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION\n'), ((3598, 3645), 'src.features.helpers.processing_v3.normalize_according_to_play_direction', 'normalize_according_to_play_direction', (['clean_df'], {}), '(clean_df)\n', (3635, 3645), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((4116, 4320), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['time', 'att_def_d', 'att_ball_d', 'def_ball_d', 'att_s', 'def_s',\n 'ball_s', 'att_o', 'def_o', 'att_dir', 'def_dir', 'event', 'gameId',\n 'playId', 'frameId', 'isDefensivePI']"}), "(columns=['time', 'att_def_d', 'att_ball_d', 'def_ball_d',\n 'att_s', 'def_s', 'ball_s', 'att_o', 'def_o', 'att_dir', 'def_dir',\n 'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'])\n", (4128, 4320), True, 'import pandas as pd\n'), ((627, 654), 'pandas.read_csv', 'pd.read_csv', (['save_file_path'], {}), '(save_file_path)\n', (638, 654), True, 'import pandas as pd\n'), ((1145, 1186), 'src.features.helpers.processing.add_missing_timestamp_values', 'add_missing_timestamp_values', (['tracking_df'], {}), '(tracking_df)\n', (1173, 1186), False, 'from src.features.helpers.processing import add_missing_timestamp_values\n'), ((824, 863), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', '"""plays.csv"""'], {}), "(RAW_DATA_DIR, 'plays.csv')\n", (836, 863), False, 'import os\n'), ((892, 931), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', '"""games.csv"""'], {}), "(RAW_DATA_DIR, 'games.csv')\n", (904, 931), False, 'import os\n'), ((1021, 1070), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', 'f"""week{week_num}.csv"""'], {}), "(RAW_DATA_DIR, f'week{week_num}.csv')\n", (1033, 1070), False, 'import os\n'), ((3313, 3356), 'src.features.helpers.processing_v3.get_players_and_ball_indices', 'get_players_and_ball_indices', (['group', 'p1', 'p2'], {}), '(group, p1, p2)\n', (3341, 3356), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((3711, 3735), 'src.features.helpers.processing_v4.home_has_possession', 'home_has_possession', (['row'], {}), '(row)\n', (3730, 3735), False, 'from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation\n'), ((3807, 3835), 'src.features.helpers.processing_v4.calculate_team_sitation', 'calculate_team_sitation', (['row'], {}), '(row)\n', (3830, 3835), False, 'from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation\n'), ((4899, 4953), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_att.x', 'p_att.y', 'p_def.x', 'p_def.y'], {}), '(p_att.x, p_att.y, p_def.x, p_def.y)\n', (4917, 4953), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((4977, 5029), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_att.x', 'p_att.y', 'ball.x', 'ball.y'], {}), '(p_att.x, p_att.y, ball.x, ball.y)\n', (4995, 5029), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, 
check_group_event\n'), ((5053, 5105), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_def.x', 'p_def.y', 'ball.x', 'ball.y'], {}), '(p_def.x, p_def.y, ball.x, ball.y)\n', (5071, 5105), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5385, 5432), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_arrived"""'], {}), "(group_events, 'pass_arrived')\n", (5402, 5432), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5465, 5519), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_outcome_caught"""'], {}), "(group_events, 'pass_outcome_caught')\n", (5482, 5519), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5539, 5580), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""tackle"""'], {}), "(group_events, 'tackle')\n", (5556, 5580), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5607, 5655), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""first_contact"""'], {}), "(group_events, 'first_contact')\n", (5624, 5655), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5692, 5750), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_outcome_incomplete"""'], {}), "(group_events, 'pass_outcome_incomplete')\n", (5709, 5750), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5777, 5825), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""out_of_bounds"""'], {}), "(group_events, 'out_of_bounds')\n", (5794, 5825), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((6226, 6240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6238, 6240), False, 'from datetime import datetime\n')] |
Rajpratik71/devel-scripts | annotate-preprocessed.py | 068285719a13b02889b1314361cc5bdb764d9a3a | #!/usr/bin/python
"""Annotates -E preprocessed source input with line numbers.
Read std input, then annotate each line with line number based on previous
expanded line directives from -E output. Useful in the context of compiler
debugging.
"""
import getopt
import os
import re
import sys
import script_utils as u
flag_reverse = True
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print """\
usage: %s [options] < input > output
options:
-d increase debug msg verbosity level
""" % os.path.basename(sys.argv[0])
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_reverse
try:
optlist, _ = getopt.getopt(sys.argv[1:], "dr")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, _ in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-r":
flag_reverse = False
# Setup
u.setdeflanglocale()
parse_args()
# Read
lines = sys.stdin.readlines()
lnum = -1
matcher = re.compile(r"^\#\s+(\d+)\s+\"(\S+)\".*$")
for line in lines:
m = matcher.match(line)
if m:
lnum = int(m.group(1))
afile = m.group(2)
print "<%s:%d>" % (afile, lnum)
continue
print "%d:%s" % (lnum, line.strip())
lnum += 1
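# Example invocation (illustrative compiler command; the script simply reads
# preprocessed -E output on stdin and annotates it with line numbers):
#
#   gcc -E foo.c | ./annotate-preprocessed.py > foo.annotated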
| [] |
tekeburak/dam-occupancy-model | pages/lstm.py | f39d436bf27088068177245f0180cafaa56ad123 | import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt
import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px
def app():
st.title("LSTM Model")
st.subheader('What does LSTM model do?')
st.markdown("""<p style='text-align: justify;'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>""", unsafe_allow_html=True)
st.subheader('Why we chose LSTM?')
st.markdown("""<p style='text-align: justify;'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>""", unsafe_allow_html=True)
st.subheader('LSTM model input and output')
st.markdown("Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features.", unsafe_allow_html=True)
LSTM_model_name = 'models/LSTM_model.h5'
model_lstm = tf.keras.models.load_model(LSTM_model_name)
features = get_open_weather_map_data()
prediction_lstm = model_lstm.predict(features) * 100
prediction_lstm = prediction_lstm.ravel()
date_list = get_date_list_for_gmt()
data = []
layout = go.Layout(
title= "<b>LSTM Dam Occupancy Forecasting Plot</b>",paper_bgcolor = 'rgb(248, 248, 255)',plot_bgcolor = 'rgb(248, 248, 255)',barmode = "stack",
xaxis = dict(title="Time", linecolor="#BCCCDC",showspikes=True,spikethickness=2,spikedash="dot",spikecolor= "#ffffff",spikemode="across",),
yaxis= dict(title="Dam Occupancy Rate (%)",linecolor="#021C1E"))
line_chart= go.Scatter(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)' )
data.append(line_chart)
fig= go.Figure(data=data, layout=layout)
st.plotly_chart(fig)
| [((296, 318), 'streamlit.title', 'st.title', (['"""LSTM Model"""'], {}), "('LSTM Model')\n", (304, 318), True, 'import streamlit as st\n'), ((322, 362), 'streamlit.subheader', 'st.subheader', (['"""What does LSTM model do?"""'], {}), "('What does LSTM model do?')\n", (334, 362), True, 'import streamlit as st\n'), ((364, 845), 'streamlit.markdown', 'st.markdown', (['"""<p style=\'text-align: justify;\'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<p style=\'text-align: justify;\'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>"\n , unsafe_allow_html=True)\n', (375, 845), True, 'import streamlit as st\n'), ((843, 877), 'streamlit.subheader', 'st.subheader', (['"""Why we chose LSTM?"""'], {}), "('Why we chose LSTM?')\n", (855, 877), True, 'import streamlit as st\n'), ((879, 1344), 'streamlit.markdown', 'st.markdown', (['"""<p style=\'text-align: justify;\'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<p style=\'text-align: justify;\'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>"\n , unsafe_allow_html=True)\n', (890, 1344), True, 'import streamlit as st\n'), ((1343, 1386), 'streamlit.subheader', 'st.subheader', (['"""LSTM model input and output"""'], {}), "('LSTM model input and output')\n", (1355, 1386), True, 'import streamlit as st\n'), ((1388, 1706), 'streamlit.markdown', 'st.markdown', (['"""Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features."""'], {'unsafe_allow_html': '(True)'}), "(\n 'Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. 
Model predicts 7 days dam occupancy rate of İstanbul using these features.'\n , unsafe_allow_html=True)\n", (1399, 1706), True, 'import streamlit as st\n'), ((1757, 1800), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['LSTM_model_name'], {}), '(LSTM_model_name)\n', (1783, 1800), True, 'import tensorflow as tf\n'), ((1814, 1841), 'utils.get_owm_data.get_open_weather_map_data', 'get_open_weather_map_data', ([], {}), '()\n', (1839, 1841), False, 'from utils.get_owm_data import get_open_weather_map_data\n'), ((1954, 1977), 'utils.get_date.get_date_list_for_gmt', 'get_date_list_for_gmt', ([], {}), '()\n', (1975, 1977), False, 'from utils.get_date import get_date_list_for_gmt\n'), ((2381, 2456), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'date_list', 'y': 'prediction_lstm', 'marker_color': '"""rgb(0, 200, 200)"""'}), "(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)')\n", (2391, 2456), True, 'import plotly.graph_objects as go\n'), ((2489, 2524), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (2498, 2524), True, 'import plotly.graph_objects as go\n'), ((2526, 2546), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2541, 2546), True, 'import streamlit as st\n')] |
kamidev/autobuild_fst | fst_web/demo_settings.py | 6baffa955075ffe3c5f197789e9fd065fa74058e | # -*- coding: utf-8 -*-
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
path = lambda *args: os.path.join(ROOT, *args)
""" Template for local settings of the FST webservice (fst_web)
Please edit this file and replace all generic values with values suitable to
your particular installation.
"""
# NOTE! Always set this to False before deploying
DEBUG = True
# NOTE! Before deploying on a public, uncomment ALLOWED_HOSTS
# and add IP address and/or domain of your site
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'fst.magokoro.nu']
# Look for instance-specific settings
try:
from .instance_settings import *
except ImportError:
from .default_instance_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path('database/fst_demo.db')
}
}
LOG_LEVEL = "DEBUG"
# Enable this to override global DB Debug setting
# DB_DEBUG_LEVEL = "DEBUG"
# Setup mail server for sending email notifications.
# You can use any mail server you want.
# But a very simple way to get started is to use a gmail account.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# EMAIL_HOST_USER = 'your email'
# EMAIL_HOST_PASSWORD = 'your password'
# Admins specified here receive email notifications on critical errors.
ADMINS = ()
MANAGERS = ADMINS
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = os.path.join("/dokument/")
# Site and port for hosting FST service (do not add ending '/').
FST_SITE_URL = "http://127.0.0.1:8000"
# TODO - Check if FST_INSTANCE_PREFIX can be removed
# Site and port of specific FST instance (do not add ending '/').
FST_INSTANCE_URL = os.path.join(
"http://127.0.0.1:8000",
FST_INSTANCE_PREFIX)
| [((1508, 1534), 'os.path.join', 'os.path.join', (['"""/dokument/"""'], {}), "('/dokument/')\n", (1520, 1534), False, 'import os\n'), ((1779, 1837), 'os.path.join', 'os.path.join', (['"""http://127.0.0.1:8000"""', 'FST_INSTANCE_PREFIX'], {}), "('http://127.0.0.1:8000', FST_INSTANCE_PREFIX)\n", (1791, 1837), False, 'import os\n'), ((58, 83), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (73, 83), False, 'import os\n'), ((106, 131), 'os.path.join', 'os.path.join', (['ROOT', '*args'], {}), '(ROOT, *args)\n', (118, 131), False, 'import os\n')] |
joaocamargo/estudos-python | BookingScraper-joao_v2/BookingScraper/airbnb.py | c5fbf59a1f06131d9789dca7dbdfdcf2200d0227 | #! /usr/bin/env python3.6
import argparse
import argcomplete
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
import requests
from bthread import BookingThread
from bs4 import BeautifulSoup
from file_writer import FileWriter
hotels = []
def get_countries():
with open("europa2020.txt", "r") as f:
countries = f.read().splitlines()
return countries
def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):
print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):')
print(session, offset, rooms, country, dest_id, DayIni, DayFim)
diaInicial = str(int(DayIni[0:2]))
mesInicial = str(int(DayIni[3:5]))
anoInicial = str(int(DayIni[6:10]))
diaFinal = str(int(DayFim[0:2]))
mesFinal = str(int(DayFim[3:5]))
anoFinal = str(int(DayFim[6:10]))
'''
Make request to airbnb page and parse html
:param offset:
:return: html page
'''
url = 'https://www.airbnb.com.br/s/Londres/'\
'homes?refinement_paths%5B%5D=%2Fhomes¤t_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\
'&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\
'&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\
'&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\
'&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\
'§ion_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset)
r = requests.get(url, headers=
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)'
' Gecko/20100101 Firefox/48.0'})
html = r.content
print(url)
parsed_html = BeautifulSoup(html, 'lxml')
return parsed_html
def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim):
parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim)
hotel = parsed_html.find_all('div', {'class': 'sr_item'})
for ho in hotel:
#print("ho.find('a', {'class': 'jq_tooltip'})")
#print(ho.find('a', {'class': 'jq_tooltip'}))
#name = ho.find('a', {'class': 'jq_tooltip'})['data-title']
print("ho.find('span', {'class': 'sr-hotel__name'})")
#print(ho.find('span', {'class': 'sr-hotel__name'}))
if ho.find('span', {'class': 'sr-hotel__name'}) is not None:
name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else:
name = '-1'
if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None:
price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","")
else:
price = '-1'
if ho.find('span', {'class': '_ky9opu0'}) is not None:
nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'",""))
else :
nota = '-1'
if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None:
distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else :
distance = '-1'
# if ho.find('a', {'class': 'bui-link'}) is not None :
# result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})]
# print('TAMANHO TOOLTIP', str(len(result)))
# for i in result:
# print(i)
# for i in result:
# if i in 'km':
# distance = str(i)
# else:
# distance = '----'
# else:
# distance = '----'
# if len(result) ==1:
# if result[0] in 'km':
# distance = result
# else:
# distance = 'aaaaa' + str(len(result))
# else:
# distance = '---'
hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance)
#hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price)
def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None):
'''
Prepare data for saving
:return: hotels: set()
'''
offset = 1
session = requests.Session()
parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim)
all_offset = parsed_html.find_all('li', {'class':
'sr_pagination_item'})[-1].get_text().splitlines()[-1]
threads = []
for i in range(int(all_offset)):
offset += 1
t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
hotels2 = hotels
return hotels2
def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None):
'''
Get all accomodations in Macedonia and save them in file
:return: hotels-in-macedonia.{txt/csv/xlsx} file
'''
    print('Searching for', country)
hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format)
save_data(hotels_list , out_format=out_format, country=country)
def save_data(data, out_format, country):
'''
Saves hotels list in file
:param data: hotels list
:param out_format: json, csv or excel
:return:
'''
writer = FileWriter(data, out_format, country)
file = writer.output_file()
print('All accommodations are saved.')
print('You can find them in', file, 'file')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
countries = get_countries()
parser.add_argument("--rooms",
help='Add the number of rooms to the booking request.',
default=1,
type=int,
nargs='?')
parser.add_argument("--country",
help='Add the country to the booking request.',
default='Macedonia',
nargs='?').completer = ChoicesCompleter(countries)
parser.add_argument("--dest_id",
help='Add the country to the booking request.',
default='0',
nargs='?')
parser.add_argument("--DayIni",
                        help='Start date',
default='01/01/2019',
nargs='?')
parser.add_argument("--DayFim",
                        help='End date',
default='02/01/2019',
nargs='?')
parser.add_argument("--out_format",
help='Add the format for the output file. Add excel, json or csv.',
default='json',
choices=['json', 'excel', 'csv'],
nargs='?').completer = EnvironCompleter
argcomplete.autocomplete(parser)
args = parser.parse_args()
localidades = [{
'Pais': 'London',
'dest_id': '-2601889'
}, {
'Pais': 'Utrecht',
'dest_id': '-2154382'
}, {
'Pais': 'Buzios',
'dest_id': '-626254'
}, {
'Pais': '',
'dest_id': ''
}]
countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']]
if len(countryAux)>0:
country = countryAux[0]
        print('Parameters')
print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
else:
        country = 'Not identified'
locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != '']
print('----------')
        print('Use one of the following locations')
for i in locais:
print(i)
print('----------')
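# Example invocation (illustrative values; any dest_id printed by the block
# above can be used, and --out_format accepts json, excel or csv):
#
#   python airbnb.py --rooms 2 --dest_id=-2601889 \
#       --DayIni 01/01/2019 --DayFim 02/01/2019 --out_format csv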
| [((2042, 2173), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/48.0'\n }"}), "(url, headers={'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/48.0'\n })\n", (2054, 2173), False, 'import requests\n'), ((2251, 2278), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (2264, 2278), False, 'from bs4 import BeautifulSoup\n'), ((5198, 5216), 'requests.Session', 'requests.Session', ([], {}), '()\n', (5214, 5216), False, 'import requests\n'), ((6392, 6429), 'file_writer.FileWriter', 'FileWriter', (['data', 'out_format', 'country'], {}), '(data, out_format, country)\n', (6402, 6429), False, 'from file_writer import FileWriter\n'), ((6595, 6620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6618, 6620), False, 'import argparse\n'), ((7076, 7103), 'argcomplete.completers.ChoicesCompleter', 'ChoicesCompleter', (['countries'], {}), '(countries)\n', (7092, 7103), False, 'from argcomplete.completers import ChoicesCompleter\n'), ((7908, 7940), 'argcomplete.autocomplete', 'argcomplete.autocomplete', (['parser'], {}), '(parser)\n', (7932, 7940), False, 'import argcomplete\n'), ((5543, 5634), 'bthread.BookingThread', 'BookingThread', (['session', 'offset', 'rooms', 'country', 'dest_id', 'DayIni', 'DayFim', 'process_hotels'], {}), '(session, offset, rooms, country, dest_id, DayIni, DayFim,\n process_hotels)\n', (5556, 5634), False, 'from bthread import BookingThread\n')] |
valurhrafn/chromium-sync | main.py | df5e3299d179fc47ff34d1a95409383f46aac4d4 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
import webapp2
# For datastore
import cgi
import urllib
from google.appengine.ext import ndb
class UserId(ndb.Model):
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_user(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(-cls.date)
# ************** MainHandler ************* #
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
# ************** GetUser ************* #
class GetUser(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
client_id = self.request.get('client_id')
ancestor_key = ndb.Key("ID", client_id or "*no_id*")
userids = UserId.query_user(ancestor_key).fetch(20)
        self.response.out.write('here is something')
        for userid in userids:
self.response.out.write('<blockquote>%s</blockquote>' %
cgi.escape(userid.content))
# Checks for active Google account session
# user = users.get_current_user()
# if user:
# self.response.headers['Content-Type'] = 'text/plain'
# self.response.write('Hello, ' + user.nickname())
# else:
# self.redirect(users.create_login_url(self.request.uri))
self.response.out.write('</body></html>')
def post(self):
pass
# ************** HasData ************* #
class HasData(webapp2.RequestHandler):
def get(self):
pass
#TODO does user have data
class PostData(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('client_id')
chrome_user = UserId(parent=ndb.Key("ID", client_id or "*no_id*"),
content = self.request.get('client_id'))
chrome_user.put()
#TODO recieve data from client
class GetSyncData(object):
"""docstring for GetSyncData"""
def __init__(self, arg):
super(GetSyncData, self).__init__()
self.arg = arg
#implement get data for user
# property user.email() or user.user_id()
app = webapp2.WSGIApplication([
('/', MainHandler),
('/GetUser/', GetUser),
('/HasData/', HasData),
('/chrome-sync/command/', PostData),
('/GetSyncData/', GetSyncData)
], debug=True)
| [((2721, 2905), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/', MainHandler), ('/GetUser/', GetUser), ('/HasData/', HasData), (\n '/chrome-sync/command/', PostData), ('/GetSyncData/', GetSyncData)]"], {'debug': '(True)'}), "([('/', MainHandler), ('/GetUser/', GetUser), (\n '/HasData/', HasData), ('/chrome-sync/command/', PostData), (\n '/GetSyncData/', GetSyncData)], debug=True)\n", (2744, 2905), False, 'import webapp2\n'), ((773, 793), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (791, 793), False, 'from google.appengine.ext import ndb\n'), ((803, 842), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (823, 842), False, 'from google.appengine.ext import ndb\n'), ((1332, 1369), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['"""ID"""', "(client_id or '*no_id*')"], {}), "('ID', client_id or '*no_id*')\n", (1339, 1369), False, 'from google.appengine.ext import ndb\n'), ((2324, 2361), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['"""ID"""', "(client_id or '*no_id*')"], {}), "('ID', client_id or '*no_id*')\n", (2331, 2361), False, 'from google.appengine.ext import ndb\n'), ((1610, 1636), 'cgi.escape', 'cgi.escape', (['userid.content'], {}), '(userid.content)\n', (1620, 1636), False, 'import cgi\n')] |
dneise/Comet | comet/service/subscriber.py | abaa0da65d69f90a5262d81416477b4e71deb2ad | # Comet VOEvent Broker.
from twisted.application.internet import ClientService
from comet.protocol.subscriber import VOEventSubscriberFactory
__all__ = ["makeSubscriberService"]
def makeSubscriberService(endpoint, local_ivo, validators, handlers, filters):
"""Create a reconnecting VOEvent subscriber service.
Parameters
----------
endpoint : implements `twisted.internet.interfaces.IStreamClientEndpoint`
The endpoint to which the service will connect.
local_ivo : `str` or `None`
IVOA identifier for the subscriber.
validators : `list` of implementers of `~comet.icomet.IValidator`.
Validators which will be applied to incoming events. Events which fail
validation will be rejected.
handlers : `list` of implementers of `~comet.icomet.IHandler`.
Handlers to which events which pass validation will be passed.
filters : `list` of `str`
XPath filters. Will be passed to upstream as a request to filter the
alerts being sent.
Notes
-----
Upstream brokes may not provide support for XPath filtering; in this case,
the filters suppplied will be ignored.
Reconnection is handled according to the default policies of
`twisted.application.internet.ClientService`.
"""
factory = VOEventSubscriberFactory(local_ivo, validators, handlers, filters)
service = ClientService(endpoint, factory)
return service
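# Example wiring (a minimal sketch, assuming a plain TCP endpoint and no
# validators, handlers or filters; the host and port below are illustrative):
#
#   from twisted.internet import reactor
#   from twisted.internet.endpoints import TCP4ClientEndpoint
#
#   endpoint = TCP4ClientEndpoint(reactor, "voevent.example.org", 8099)
#   service = makeSubscriberService(endpoint, None, [], [], [])
#   service.startService()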
| [((1300, 1366), 'comet.protocol.subscriber.VOEventSubscriberFactory', 'VOEventSubscriberFactory', (['local_ivo', 'validators', 'handlers', 'filters'], {}), '(local_ivo, validators, handlers, filters)\n', (1324, 1366), False, 'from comet.protocol.subscriber import VOEventSubscriberFactory\n'), ((1381, 1413), 'twisted.application.internet.ClientService', 'ClientService', (['endpoint', 'factory'], {}), '(endpoint, factory)\n', (1394, 1413), False, 'from twisted.application.internet import ClientService\n')] |
bopopescu/build | scripts/master/cros_try_job_git.py | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import os
import re
import shutil
import zlib
from StringIO import StringIO
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint: disable=E0611,F0401
from email.Message import Message
from email.Utils import formatdate
except ImportError:
raise
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer, reactor, utils
from twisted.mail.smtp import SMTPSenderFactory
from twisted.python import log
from common.twisted_util.response import StringResponse
from master import gitiles_poller
from master.try_job_base import BadJobfile
class CbuildbotConfigs(object):
# Valid 'etc' builder targets. Specifically, this ensures:
# - The build name doesn't begin with a flag ('--')
# - The build name doesn't contain spaces (to spill into extra args).
_ETC_TARGET_RE = re.compile(r'^[a-zA-Z][\w-]+\w$')
def __init__(self, configs, etc_builder=None):
"""Holds base state of the master's try job related configuration.
configs (dict): A dictionary of all known CrOS configs. This will be as
up-to-date as the Chromite pin.
etc_builder (str): If not None, the name of the etc builder.
"""
self.configs = configs
self.etc_builder = etc_builder
def AddBuildBucketHooks(self, c):
"""Build mutation hook called via BuildBucket when scheduling builds.
The cbuildbot config is specified in the `cbb_config` property. The
callback transforms that property to an actual waterfall builder name by
mapping it based on its config.
If an 'etc' builder is configured and the config name is unknown, it will be
mapped to the 'etc' builder if possible.
A tryserver BuildBucket build takes the form:
- Empty `builder_name` parameter. If one is supplied, it will be ignored.
- BuildBot changes can be added by including one or more BuildBucket
`changes` parameters: [{'author': {'email': '[email protected]'}}].
- `cbb_config` property must be set to the build's cbuildbot config target.
- `extra_args` property (optional) may be a JSON list of additional
parameters to pass to the tryjob.
- `slaves_request` property (optional) may be a JSON list of slaves on which
this build may run.
- Additional BuildBot properties may be added.
NOTE: Internally, all of these parameters are converted to BuildBot
properties and referenced as such in other areas of code. The Git poller
also constructs the same property set, so code paths converge.
"""
def params_hook(params, _build):
# Map `cbb_config` to a builder name.
properties = params.get('properties', {})
config_name = properties.get('cbb_config')
if not config_name:
raise ValueError('Missing required `cbb_config` property.')
params['builder_name'] = self.GetBuilderForConfig(config_name)
# Validate other fields.
if not isinstance(properties.get('extra_args', []), list):
raise ValueError('`extra_args` property is not a list.')
if not isinstance(properties.get('slaves_request', []), list):
raise ValueError('`slaves_request` is not a list.')
# Add mandatory properties to build.
params['properties'] = properties
c['buildbucket_params_hook'] = params_hook
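  # Illustrative BuildBucket request payload handled by params_hook above
  # (the config name and extra_args values are made up for the example;
  # `builder_name` is filled in by the hook from `cbb_config`):
  #
  #   {
  #     "properties": {
  #       "cbb_config": "amd64-generic-full",
  #       "extra_args": ["--hwtest"],
  #       "slaves_request": []
  #     }
  #   }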
def GetBuilderForConfig(self, config_name):
config = self.configs.get(config_name)
if config:
return config['_template'] or config_name
self.ValidateEtcBuild(config_name)
return self.etc_builder
def ValidateEtcBuild(self, config_name):
"""Tests whether a specified build config_name is candidate for etc build.
Raises a ValueError if an etc build cannot be dispatched.
"""
if not self.etc_builder:
raise ValueError('etc builder is not configured.')
if not config_name:
raise ValueError('Empty config name')
if not self._ETC_TARGET_RE.match(config_name):
raise ValueError('invalid etc config name (%s).' % (config_name,))
def translate_v1_to_v2(parsed_job):
"""Translate tryjob desc from V1 to V2."""
parsed_job.setdefault('extra_args', []).append('--remote-trybot')
parsed_job['version'] = 2
def translate_v2_to_v3(parsed_job):
"""Translate tryjob desc from V2 to V3."""
# V3 --remote-patches format is not backwards compatible.
if any(a.startswith('--remote-patches')
for a in parsed_job.get('extra_args', ())):
raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to '
'v.3. Please run repo sync.')
parsed_job['version'] = 3
class CrOSTryJobGit(TryBase):
"""Poll a Git server to grab patches to try."""
# Name of property source for generated properties.
_PROPERTY_SOURCE = 'Try Job'
# The version of tryjob that the master is expecting.
_TRYJOB_FORMAT_VERSION = 3
# Functions that translate from one tryjob version to another.
_TRANSLATION_FUNCS = {
1 : translate_v1_to_v2,
2 : translate_v2_to_v3,
}
# Template path URL component to retrieve the Base64 contents of a file from
# Gitiles.
_GITILES_PATH_TMPL = '%(repo)s/+/%(revision)s/%(path)s?format=text'
@classmethod
def updateJobDesc(cls, parsed_job):
"""Ensure job description is in the format we expect."""
while parsed_job['version'] < cls._TRYJOB_FORMAT_VERSION:
prev_ver = parsed_job['version']
translation_func = cls._TRANSLATION_FUNCS[parsed_job['version']]
translation_func(parsed_job)
if parsed_job['version'] <= prev_ver:
raise AssertionError('translation function %s not incrementing version!'
% str(translation_func))
def __init__(self, name, pollers, smtp_host, from_addr, reply_to,
email_footer, cbuildbot_configs, properties=None):
"""Initialize the class.
Arguments:
name: See TryBase.__init__().
pollers: A list of job repo git pit pollers.
smtp_host: The smtp host for sending out error emails.
from_addr: The email address to display as being sent from.
reply_to: The email address to put in the 'Reply-To' email header field.
email_footer: The footer to append to any emails sent out.
cbuildbot_configs: (CbuildbotConfigs) A configuration set instance. Any
'bot' request outside of this list will go to an 'etc' builder, if
available.
properties: See TryBase.__init__()
"""
TryBase.__init__(self, name, [], properties or {})
self.pollers = pollers
self.smtp_host = smtp_host
self.from_addr = from_addr
self.reply_to = reply_to
self.email_footer = email_footer
self.cbb = cbuildbot_configs
def startService(self):
TryBase.startService(self)
self.startConsumingChanges()
@staticmethod
def load_job(data):
try:
return json.loads(data)
except ValueError as e:
raise BadJobfile("Failed to parse job JSON: %s" % (e.message,))
def validate_job(self, parsed_job):
# A list of field description tuples of the format:
# (name, type, required).
fields = [('name', basestring, True),
('user', basestring, True),
('email', list, True),
('bot', list, True),
('extra_args', list, False),
('version', int, True),
('slaves_request', list, False),
]
error_msgs = []
for name, f_type, required in fields:
val = parsed_job.get(name)
if val is None:
if required:
error_msgs.append('Option %s missing!' % name)
elif not isinstance(val, f_type):
error_msgs.append('Option %s of wrong type!' % name)
# If we're an 'etc' job, we must have bots defined to execute.
for bot in parsed_job['bot']:
if bot in self.cbb.configs:
continue
# Assert that this is a valid 'etc' build.
try:
self.cbb.ValidateEtcBuild(bot)
except ValueError as e:
error_msgs.append("Invalid 'etc' build (%s): %s" % (bot, e.message))
if error_msgs:
raise BadJobfile('\n'.join(error_msgs))
def get_props(self, config, options):
"""Overriding base class method."""
props = Properties()
props.setProperty('slaves_request', options.get('slaves_request', []),
self._PROPERTY_SOURCE)
props.setProperty('cbb_config', config, self._PROPERTY_SOURCE)
extra_args = options.get('extra_args')
if extra_args:
# This field can be quite large, and exceed BuildBot property limits.
# Compress it, Base64 encode it, and prefix it with "z:" so the consumer
# knows its size.
extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps(
extra_args)))
props.setProperty('cbb_extra_args', extra_args,
self._PROPERTY_SOURCE)
return props
def create_buildset(self, ssid, parsed_job):
"""Overriding base class method."""
dlist = []
buildset_name = '%s:%s' % (parsed_job['user'], parsed_job['name'])
for bot in parsed_job['bot']:
builder_name = self.cbb.GetBuilderForConfig(bot)
log.msg("Creating '%s' try job(s) %s for %s" % (builder_name, ssid, bot))
dlist.append(self.addBuildsetForSourceStamp(ssid=ssid,
reason=buildset_name,
external_idstring=buildset_name,
builderNames=[builder_name],
properties=self.get_props(bot, parsed_job)))
return defer.DeferredList(dlist)
def send_validation_fail_email(self, name, emails, error):
"""Notify the user via email about the tryjob error."""
html_content = []
html_content.append('<html><body>')
body = """
Your tryjob with name '%(name)s' failed the validation step. This is most
likely because <br>you are running an older version of cbuildbot. Please run
<br><code>repo sync chromiumos/chromite</code> and try again. If you still
see<br>this message please contact [email protected].<br>
"""
html_content.append(body % {'name': name})
html_content.append("Extra error information:")
html_content.append(error.replace('\n', '<br>\n'))
html_content.append(self.email_footer)
m = Message()
m.set_payload('<br><br>'.join(html_content), 'utf8')
m.set_type("text/html")
m['Date'] = formatdate(localtime=True)
m['Subject'] = 'Tryjob failed validation'
m['From'] = self.from_addr
m['Reply-To'] = self.reply_to
result = defer.Deferred()
sender_factory = SMTPSenderFactory(self.from_addr, emails,
StringIO(m.as_string()), result)
reactor.connectTCP(self.smtp_host, 25, sender_factory)
@defer.inlineCallbacks
def gotChange(self, change, important):
try:
yield self._gotChangeImpl(change, important)
except Exception as e:
log.msg('Exception in try job scheduler: %s' % (e,))
import traceback
traceback.print_exc()
@defer.inlineCallbacks
def _gotChangeImpl(self, change, _important):
"""Process the received data and send the queue buildset."""
# Find poller that this change came from.
for poller in self.pollers:
if not isinstance(poller, gitiles_poller.GitilesPoller):
continue
if poller.repo_url == change.repository:
break
else:
raise BadJobfile(
'Received tryjob from unsupported repository %s' % change.repository)
# pylint: disable=W0631
file_contents = yield self.loadGitilesChangeFile(poller, change)
parsed = {}
try:
parsed = self.load_job(file_contents)
self.validate_job(parsed)
self.updateJobDesc(parsed)
except BadJobfile as e:
self.send_validation_fail_email(parsed.setdefault('name', ''),
parsed['email'], str(e))
raise
# The sourcestamp/buildsets created will be merge-able.
ssid = yield self.master.db.sourcestamps.addSourceStamp(
branch=change.branch,
revision=change.revision,
project=change.project,
repository=change.repository,
changeids=[change.number])
yield self.create_buildset(ssid, parsed)
@defer.inlineCallbacks
def loadGitilesChangeFile(self, poller, change):
if len(change.files) != 1:
# We only accept changes with 1 diff file.
raise BadJobfile(
'Try job with too many files %s' % (','.join(change.files)))
# Load the contents of the modified file.
path = self._GITILES_PATH_TMPL % {
'repo': poller.repo_path,
'revision': change.revision,
'path': change.files[0],
}
contents_b64 = yield poller.agent.request('GET', path, retry=5,
protocol=StringResponse.Get)
defer.returnValue(base64.b64decode(contents_b64))
| [((1143, 1177), 're.compile', 're.compile', (['"""^[a-zA-Z][\\\\w-]+\\\\w$"""'], {}), "('^[a-zA-Z][\\\\w-]+\\\\w$')\n", (1153, 1177), False, 'import re\n'), ((4707, 4811), 'master.try_job_base.BadJobfile', 'BadJobfile', (['"""Cannot translate --remote-patches from tryjob v.2 to v.3. Please run repo sync."""'], {}), "(\n 'Cannot translate --remote-patches from tryjob v.2 to v.3. Please run repo sync.'\n )\n", (4717, 4811), False, 'from master.try_job_base import BadJobfile\n'), ((6695, 6745), 'buildbot.schedulers.trysched.TryBase.__init__', 'TryBase.__init__', (['self', 'name', '[]', '(properties or {})'], {}), '(self, name, [], properties or {})\n', (6711, 6745), False, 'from buildbot.schedulers.trysched import TryBase\n'), ((6965, 6991), 'buildbot.schedulers.trysched.TryBase.startService', 'TryBase.startService', (['self'], {}), '(self)\n', (6985, 6991), False, 'from buildbot.schedulers.trysched import TryBase\n'), ((8431, 8443), 'buildbot.process.properties.Properties', 'Properties', ([], {}), '()\n', (8441, 8443), False, 'from buildbot.process.properties import Properties\n'), ((9683, 9708), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['dlist'], {}), '(dlist)\n', (9701, 9708), False, 'from twisted.internet import defer, reactor, utils\n'), ((10412, 10421), 'email.Message.Message', 'Message', ([], {}), '()\n', (10419, 10421), False, 'from email.Message import Message\n'), ((10523, 10549), 'email.Utils.formatdate', 'formatdate', ([], {'localtime': '(True)'}), '(localtime=True)\n', (10533, 10549), False, 'from email.Utils import formatdate\n'), ((10674, 10690), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (10688, 10690), False, 'from twisted.internet import defer, reactor, utils\n'), ((10830, 10884), 'twisted.internet.reactor.connectTCP', 'reactor.connectTCP', (['self.smtp_host', '(25)', 'sender_factory'], {}), '(self.smtp_host, 25, sender_factory)\n', (10848, 10884), False, 'from twisted.internet import defer, reactor, utils\n'), ((7086, 7102), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (7096, 7102), False, 'import json\n'), ((9352, 9425), 'twisted.python.log.msg', 'log.msg', (['("Creating \'%s\' try job(s) %s for %s" % (builder_name, ssid, bot))'], {}), '("Creating \'%s\' try job(s) %s for %s" % (builder_name, ssid, bot))\n', (9359, 9425), False, 'from twisted.python import log\n'), ((11530, 11615), 'master.try_job_base.BadJobfile', 'BadJobfile', (["('Received tryjob from unsupported repository %s' % change.repository)"], {}), "('Received tryjob from unsupported repository %s' % change.repository\n )\n", (11540, 11615), False, 'from master.try_job_base import BadJobfile\n'), ((12976, 13006), 'base64.b64decode', 'base64.b64decode', (['contents_b64'], {}), '(contents_b64)\n', (12992, 13006), False, 'import base64\n'), ((7143, 7200), 'master.try_job_base.BadJobfile', 'BadJobfile', (["('Failed to parse job JSON: %s' % (e.message,))"], {}), "('Failed to parse job JSON: %s' % (e.message,))\n", (7153, 7200), False, 'from master.try_job_base import BadJobfile\n'), ((11046, 11098), 'twisted.python.log.msg', 'log.msg', (["('Exception in try job scheduler: %s' % (e,))"], {}), "('Exception in try job scheduler: %s' % (e,))\n", (11053, 11098), False, 'from twisted.python import log\n'), ((11128, 11149), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11147, 11149), False, 'import traceback\n'), ((8931, 8953), 'json.dumps', 'json.dumps', (['extra_args'], {}), '(extra_args)\n', (8941, 8953), False, 'import json\n')] |
Hellofafar/Leetcode | Medium/515.py | 7a459e9742958e63be8886874904e5ab2489411a | # ------------------------------
# 515. Find Largest Value in Each Tree Row
#
# Description:
# You need to find the largest value in each row of a binary tree.
# Example:
# Input:
# 1
# / \
# 3 2
# / \ \
# 5 3 9
# Output: [1, 3, 9]
#
# Version: 1.0
# 12/22/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys

class Solution:
def largestValues(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
children = [root]
res = []
while children:
temp = [] # Node of next row
largest = -sys.maxsize # Largest number of this row
for i in range(len(children)):
node = children[i]
largest = max(node.val, largest)
if node.left:
temp.append(node.left)
if node.right:
temp.append(node.right)
res.append(largest)
children = temp
return res
# Used for testing
if __name__ == "__main__":
test = Solution()
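    # A quick self-check (not part of the original solution): LeetCode normally
    # supplies TreeNode, so a minimal local definition is included here to make
    # the example runnable standalone. Expected output: [1, 3, 9]
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(3), TreeNode(2)
    root.left.left, root.left.right = TreeNode(5), TreeNode(3)
    root.right.right = TreeNode(9)
    print(test.largestValues(root))  # [1, 3, 9]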
# ------------------------------
# Summary:
# BFS solution. | [] |
lachmanfrantisek/opsmop | opsmop/meta/docs/exparser.py | 562ae2d753ff84b3d794a6815d0436753e82d2a0 | # Copyright 2018 Michael DeHaan LLC, <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Example(object):
def __init__(self):
# things we'll figure out as we scan an example
self.name = ""
self.see_files = []
self.description = []
self.code = []
class Record(object):
def __init__(self):
# things which we'll figure out as we scan the example
self.name = ""
self.purpose = ""
self.provider_names = []
self.related_modules = []
self.category = ""
self.description = []
self.examples = []
self.current_example = Example()
self.phase = 'module'
self.count = 0
def set_phase(self, phase):
self.phase = phase
print("---------------------------------------------------------")
print("%s phase | %s" % (self.count, self.phase))
print("---------------------------------------------------------")
@classmethod
def from_file(cls, filename):
r = cls()
r.name = os.path.basename(filename).replace(".py","")
print("=========================================================")
print("%s M | %s" % ('0', r.name))
data = open(filename).read().splitlines()
for line in data:
if not r.handle_line(line):
break
return r
def load_command(self, line):
if "DESCRIPTION" in line or '----' in line or '====' in line:
pass
elif not ":" in line:
# commands must contain a colon unless they are blocks or DESCRIPTION starters
return (False, None, None)
if not line.startswith("#"):
# commands must be in comments
return (False, None, None)
if ":" in line:
tokens = line.split(":")
if tokens[0].upper() != tokens[0]:
# commands must be in all caps. This is done
# so we don't get confused by colons in URLs and so on.
print("REJECT: %s" % tokens[0])
return (False, None, None)
# at this point we are sure it is a command
if '#------------' in line.replace(" ",""):
return (True, 'start_block', None)
if '#============' in line.replace(" ",""):
return (True, 'end_block', None)
# throw away the leading comment
line = line.replace("#","",1).strip()
if line.startswith("DESCRIPTION"):
return (True, 'description', None)
tokens = line.split(':', 1)
command = tokens[0].replace("#","").strip().lower()
rest = tokens[1].strip()
return (True, command, rest)
def handle_line(self, line):
self.count = self.count + 1
(is_command, command, rest) = self.load_command(line)
print("%s line | %s" % (self.count, line))
#if command == 'policy':
# return False
if is_command:
#if command not in [ 'start_block', 'end_block' ]:
# print("keyword: %s => %s" % (command, rest))
self.handle_command(command, rest)
return True
#print("PHASE=%s" % self.phase)
#print("LINE=%s" % line)
if self.phase == 'module':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the module phase should be all commands")
elif self.phase == 'description':
# module description lines must be comments
self.handle_module_description(line)
elif self.phase == 'example':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the example phase should be all commands")
elif self.phase == 'example_description':
self.handle_example_description(self.current_example, line)
elif self.phase == 'example_code':
self.handle_example_code(self.current_example, line)
elif self.phase == 'limbo':
#print("ignoring line while in limbo: %s" % line)
pass
elif self.phase == 'done':
#print("ignoring line while done: %s" % line)
pass
else:
raise Exception("unknown phase: %s" % self.phase)
return True # continue
def handle_command(self, command, rest):
#print("<PHASE: %s, COMMAND: %s, REST: %s>" % (self.phase, command, rest))
if self.phase == 'done':
return False
if self.phase == 'module':
# from module mode the only state transition is into module_description mode
# when we find the description command
if command not in ['start_block', 'end_block']:
print("%s set | %-20s | %s" % (self.count, command, rest))
if command == 'module':
pass
elif command == 'start_block':
pass
elif command == 'category':
self.category = rest
elif command == 'purpose':
self.purpose = rest
elif command == 'related':
self.related_modules = [ x.strip() for x in rest.split(",") ]
elif command == 'providers':
self.providers = [ x.strip() for x in rest.split(",") ]
elif command == 'fyi':
pass
elif command == 'description':
print("---------------------------------------------------------")
self.set_phase('description')
elif command == 'end_block':
raise Exception("unexpected end block without description")
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'description':
# in description phase end block moves us into limbo until we find
# another example start block
if command == 'end_block':
self.set_phase('limbo')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'limbo':
# in limbo, seeing a start block moves us into example phase
if command == 'start_block':
self.set_phase('example')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'example':
# in example phase we can only move into example description phase
# by hitting the description command
if command == 'example':
print("---------------------------------------------------------")
print("%s exmp | %s" % (self.count, rest))
print("---------------------------------------------------------")
self.current_example.name = rest
elif command == 'setup':
self.set_phase('done')
elif command == 'description':
print("MOV!")
self.set_phase('example_description')
elif command == 'see_files' or command == 'see_file':
self.current_example.see_files = [ x.strip() for x in rest.split(",")]
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_description':
# in example description phase we can only move into example code phase
# by hitting an end block
if command == 'end_block':
print("-------")
self.set_phase('example_code')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_code':
# in example code phase we can only move back into example phase by
# hitting a start block
if command == 'start_block':
self.examples.append(self.current_example)
self.current_example = Example()
self.set_phase('example')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'done':
return False
else:
raise Exception("unknown phase: %s" % self.phase)
def handle_example_description(self, example, line):
# could be a comment or the code example, we want to keep both
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
print("%s desc | %s" % (self.count, line))
example.description.append(line)
def handle_example_code(self, example, line):
line = line.rstrip()
example.code.append(line)
print("%s code | %s" % (self.count, line))
def handle_module_description(self, line):
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
if line:
print("%s mdesc | %s" % (self.count, line))
self.description.append(line)
| [((1598, 1624), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1614, 1624), False, 'import os\n')] |
sheunl/Compiler_Tests | pylox/TokenType.py | 18c5e0568bc39a60094f3e44943ac252c279ffb9 | from enum import Enum
class T(Enum):
    # Single-character tokens
    LEFT_PAREN = 1
    RIGHT_PAREN = 2
    LEFT_BRACE = 3
    RIGHT_BRACE = 4
    COMMA = 5
    DOT = 6
    MINUS = 7
    PLUS = 8
    SEMICOLON = 9
    SLASH = 10
    STAR = 11
    # One- or two-character tokens
    BANG = 12
    BANG_EQUAL = 13
    EQUAL = 14
    EQUAL_EQUAL = 15
    GREATER = 16
    GREATER_EQUAL = 17
    LESS = 18
    LESS_EQUAL = 19
    # Literals
    IDENTIFIER = 20
    STRING = 21
    NUMBER = 22
    # Keywords
    AND = 23
    CLASS = 24
    ELSE = 25
    FALSE = 26
    FUN = 27
    FOR = 28
    IF = 29
    NIL = 30
    OR = 31
    PRINT = 32
    RETURN = 33
    SUPER = 34
    THIS = 35
    TRUE = 36
    VAR = 37
    WHILE = 38
    EOF = 39 | []
dios-game/dios-cocos | src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py | b7fbcbafe02f516ef18fdb64b4519dbf806303fc |
import cocos
from MultiLanguage import MultiLanguage
from package.helper import ProjectHelper
class FrameworkAdd(cocos.CCPlugin):
@staticmethod
def plugin_name():
return "add-framework"
@staticmethod
def brief_description():
return MultiLanguage.get_string('FRAMEWORK_ADD_BRIEF')
# parse arguments
def parse_args(self, argv):
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("name", metavar="NAME", help=MultiLanguage.get_string('FRAMEWORK_ADD_ARG_NAME'))
return parser.parse_args(argv)
def run(self, argv):
args = self.parse_args(argv)
name = args.name
project = ProjectHelper.get_current_project()
ProjectHelper.add_framework(project, name)
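# Invocation sketch (inferred from plugin_name() and parse_args() above, not
# from project documentation): the cocos console routes
#   cocos add-framework <NAME>
# to this plugin, which resolves the current project via ProjectHelper and
# registers the framework under the given name.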
| [((269, 316), 'MultiLanguage.MultiLanguage.get_string', 'MultiLanguage.get_string', (['"""FRAMEWORK_ADD_BRIEF"""'], {}), "('FRAMEWORK_ADD_BRIEF')\n", (293, 316), False, 'from MultiLanguage import MultiLanguage\n'), ((832, 867), 'package.helper.ProjectHelper.get_current_project', 'ProjectHelper.get_current_project', ([], {}), '()\n', (865, 867), False, 'from package.helper import ProjectHelper\n'), ((876, 918), 'package.helper.ProjectHelper.add_framework', 'ProjectHelper.add_framework', (['project', 'name'], {}), '(project, name)\n', (903, 918), False, 'from package.helper import ProjectHelper\n'), ((634, 684), 'MultiLanguage.MultiLanguage.get_string', 'MultiLanguage.get_string', (['"""FRAMEWORK_ADD_ARG_NAME"""'], {}), "('FRAMEWORK_ADD_ARG_NAME')\n", (658, 684), False, 'from MultiLanguage import MultiLanguage\n')] |
f-grimaldi/explain_ML | src/utils.py | 00892675be32bebd023b274270ccb05b798fb388 | from matplotlib import colors
import numpy as np
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.vcenter, self.vmax], [self.vmin, self.vcenter, self.vmax]
return np.ma.masked_array(np.interp(value, x, y))
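# Usage sketch (assumes a torch.nn module `layer` and matplotlib.pyplot as
# `plt`; neither is imported in this file):
#   hook = SaveOutput()
#   handle = layer.register_forward_hook(hook)   # hook.outputs fills on forward()
#   plt.imshow(values, cmap='RdBu',
#              norm=MidpointNormalize(vmin=-1, vmax=1, vcenter=0))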
| [((417, 466), 'matplotlib.colors.Normalize.__init__', 'colors.Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (442, 466), False, 'from matplotlib import colors\n'), ((737, 759), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (746, 759), True, 'import numpy as np\n')] |
erkyrath/tworld | lib/two/mongomgr.py | 9f5237771196b03753d027277ffc296e25fd7425 | """
Manage the connection to the MongoDB server.
"""
import tornado.gen
import tornado.ioloop
import motor
class MongoMgr(object):
def __init__(self, app):
# Keep a link to the owning application.
self.app = app
self.log = self.app.log
# This will be the Motor (MongoDB) connection. We'll open it in the
# first monitor_mongo_status call.
self.mongo = None
self.mongoavailable = False # true if self.mongo exists and is open
self.mongotimerbusy = False # true while monitor_mongo_status runs
# We also manage self.app.mongodb, a MotorDatabase. This must be
# non-None exactly when mongoavailable is true.
def init_timers(self):
ioloop = tornado.ioloop.IOLoop.instance()
# The mongo status monitor. We set up one call immediately, and then
# try again every three seconds.
ioloop.add_callback(self.monitor_mongo_status)
res = tornado.ioloop.PeriodicCallback(self.monitor_mongo_status, 3000)
res.start()
def close(self):
"""Close the connection to mongodb. (The monitor will start it
right back up again, or try to.)
"""
if self.mongo:
try:
self.mongo.disconnect()
except Exception as ex:
self.log.error('Problem disconnecting mongo: %s', ex)
self.mongo = None
self.app.mongodb = None
@tornado.gen.coroutine
def monitor_mongo_status(self):
if (self.mongotimerbusy):
self.log.warning('monitor_mongo_status: already in flight; did a previous call jam?')
return
if (self.app.shuttingdown):
self.log.warning('monitor_mongo_status: server is shutting down, never mind')
return
self.mongotimerbusy = True
if (self.mongoavailable):
try:
res = yield motor.Op(self.mongo.admin.command, 'ping')
if (not res):
self.log.error('Mongo client not alive')
self.mongoavailable = False
except Exception as ex:
self.log.error('Mongo client not alive: %s', ex)
self.mongoavailable = False
if (not self.mongoavailable):
self.close()
if (not self.mongoavailable):
try:
self.mongo = motor.MotorClient(tz_aware=True)
res = yield motor.Op(self.mongo.open)
### maybe authenticate to a database?
self.mongoavailable = True
self.app.mongodb = self.mongo[self.app.opts.mongo_database]
self.log.info('Mongo client open')
self.app.queue_command({'cmd':'dbconnected'})
except Exception as ex:
self.mongoavailable = False
self.app.mongodb = None
self.log.error('Mongo client not open: %s', ex)
self.mongotimerbusy = False
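# Wiring sketch (assumes the owning `app` exposes log, opts and queue_command
# as used above):
#   mgr = MongoMgr(app)
#   mgr.init_timers()   # checks availability now and then every 3 seconds
# Once the ping succeeds, app.mongodb holds the MotorDatabase and the app is
# queued a {'cmd': 'dbconnected'} command.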
| [((2452, 2484), 'motor.MotorClient', 'motor.MotorClient', ([], {'tz_aware': '(True)'}), '(tz_aware=True)\n', (2469, 2484), False, 'import motor\n'), ((1957, 1999), 'motor.Op', 'motor.Op', (['self.mongo.admin.command', '"""ping"""'], {}), "(self.mongo.admin.command, 'ping')\n", (1965, 1999), False, 'import motor\n'), ((2513, 2538), 'motor.Op', 'motor.Op', (['self.mongo.open'], {}), '(self.mongo.open)\n', (2521, 2538), False, 'import motor\n')] |
hugopibernat/BayesianABTestAnalysis | code/examples/example_binomial_and_log_normal_abtest.py | 026960524f5313f4a734f30fd447a5731be802e0 | #################################################
####### Author: Hugo Pibernat #######
####### Contact: [email protected] #######
####### Date: April 2014 #######
#################################################
from bayesianABTest import sampleSuccessRateForBinomial, sampleMeanForLogNormal, probabilityOfABetterThanB
from numpy.random import lognormal
from numpy import mean, concatenate, zeros
# Generate Log-Normal data
A_actuals = lognormal(mean=4.10, sigma=1.0, size=100)
B_actuals = lognormal(mean=4.00, sigma=1.0, size=100)
# Plus some zeros
A_data = concatenate([A_actuals,zeros(10000)])
B_data = concatenate([B_actuals,zeros(10000)])
# Modeling conversions with a binomial variable
A_purchases = sum(A_data > 0)
A_sessions = len(A_data)
B_purchases = sum(B_data > 0)
B_sessions = len(B_data)
A_CR = sampleSuccessRateForBinomial(A_sessions,A_purchases)
B_CR = sampleSuccessRateForBinomial(B_sessions,B_purchases)
# Modeling the spend with a log-normal
A_non_zero_data = A_data[A_data > 0]
B_non_zero_data = B_data[B_data > 0]
A_spend = sampleMeanForLogNormal(A_non_zero_data)
B_spend = sampleMeanForLogNormal(B_non_zero_data)
# Combining the two
A_rps = A_CR*A_spend
B_rps = B_CR*B_spend
# Result:
print(probabilityOfABetterThanB(A_rps, B_rps)) | []
airslate-oss/python-airslate | tests/models/test_documents.py | 0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d | # This file is part of the airslate.
#
# Copyright (c) 2021 airSlate, Inc.
#
# For the full copyright and license information, please view
# the LICENSE file that was distributed with this source code.
from airslate.models.documents import UpdateFields
from airslate.entities.fields import Field
def test_empty_update_fields__to_dict():
model = UpdateFields()
assert model.to_dict() == {'data': []}
def test_update_fields__to_dict():
model = UpdateFields(data=[Field('123'), Field('abc')])
assert model.to_dict() == {'data': [
{'id': '123', 'type': 'dictionary'},
{'id': 'abc', 'type': 'dictionary'}
]}
| [((352, 366), 'airslate.models.documents.UpdateFields', 'UpdateFields', ([], {}), '()\n', (364, 366), False, 'from airslate.models.documents import UpdateFields\n'), ((478, 490), 'airslate.entities.fields.Field', 'Field', (['"""123"""'], {}), "('123')\n", (483, 490), False, 'from airslate.entities.fields import Field\n'), ((492, 504), 'airslate.entities.fields.Field', 'Field', (['"""abc"""'], {}), "('abc')\n", (497, 504), False, 'from airslate.entities.fields import Field\n')] |
rseed42/labyrinth | sim/dynamicobject.py | 1cd4dc74c67b1b76972e1e048a7fce0c13955e7d | class DynamicObject(object):
def __init__(self, name, id_):
self.name = name
self.id = id_
| [] |
meysam81/sheypoor | app/main.py | aa67e20646ebc4143b83968f60c0b28c2ad340a1 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app import api
from app.core.config import config
app = FastAPI(title="Sheypoor")
# Set all CORS enabled origins
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api.router, prefix=config.API_URI)
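# Local run sketch (module path assumed from this file living at app/main.py):
#   uvicorn app.main:app --reload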
| [((142, 167), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Sheypoor"""'}), "(title='Sheypoor')\n", (149, 167), False, 'from fastapi import FastAPI\n')] |
Indy2222/mbg-codon-usage | cdnu/ccds.py | d415076a8150cd712010c0389c71ef22ba9ad850 | from typing import List, NamedTuple
CCDS_FILE = 'CCDS.current.txt'
CHROMOSOMES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
'X', 'Y')
class CdsPos(NamedTuple):
ccds_id: str
indexes: list
"""2-tuples with start (inclusive) and stop indexes (exclusive) in
reference genome. Whole CDS can be constructed as concatenation of the
sub-sequences."""
molecule: str
"""Molecule name, see :const:`CHROMOSOMES`"""
def load_ccds() -> List[CdsPos]:
"""Load file with CDS locations within GRCh38 genome as a list of
:class:`CdsPos`."""
cds = []
with open(CCDS_FILE, encoding='utf-8', newline='\n') as fp:
for line in fp:
if not line:
# Skip empty lines
continue
if line.startswith('#'):
# Skip comments
continue
parts = line.split('\t')
ccds_id = parts[4]
status = parts[5]
if 'Public' not in status:
# CDS is not yet public
continue
if parts[6] == '-':
                # CDS is on the negative strand (its sequence would be the
                # reverse-complement), so skip it
continue
locations_str = parts[9]
if locations_str == '-':
# CDS location unknown
continue
chromosome = parts[0]
assert chromosome in CHROMOSOMES, chromosome
locations = []
assert locations_str.startswith('[')
assert locations_str.endswith(']')
for location_str in locations_str[1:-1].split(','):
start_str, stop_str = location_str.split('-')
start, stop = int(start_str), int(stop_str) + 1
locations.append((start, stop))
if sum(b - a for a, b in locations) % 3 != 0:
# Skip CDS which are not multiple of three in length.
continue
cds.append(CdsPos(
ccds_id=ccds_id,
molecule='chr' + chromosome,
indexes=locations
))
return cds
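# Usage sketch (expects CCDS.current.txt in the working directory):
#   cds_list = load_ccds()
#   first = cds_list[0]
#   print(first.ccds_id, first.molecule, first.indexes[:2])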
| [] |
ITMO-NSS-team/GEFEST | test/test_resolve_errors.py | 72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669 | import pytest
from copy import deepcopy
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# length and width used for the test polygons
poly_width = 10
poly_length = 20
# create test polygons from their corner points
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
domain = Domain()
def test_unclosed_poly():
input_structure = Structure([unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert unclosed_poly(input_structure, domain)
assert not unclosed_poly(observed_structure, domain)
def test_self_intersection():
input_structure = Structure([incorrect_poly])
observed_structure = postprocess(input_structure, domain)
assert self_intersection(input_structure)
assert not self_intersection(observed_structure)
def test_out_of_bound():
input_structure = Structure([out_bounds_rectangle_poly])
observed_structure = postprocess(input_structure, domain)
assert out_of_bound(input_structure, domain)
assert not out_of_bound(observed_structure, domain)
def test_fixed_polys():
domain = Domain(fixed_points=[[[15, 30],
[40, 30],
[15, 40]]])
poly_like_fixed = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
input_structure = Structure([poly_like_fixed, unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert all([np.isclose(len(observed_structure.polygons), 2),
'like_fixed' not in [poly.id for poly in observed_structure.polygons],
'fixed' in [poly.id for poly in observed_structure.polygons]])
def test_too_close():
same_poly = deepcopy(unclosed_triangle_poly)
same_poly.id = 'same_triangle'
input_structure = Structure([unclosed_triangle_poly, same_poly])
observed_structure = postprocess(input_structure, domain)
print(observed_structure.polygons)
assert np.isclose(len(observed_structure.polygons), 1)
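# Run sketch: these tests are collected by pytest, e.g.
#   pytest test/test_resolve_errors.py -q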
| [((1059, 1094), 'gefest.core.structure.structure.Structure', 'Structure', (['[unclosed_triangle_poly]'], {}), '([unclosed_triangle_poly])\n', (1068, 1094), False, 'from gefest.core.structure.structure import Structure\n'), ((1319, 1346), 'gefest.core.structure.structure.Structure', 'Structure', (['[incorrect_poly]'], {}), '([incorrect_poly])\n', (1328, 1346), False, 'from gefest.core.structure.structure import Structure\n'), ((1558, 1596), 'gefest.core.structure.structure.Structure', 'Structure', (['[out_bounds_rectangle_poly]'], {}), '([out_bounds_rectangle_poly])\n', (1567, 1596), False, 'from gefest.core.structure.structure import Structure\n'), ((2046, 2098), 'gefest.core.structure.structure.Structure', 'Structure', (['[poly_like_fixed, unclosed_triangle_poly]'], {}), '([poly_like_fixed, unclosed_triangle_poly])\n', (2055, 2098), False, 'from gefest.core.structure.structure import Structure\n'), ((2433, 2465), 'copy.deepcopy', 'deepcopy', (['unclosed_triangle_poly'], {}), '(unclosed_triangle_poly)\n', (2441, 2465), False, 'from copy import deepcopy\n'), ((2523, 2569), 'gefest.core.structure.structure.Structure', 'Structure', (['[unclosed_triangle_poly, same_poly]'], {}), '([unclosed_triangle_poly, same_poly])\n', (2532, 2569), False, 'from gefest.core.structure.structure import Structure\n'), ((588, 602), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (593, 602), False, 'from gefest.core.structure.point import Point\n'), ((762, 776), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (767, 776), False, 'from gefest.core.structure.point import Point\n'), ((942, 956), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (947, 956), False, 'from gefest.core.structure.point import Point\n'), ((1978, 1991), 'gefest.core.structure.point.Point', 'Point', (['(15)', '(30)'], {}), '(15, 30)\n', (1983, 1991), False, 'from gefest.core.structure.point import Point\n'), ((1993, 2006), 'gefest.core.structure.point.Point', 'Point', (['(40)', '(30)'], {}), '(40, 30)\n', (1998, 2006), False, 'from gefest.core.structure.point import Point\n'), ((2008, 2021), 'gefest.core.structure.point.Point', 'Point', (['(15)', '(40)'], {}), '(15, 40)\n', (2013, 2021), False, 'from gefest.core.structure.point import Point\n')] |
davla/i3-live-tree | tests/mocks.py | 8dc3917afdd09f53f7cf39653c2bf12cb0200983 | from unittest.mock import MagicMock, Mock
from i3ipc.aio import Con
import i3_live_tree.tree_serializer # noqa: F401
class MockConSerializer(Mock, Con):
"""Mock a generic i3ipc.aio.Con for serialization purposes
This Mock is meant to ease testing of i3ipc.aio.Con serialization methods,
    which are monkey patched in i3_live_tree.tree_serializer.
In order to achieve this, the mock inherits all the method implementations
of i3ipc.aio.Con, most importantly the serialization ones. However,
whatever is needed for serialization, both properties and methods, is
mocked and can be injected in the constructor, in order to ease the
creation of mock instances.
"""
def __init__(self, *args, name=None, layout=None, focused=False,
nodes=iter(()), **kwargs):
Mock.__init__(self, *args, **kwargs)
self.focused = focused
self.layout = layout
self.name = name
self.nodes = nodes
class MockConNavigation(MagicMock):
"""Mock an i3ipc.aio.Con for navigation purposes
This Mock is meant to be used when testing i3ipc event handlers. It mocks
all the necessary methods and properties, by returning `self` when an
i3ipc.aio.Con instance is needed for the sake of simplicity.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def find_focused(self):
"""Return the focused window"""
return self
def workspace(self):
"""Return the containing workspace"""
return self
class MockI3(Mock):
"""Mock an i3ipc.aio.Connection"""
def __init__(self, *args, tree, **kwargs):
super().__init__(*args, **kwargs)
self.tree = tree
async def get_tree(self):
"""Return the i3 tree asynchronously"""
return self.tree
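# Test wiring sketch (all names are local to this module):
#   con = MockConSerializer(name='term', layout='splith', focused=True)
#   i3 = MockI3(tree=MockConNavigation())
#   tree = await i3.get_tree()   # returns the injected tree mock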
| [((821, 857), 'unittest.mock.Mock.__init__', 'Mock.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (834, 857), False, 'from unittest.mock import MagicMock, Mock\n')] |
nested-tech/hvac | hvac/api/secrets_engines/gcp.py | 2a58ac9850b882e43c1617ae6b0ea93104c99794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""
from hvac import exceptions
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import DEFAULT_MOUNT_POINT, ALLOWED_CREDS_ENDPOINTS
class Gcp(VaultApiBase):
def generate_credentials(self, roleset, endpoint='key', mount_point=DEFAULT_MOUNT_POINT):
if endpoint not in ALLOWED_CREDS_ENDPOINTS:
error_msg = 'invalid endpoint argument provided "{arg}", supported types: "{allowed_endpoints}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=endpoint,
allowed_endpoints=', '.join(ALLOWED_CREDS_ENDPOINTS),
))
api_path = '/v1/{mount_point}/{endpoint}/{roleset}'.format(
mount_point=mount_point,
endpoint=endpoint,
roleset=roleset,
)
response = self._adapter.get(
url=api_path
)
return response.json()
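    # Usage sketch (assumes an authenticated hvac Client with this engine
    # mounted at the default path; `endpoint` must be one of
    # ALLOWED_CREDS_ENDPOINTS):
    #   creds = client.secrets.gcp.generate_credentials(roleset='my-roleset',
    #                                                    endpoint='key')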
| [] |
poolpitako/ypricemagic | ypricemagic/uniswap.py | 882aa2071a918937e77e0b85e5f52191a4714d28 | import token
from tokenize import tokenize
from brownie import Contract, chain
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from .utils.cache import memory
from .utils.multicall2 import fetch_multicall
from .interfaces.ERC20 import ERC20ABI
import ypricemagic.magic
import ypricemagic.utils.utils
from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi
# NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path.
# Please add a viable swap path below to fetch price data successfully.
#project.load()
if chain.id == 1:
FACTORIES = {
"uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
}
ROUTERS = {
"uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
"sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"),
}
SPECIAL_PATHS = {
"sushiswap": {
"0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc]
,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc]
,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc]
,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc]
,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt]
,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc]
,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc]
,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai]
,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc]
},
"uniswap": {
}
}
elif chain.id == 56:
ROUTERS = {
"pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"),
"pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F")
}
FACTORIES = {
"pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
"pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812"
}
SPECIAL_PATHS = {
"pancakeswapv2": {
},
"pancakeswapv1": {
}
}
elif chain.id == 137:
ROUTERS = {
"quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff")
}
FACTORIES = {
"quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32",
}
SPECIAL_PATHS = {
"quickswap": {
}
}
FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES}
FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES}
@ttl_cache(ttl=36000)
def get_price(token_in, token_out=usdc, router="uniswap", block=None, paired_against=weth):
"""
Calculate a price based on Uniswap Router quote for selling one `token_in`.
Always uses intermediate WETH pair if `[token_in,weth,token_out]` swap path available.
"""
if chain.id == 56 and token_out == usdc:
busd = Contract("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56")
token_out = busd
tokens = [str(token) for token in [token_in, token_out]]
amount_in = 10 ** ypricemagic.utils.utils.get_decimals_with_override(tokens[0])
if str(token_in) in STABLECOINS:
return 1
elif str(paired_against) in STABLECOINS and str(token_out) in STABLECOINS:
path = [token_in, paired_against]
elif weth in (token_in, token_out):
path = [token_in, token_out]
elif paired_against == sushi and token_out != sushi:
path = [token_in,sushi,weth,token_out]
elif str(token_in) in SPECIAL_PATHS[router].keys() and str(token_out) in STABLECOINS:
path = SPECIAL_PATHS[router][str(token_in)]
elif chain.id == 56: #bsc
from .constants import cake, wbnb
if wbnb in (token_in, token_out):
path = [token_in, token_out]
elif cake in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wbnb,token_out]
    elif chain.id == 137: # polygon
from .constants import wmatic
if wmatic in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wmatic,token_out]
else:
path = [token_in, weth, token_out]
fees = 0.997 ** (len(path) - 1)
if router in ROUTERS:
router = ROUTERS[router]
try:
quote = router.getAmountsOut(amount_in, path, block_identifier=block)
amount_out = quote[-1] / 10 ** ypricemagic.utils.utils.get_decimals_with_override(str(path[-1]))
return amount_out / fees
except ValueError as e:
return
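# Example sketch (Ethereum mainnet, token names from .constants; illustrative,
# not an official API reference):
#   get_price(weth)                      # WETH priced in USDC via the Uniswap V2 router
#   get_price(wbtc, router="sushiswap")  # same quote routed through Sushiswap
# The 0.3% swap fee per hop is backed out via fees = 0.997 ** (len(path) - 1).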
@ttl_cache(ttl=600)
def get_price_v1(asset, block=None):
factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95")
try:
exchange = Contract(factory.getExchange(asset))
eth_bought = exchange.getTokenToEthInputPrice(10 ** ypricemagic.utils.utils.get_decimals_with_override(asset), block_identifier=block)
exchange = Contract(factory.getExchange(usdc))
usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
fees = 0.997 ** 2
return usdc_bought / fees
except (ContractNotFound, ValueError) as e:
pass
@memory.cache()
def is_uniswap_pool(address):
try:
return Contract(address).factory() in FACTORY_TO_ROUTER
except (ValueError, OverflowError, AttributeError):
pass
return False
@ttl_cache(ttl=600)
def lp_price(address, block=None):
""" Get Uniswap/Sushiswap LP token price. """
def extrapolate_balance_if_needed():
nonlocal balances
if balances[0] and not balances[1]:
balances[1] = balances[0]
if balances[1] and not balances[0]:
balances[0] = balances[1]
return balances
pair = Contract(address)
if chain.id not in [56, 137]: # No multicall2 on bsc or poly
factory, token0, token1, supply, reserves = fetch_multicall(
[pair, "factory"],
[pair, "token0"],
[pair, "token1"],
[pair, "totalSupply"],
[pair, "getReserves"],
block=block
)
else:
factory = pair.factory(block_identifier = block)
token0 = pair.token0(block_identifier = block)
token1 = pair.token1(block_identifier = block)
supply = pair.totalSupply(block_identifier = block)
reserves = pair.getReserves(block_identifier = block)
router = FACTORY_TO_PROTOCOL[factory]
tokens = [ypricemagic.utils.utils.Contract_with_erc20_fallback(token) for token in [token0, token1]]
price0 = get_price(tokens[0], paired_against=tokens[1], router=router, block=block)
price1 = get_price(tokens[1], paired_against=tokens[0], router=router, block=block)
prices = [price0,price1]
scales = [10 ** ypricemagic.utils.utils.get_decimals_with_override(str(token)) for token in tokens]
supply = supply / 1e18
try:
balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
except TypeError as e: # If can't get price via router, try to get from elsewhere
if not price0:
try:
price0 = ypricemagic.magic.get_price(tokens[0], block)
except ypricemagic.magic.PriceError:
                price0 = None
if not price1:
try:
price1 = ypricemagic.magic.get_price(tokens[1], block)
except ypricemagic.magic.PriceError:
                price1 = None
prices = [price0,price1]
balances = [None,None] # [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
if price0:
balances[0] = reserves[0] / scales[0] * price0
if price1:
balances[1] = reserves[1] / scales[1] * price1
balances = extrapolate_balance_if_needed()
try:
return sum(balances) / supply
except TypeError:
return | [((3820, 3840), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(36000)'}), '(ttl=36000)\n', (3829, 3840), False, 'from cachetools.func import ttl_cache\n'), ((5836, 5854), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(600)'}), '(ttl=600)\n', (5845, 5854), False, 'from cachetools.func import ttl_cache\n'), ((6652, 6670), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(600)'}), '(ttl=600)\n', (6661, 6670), False, 'from cachetools.func import ttl_cache\n'), ((5906, 5960), 'brownie.Contract', 'Contract', (['"""0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95"""'], {}), "('0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95')\n", (5914, 5960), False, 'from brownie import Contract, chain\n'), ((7024, 7041), 'brownie.Contract', 'Contract', (['address'], {}), '(address)\n', (7032, 7041), False, 'from brownie import Contract, chain\n'), ((845, 899), 'brownie.Contract', 'Contract', (['"""0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"""'], {}), "('0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D')\n", (853, 899), False, 'from brownie import Contract, chain\n'), ((922, 976), 'brownie.Contract', 'Contract', (['"""0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"""'], {}), "('0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F')\n", (930, 976), False, 'from brownie import Contract, chain\n'), ((4180, 4234), 'brownie.Contract', 'Contract', (['"""0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56"""'], {}), "('0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56')\n", (4188, 4234), False, 'from brownie import Contract, chain\n'), ((2987, 3041), 'brownie.Contract', 'Contract', (['"""0x10ED43C718714eb63d5aA57B78B54704E256024E"""'], {}), "('0x10ED43C718714eb63d5aA57B78B54704E256024E')\n", (2995, 3041), False, 'from brownie import Contract, chain\n'), ((3068, 3122), 'brownie.Contract', 'Contract', (['"""0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F"""'], {}), "('0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F')\n", (3076, 3122), False, 'from brownie import Contract, chain\n'), ((3458, 3512), 'brownie.Contract', 'Contract', (['"""0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff"""'], {}), "('0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff')\n", (3466, 3512), False, 'from brownie import Contract, chain\n'), ((6514, 6531), 'brownie.Contract', 'Contract', (['address'], {}), '(address)\n', (6522, 6531), False, 'from brownie import Contract, chain\n')] |
haodingkui/semeval2020-task5-subtask1 | configs/configuration_textrnn.py | bfd0c808c6b1de910d6f58ea040a13442b4bcdca | """ TextRNN model configuration """
class TextRNNConfig(object):
def __init__(
self,
vocab_size=30000,
pretrained_embedding=None,
embedding_matrix=None,
embedding_dim=300,
embedding_dropout=0.3,
lstm_hidden_size=128,
output_dim=1,
**kwargs
):
self.pretrained_embedding = pretrained_embedding
self.embedding_matrix = embedding_matrix
self.embedding_dim = embedding_dim
self.embedding_dropout = embedding_dropout
self.lstm_hidden_size = lstm_hidden_size
self.output_dim = output_dim
| [] |
akorzunin/telegram_auction_bot | settings/debug_members.py | d4d5042614ea11f8085815d8f9fb8b6fbebcfab0 | DEBUG_MEMBER_LIST = [
503131177,
] | [] |
JiazeWang/SP-GAN | metrics/pointops/pointops_util.py | 455003f78b1160ebe0a2056005b069808c0df35b | from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
def backward(xyz, a=None):
return None, None
furthestsampling = FurthestSampling.apply
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weight linear interpolation on 3 features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx: (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx: (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
import numpy as np
return torch.clamp(dist, 0.0, np.inf)
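# Note (added sketch): the expression above uses the expansion
#   ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * <x_i, y_j>
# e.g. x_i = (0, 0), y_j = (3, 4) gives 0 + 25 - 0 = 25.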
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: new_features: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
assert xyz.size() == new_xyz.size()
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
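# Usage sketch for the grouping modules above (CUDA tensors assumed; shapes as
# in the docstrings, variable names here are illustrative only):
#   grouper = QueryAndGroup(radius=0.1, nsample=32, use_xyz=True)
#   new_features = grouper(xyz, new_xyz, features)   # (b, c+3, m, nsample)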
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: ignored
features: (b, c, n) descriptors of the features
output: new_features: (b, c+3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| [((12487, 12517), 'torch.clamp', 'torch.clamp', (['dist', '(0.0)', 'np.inf'], {}), '(dist, 0.0, np.inf)\n', (12498, 12517), False, 'import torch\n'), ((425, 451), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (445, 451), False, 'import torch\n'), ((516, 576), 'metrics.pointops.pointops_cuda.furthestsampling_cuda', 'pointops_cuda.furthestsampling_cuda', (['b', 'n', 'm', 'xyz', 'temp', 'idx'], {}), '(b, n, m, xyz, temp, idx)\n', (551, 576), False, 'from metrics.pointops import pointops_cuda\n'), ((1055, 1086), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (1077, 1086), False, 'import torch\n'), ((1095, 1166), 'metrics.pointops.pointops_cuda.gathering_forward_cuda', 'pointops_cuda.gathering_forward_cuda', (['b', 'c', 'n', 'm', 'features', 'idx', 'output'], {}), '(b, c, n, m, features, idx, output)\n', (1131, 1166), False, 'from metrics.pointops import pointops_cuda\n'), ((1468, 1561), 'metrics.pointops.pointops_cuda.gathering_backward_cuda', 'pointops_cuda.gathering_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n grad_features.data)\n', (1505, 1561), False, 'from metrics.pointops import pointops_cuda\n'), ((2202, 2233), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2224, 2233), False, 'import torch\n'), ((2248, 2277), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2268, 2277), False, 'import torch\n'), ((2286, 2357), 'metrics.pointops.pointops_cuda.nearestneighbor_cuda', 'pointops_cuda.nearestneighbor_cuda', (['b', 'n', 'm', 'unknown', 'known', 'dist2', 'idx'], {}), '(b, n, m, unknown, known, dist2, idx)\n', (2320, 2357), False, 'from metrics.pointops import pointops_cuda\n'), ((3276, 3307), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (3298, 3307), False, 'import torch\n'), ((3316, 3403), 'metrics.pointops.pointops_cuda.interpolation_forward_cuda', 'pointops_cuda.interpolation_forward_cuda', (['b', 'c', 'm', 'n', 'features', 'idx', 'weight', 'output'], {}), '(b, c, m, n, features, idx, weight,\n output)\n', (3356, 3403), False, 'from metrics.pointops import pointops_cuda\n'), ((3864, 3969), 'metrics.pointops.pointops_cuda.interpolation_backward_cuda', 'pointops_cuda.interpolation_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'weight', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n weight, grad_features.data)\n', (3905, 3969), False, 'from metrics.pointops import pointops_cuda\n'), ((4499, 4539), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, nsample)\n', (4521, 4539), False, 'import torch\n'), ((4548, 4627), 'metrics.pointops.pointops_cuda.grouping_forward_cuda', 'pointops_cuda.grouping_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx, output)\n', (4583, 4627), False, 'from metrics.pointops import pointops_cuda\n'), ((5091, 5192), 'metrics.pointops.pointops_cuda.grouping_backward_cuda', 'pointops_cuda.grouping_backward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, nsample, grad_out_data,\n idx, grad_features.data)\n', (5127, 5192), False, 'from metrics.pointops import pointops_cuda\n'), ((5709, 5748), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, nsample)\n', (5730, 5748), False, 'import torch\n'), ((5757, 5844), 'metrics.pointops.pointops_cuda.grouping_int_forward_cuda', 'pointops_cuda.grouping_int_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx,\n output)\n', (5796, 5844), False, 'from metrics.pointops import pointops_cuda\n'), ((6718, 6791), 'metrics.pointops.pointops_cuda.ballquery_cuda', 'pointops_cuda.ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'new_xyz', 'xyz', 'idx'], {}), '(b, n, m, radius, nsample, new_xyz, xyz, idx)\n', (6746, 6791), False, 'from metrics.pointops import pointops_cuda\n'), ((7410, 7485), 'metrics.pointops.pointops_cuda.featuredistribute_cuda', 'pointops_cuda.featuredistribute_cuda', (['b', 'n', 'm', 'max_xyz', 'xyz', 'distribute_idx'], {}), '(b, n, m, max_xyz, xyz, distribute_idx)\n', (7446, 7485), False, 'from metrics.pointops import pointops_cuda\n'), ((8188, 8293), 'metrics.pointops.pointops_cuda.featuregather_forward_cuda', 'pointops_cuda.featuregather_forward_cuda', (['b', 'n', 'm', 'c', 'max_feature', 'distribute_idx', 'distribute_feature'], {}), '(b, n, m, c, max_feature,\n distribute_idx, distribute_feature)\n', (8228, 8293), False, 'from metrics.pointops import pointops_cuda\n'), ((8852, 8978), 'metrics.pointops.pointops_cuda.featuregather_backward_cuda', 'pointops_cuda.featuregather_backward_cuda', (['b', 'n', 'm', 'c', 'grad_distribute_feature_data', 'distribute_idx', 'grad_max_feature.data'], {}), '(b, n, m, c,\n grad_distribute_feature_data, distribute_idx, grad_max_feature.data)\n', (8893, 8978), False, 'from metrics.pointops import pointops_cuda\n'), ((9710, 9819), 'metrics.pointops.pointops_cuda.labelstat_ballrange_cuda', 'pointops_cuda.labelstat_ballrange_cuda', (['b', 'n', 'm', 'radius', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'new_label_stat'], {}), '(b, n, m, radius, nclass, new_xyz,\n xyz, label_stat, new_label_stat)\n', (9748, 9819), False, 'from metrics.pointops import pointops_cuda\n'), ((10543, 10638), 'metrics.pointops.pointops_cuda.labelstat_idx_cuda', 'pointops_cuda.labelstat_idx_cuda', (['b', 'n', 'm', 'nsample', 'nclass', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, nsample, nclass, label_stat, idx,\n new_label_stat)\n', (10575, 10638), False, 'from metrics.pointops import pointops_cuda\n'), ((11550, 11677), 'metrics.pointops.pointops_cuda.labelstat_and_ballquery_cuda', 'pointops_cuda.labelstat_and_ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, radius, nsample, nclass,\n new_xyz, xyz, label_stat, idx, new_label_stat)\n', (11592, 11677), False, 'from metrics.pointops import pointops_cuda\n'), ((12246, 12270), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (12261, 12270), False, 'import torch\n'), ((12340, 12364), 'torch.transpose', 'torch.transpose', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (12355, 12364), False, 'import torch\n'), ((13671, 13694), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (13681, 13694), False, 'import torch\n'), ((14633, 14704), 'metrics.pointops.pointops_cuda.knnquery_cuda', 'pointops_cuda.knnquery_cuda', (['b', 'n', 'm', 'nsample', 'xyz', 'new_xyz', 'idx', 'dist2'], {}), '(b, n, m, nsample, xyz, new_xyz, idx, dist2)\n', (14660, 14704), False, 'from metrics.pointops import pointops_cuda\n'), ((15997, 16020), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (16007, 16020), False, 'import torch\n'), ((21732, 21755), 'numpy.random.shuffle', 'np.random.shuffle', (['idx2'], {}), '(idx2)\n', (21749, 21755), True, 'import numpy as np\n'), ((2373, 2390), 'torch.sqrt', 'torch.sqrt', (['dist2'], {}), '(dist2)\n', (2383, 2390), False, 'import torch\n'), ((12436, 12452), 'torch.mm', 'torch.mm', (['x', 'y_t'], {}), '(x, y_t)\n', (12444, 12452), False, 'import torch\n'), ((467, 495), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n'], {}), '(b, n)\n', (489, 495), False, 'import torch\n'), ((1369, 1400), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (1391, 1400), False, 'import torch\n'), ((3765, 3796), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (3787, 3796), False, 'import torch\n'), ((4992, 5023), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (5014, 5023), False, 'import torch\n'), ((6666, 6701), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (6686, 6701), False, 'import torch\n'), ((7367, 7393), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (7387, 7393), False, 'import torch\n'), ((8140, 8171), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (8162, 8171), False, 'import torch\n'), ((8723, 8754), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (8745, 8754), False, 'import torch\n'), ((9659, 9693), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (9679, 9693), False, 'import torch\n'), ((10492, 10526), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (10512, 10526), False, 'import torch\n'), ((11440, 11474), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (11460, 11474), False, 'import torch\n'), ((11497, 11532), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (11517, 11532), False, 'import torch\n'), ((14519, 14554), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14539, 14554), False, 'import torch\n'), ((14579, 14616), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14601, 14616), False, 'import torch\n'), ((20079, 20128), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (20088, 20128), False, 'import torch\n'), ((22265, 22314), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (22274, 22314), False, 'import torch\n'), ((29001, 29050), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (29010, 29050), False, 'import torch\n')] |
rickdg/vivi | core/src/zeit/cms/content/caching.py | 16134ac954bf8425646d4ad47bdd1f372e089355 | from collections import defaultdict
from logging import getLogger
from operator import itemgetter
from os import environ
from time import time
from zope.cachedescriptors.property import Lazy as cachedproperty
from zeit.cms.content.sources import FEATURE_TOGGLES
from zope.component import getUtility
from zeit.connector.interfaces import IConnector
from zeit.connector.filesystem import Connector

log = getLogger(__name__)


class ContentCache(object):
@cachedproperty
def cache(self):
size = environ.get('CONTENT_CACHE_SIZE')
check = environ.get('CONTENT_CACHE_CHECK')
connector = getUtility(IConnector)
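        # Caching is only enabled when CONTENT_CACHE_SIZE is set and the
        # filesystem-backed connector is in use.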
if size is not None and type(connector) is Connector:
self.size = int(size)
self.check = int(check) if check is not None else self.size / 5
self.connector = connector
self.cache = defaultdict(lambda: dict(used=0, mtimes={}, data={}))
self.hits = self.misses = 0
log.info('initialized content cache (size %s)', size)
return self.cache
else:
            return None

    def get(self, unique_id, key, factory, suffix=''):
cache = self.cache
if cache is None or not FEATURE_TOGGLES.find('content_caching'):
return factory()
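        # Use the resource's modification time to decide whether cached
        # values are still valid.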
try:
mtime = int(self.connector.mtime(unique_id, suffix))
except (ValueError, TypeError):
mtime = None
if mtime is None:
return factory()
obj = cache[unique_id]
obj['used'] += 1
obj['last'] = time()
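        # A changed modification time invalidates every cached value for
        # this resource.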
if mtime != obj['mtimes'].get(suffix):
obj['data'].clear()
obj['mtimes'][suffix] = mtime
cache = obj['data']
if key not in cache:
cache[key] = factory()
self.misses += 1
log.debug('added %s (%s)', key, mtime)
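            # Trigger a periodic cleanup every `check` misses so the cache
            # stays within its configured size.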
if self.misses % self.check == 0:
self.cleanup()
else:
self.hits += 1
        return cache[key]

    def cleanup(self):
cache = self.cache
over = len(cache) - self.size
log.info('size: %d/%d, hits: %d, misses: %d',
over + self.size, self.size, self.hits, self.misses)
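        # Evict the least recently used entries once the cache exceeds its
        # configured size.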
if over > 0:
log.debug('removing %d items', over)
last = sorted((cache[uid]['last'], uid) for uid in cache)
for _, (_, uid) in zip(range(over), last):
                del cache[uid]

    @property
def usage(self):
cache = self.cache
stats = (dict(uid=uid, used=cache[uid]['used']) for uid in cache)
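        # Sorted ascending by use count, i.e. least used entries first.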
        return sorted(stats, key=itemgetter('used'))

    def info(self):
cache = self.cache
usage = {info['uid']: info['used'] for info in reversed(self.usage)}
return dict(
size=self.size,
count=len(cache),
hits=self.hits,
misses=self.misses,
            usage=usage)


__cache = ContentCache()
get = __cache.get
info = __cache.info
| [((405, 424), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (414, 424), False, 'from logging import getLogger\n'), ((512, 545), 'os.environ.get', 'environ.get', (['"""CONTENT_CACHE_SIZE"""'], {}), "('CONTENT_CACHE_SIZE')\n", (523, 545), False, 'from os import environ\n'), ((562, 596), 'os.environ.get', 'environ.get', (['"""CONTENT_CACHE_CHECK"""'], {}), "('CONTENT_CACHE_CHECK')\n", (573, 596), False, 'from os import environ\n'), ((617, 639), 'zope.component.getUtility', 'getUtility', (['IConnector'], {}), '(IConnector)\n', (627, 639), False, 'from zope.component import getUtility\n'), ((1565, 1571), 'time.time', 'time', ([], {}), '()\n', (1569, 1571), False, 'from time import time\n'), ((1219, 1258), 'zeit.cms.content.sources.FEATURE_TOGGLES.find', 'FEATURE_TOGGLES.find', (['"""content_caching"""'], {}), "('content_caching')\n", (1239, 1258), False, 'from zeit.cms.content.sources import FEATURE_TOGGLES\n'), ((2618, 2636), 'operator.itemgetter', 'itemgetter', (['"""used"""'], {}), "('used')\n", (2628, 2636), False, 'from operator import itemgetter\n')] |
genialis/genesis-genapi | genesis/project.py | dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a | """Project"""
from __future__ import absolute_import, division, print_function, unicode_literals


class GenProject(object):
"""Genesais project annotation."""
def __init__(self, data, gencloud):
for field in data:
setattr(self, field, data[field])
self.gencloud = gencloud
self.id = getattr(self, 'id', None) # pylint: disable=invalid-name
        self.name = getattr(self, 'name', None)

    def data_types(self):
"""Return a list of data types."""
data = self.gencloud.project_data(self.id)
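        # Distinct types of all data objects in this project, returned sorted.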
        return sorted(set(d.type for d in data))

    def data(self, **query):
"""Query for Data object annotation."""
data = self.gencloud.project_data(self.id)
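        # Restrict the annotation query to data objects attached to this project.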
query['case_ids__contains'] = self.id
ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects'])
        return [d for d in data if d.id in ids]

    def find(self, filter_str):
"""Filter Data object annotation."""
        raise NotImplementedError()

    def __str__(self):
        return self.name or 'n/a'

    def __repr__(self):
return u"GenProject: {} - {}".format(self.id, self.name)
| [] |