the-stack_106_14363
|
# -*- coding: utf-8 -*-
"""Utility function for estimator testing.
copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "fkiraly"]
from inspect import isclass, signature
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_random_state
from sktime.alignment.base import BaseAligner
from sktime.annotation.base import BaseSeriesAnnotator
from sktime.classification.base import BaseClassifier
from sktime.clustering.base import BaseClusterer
from sktime.datatypes._panel._check import is_nested_dataframe
from sktime.dists_kernels import BasePairwiseTransformer, BasePairwiseTransformerPanel
from sktime.forecasting.base import BaseForecaster
from sktime.regression.base import BaseRegressor
from sktime.tests._config import VALID_ESTIMATOR_TYPES
from sktime.transformations.base import (
BaseTransformer,
_PanelToPanelTransformer,
_PanelToTabularTransformer,
_SeriesToPrimitivesTransformer,
_SeriesToSeriesTransformer,
)
from sktime.utils._testing.annotation import make_annotation_problem
from sktime.utils._testing.forecasting import (
_get_n_columns,
_make_series,
make_forecasting_problem,
)
from sktime.utils._testing.panel import (
_make_panel_X,
make_classification_problem,
make_clustering_problem,
make_regression_problem,
)
def _get_err_msg(estimator):
return (
f"Invalid estimator type: {type(estimator)}. Valid estimator types are: "
f"{VALID_ESTIMATOR_TYPES}"
)
def _list_required_methods(estimator):
"""Return list of required method names (beyond BaseEstimator ones)."""
# all BaseEstimator children must implement these
MUST_HAVE_FOR_ESTIMATORS = [
"fit",
"check_is_fitted",
"is_fitted", # read-only property
"set_params",
"get_params",
]
# prediction/forecasting base classes that must have predict
BASE_CLASSES_THAT_MUST_HAVE_PREDICT = (
BaseClusterer,
BaseRegressor,
BaseForecaster,
)
# transformation base classes that must have transform
BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM = (
BaseTransformer,
BasePairwiseTransformer,
BasePairwiseTransformerPanel,
)
required_methods = []
if isinstance(estimator, BaseEstimator):
required_methods += MUST_HAVE_FOR_ESTIMATORS
if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_PREDICT):
required_methods += ["predict"]
if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM):
required_methods += ["transform"]
if isinstance(estimator, BaseAligner):
required_methods += [
"get_alignment",
"get_alignment_loc",
"get_aligned",
"get_distance",
"get_distance_matrix",
]
return required_methods
def _make_args(estimator, method, **kwargs):
"""Generate testing arguments for estimator methods."""
if method == "fit":
return _make_fit_args(estimator, **kwargs)
if method == "update":
raise NotImplementedError()
elif method in ("predict", "predict_proba", "decision_function"):
return _make_predict_args(estimator, **kwargs)
elif method == "transform":
return _make_transform_args(estimator, **kwargs)
elif method == "inverse_transform":
return _make_inverse_transform_args(estimator, **kwargs)
else:
raise ValueError(f"Method: {method} not supported")
def _make_fit_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
# we need to handle the TransformedTargetForecaster separately
if isinstance(estimator, _SeriesToSeriesTransformer):
y = _make_series(**kwargs)
else:
# create matching n_columns input, if n_columns not passed
# e.g., to give bivariate y to strictly multivariate forecaster
if "n_columns" not in kwargs.keys():
n_columns = _get_n_columns(
estimator.get_tag(tag_name="scitype:y", raise_error=False)
)[0]
y = make_forecasting_problem(n_columns=n_columns, **kwargs)
else:
y = make_forecasting_problem(**kwargs)
fh = 1
X = None
return y, X, fh
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(**kwargs)
return (X,)
elif isinstance(estimator, BaseClassifier):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseRegressor):
return make_regression_problem(**kwargs)
elif isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, (_PanelToTabularTransformer, _PanelToPanelTransformer)):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseTransformer) and estimator.get_tag("requires_y"):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseTransformer):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, BaseClusterer):
return (make_clustering_problem(**kwargs),)
elif isinstance(estimator, BasePairwiseTransformer):
return None, None
elif isinstance(estimator, BasePairwiseTransformerPanel):
return None, None
elif isinstance(estimator, BaseAligner):
X = [_make_series(n_columns=2, **kwargs), _make_series(n_columns=2, **kwargs)]
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_predict_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
fh = 1
return (fh,)
elif isinstance(estimator, (BaseClassifier, BaseRegressor, BaseClusterer)):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(n_timepoints=10, **kwargs)
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_transform_args(estimator, **kwargs):
if isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(
estimator,
(
_PanelToTabularTransformer,
_PanelToPanelTransformer,
),
):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseTransformer):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, BasePairwiseTransformer):
d = {"col1": [1, 2], "col2": [3, 4]}
return pd.DataFrame(d), pd.DataFrame(d)
elif isinstance(estimator, BasePairwiseTransformerPanel):
d = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
X = [d, d]
return X, X
else:
raise ValueError(_get_err_msg(estimator))
def _make_inverse_transform_args(estimator, **kwargs):
if isinstance(estimator, _SeriesToPrimitivesTransformer):
X = _make_primitives(**kwargs)
return (X,)
elif isinstance(estimator, _SeriesToSeriesTransformer):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, _PanelToTabularTransformer):
X = _make_tabular_X(**kwargs)
return (X,)
elif isinstance(estimator, _PanelToPanelTransformer):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseTransformer):
X = _make_series(**kwargs)
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_primitives(n_columns=1, random_state=None):
"""Generate one or more primitives, for checking inverse-transform."""
rng = check_random_state(random_state)
if n_columns == 1:
return rng.rand()
    # rand() takes dimensions positionally; passing size= would raise a TypeError
    return rng.rand(n_columns)
def _make_tabular_X(n_instances=20, n_columns=1, return_numpy=True, random_state=None):
"""Generate tabular X, for checking inverse-transform."""
rng = check_random_state(random_state)
X = rng.rand(n_instances, n_columns)
if return_numpy:
return X
else:
return pd.DataFrame(X)
def _compare_nested_frame(func, x, y, **kwargs):
"""Compare two nested pd.DataFrames.
Parameters
----------
func : function
Function from np.testing for comparing arrays.
x : pd.DataFrame
y : pd.DataFrame
kwargs : dict
Keyword argument for function
Raises
------
AssertionError
If x and y are not equal
"""
# We iterate over columns and rows to make cell-wise comparisons.
# Tabularizing the data first would simplify this, but does not
# work for unequal length data.
# In rare cases, x and y may be empty (e.g. TSFreshRelevantFeatureExtractor) and
# we cannot compare individual cells, so we simply check if everything else is
# equal here.
assert isinstance(x, pd.DataFrame)
if x.empty:
assert_frame_equal(x, y)
elif is_nested_dataframe(x):
# Check if both inputs have the same shape
if not x.shape == y.shape:
raise ValueError("Found inputs with different shapes")
# Iterate over columns
n_columns = x.shape[1]
for i in range(n_columns):
xc = x.iloc[:, i].tolist()
yc = y.iloc[:, i].tolist()
# Iterate over rows, checking if individual cells are equal
for xci, yci in zip(xc, yc):
func(xci, yci, **kwargs)
def _assert_array_almost_equal(x, y, decimal=6, err_msg=""):
func = np.testing.assert_array_almost_equal
if isinstance(x, pd.DataFrame):
_compare_nested_frame(func, x, y, decimal=decimal, err_msg=err_msg)
else:
func(x, y, decimal=decimal, err_msg=err_msg)
def _assert_array_equal(x, y, err_msg=""):
func = np.testing.assert_array_equal
if isinstance(x, pd.DataFrame):
_compare_nested_frame(func, x, y, err_msg=err_msg)
else:
func(x, y, err_msg=err_msg)
def _get_args(function, varargs=False):
"""Get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [
key
for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
]
if varargs:
varargs = [
param.name
for param in params.values()
if param.kind == param.VAR_POSITIONAL
]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _has_capability(est, method: str) -> bool:
"""Check whether estimator has capability of method."""
def get_tag(est, tag_name, tag_value_default=None):
if isclass(est):
return est.get_class_tag(
tag_name=tag_name, tag_value_default=tag_value_default
)
else:
return est.get_tag(tag_name=tag_name, tag_value_default=tag_value_default)
if not hasattr(est, method):
return False
if method == "inverse_transform":
return get_tag(est, "capability:inverse_transform", False)
if method in [
"predict_proba",
"predict_interval",
"predict_quantiles",
"predict_var",
]:
ALWAYS_HAVE_PREDICT_PROBA = (BaseClassifier, BaseClusterer)
# all classifiers and clusterers implement predict_proba
if method == "predict_proba" and isinstance(est, ALWAYS_HAVE_PREDICT_PROBA):
return True
return get_tag(est, "capability:pred_int", False)
return True
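# ---------------------------------------------------------------------------
# A minimal usage sketch (an editorial addition, not part of the original
# module): how _make_args and _has_capability are typically combined when
# smoke-testing a single estimator. NaiveForecaster and the exact call
# pattern below are illustrative assumptions, not the project's test loop.
if __name__ == "__main__":
    from sktime.forecasting.naive import NaiveForecaster

    est = NaiveForecaster()
    y, X, fh = _make_args(est, "fit")  # forecasters receive (y, X, fh)
    est.fit(y, X, fh)
    if _has_capability(est, "predict"):
        predict_args = _make_args(est, "predict")  # (fh,) for forecasters
        print(est.predict(*predict_args))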
|
the-stack_106_14367
|
# Getting Started with Raspberry Pi Ch 4 Example 1
import pygame
width = 640
height = 480
radius = 100
fill = 0
pygame.init()
window = pygame.display.set_mode((width, height))
window.fill(pygame.Color(255, 255, 255))
while True:
pygame.draw.circle(window,
pygame.Color(255, 0, 0),
(width/2, height/2),
radius, fill)
pygame.display.update()
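# An editorial note (not in the book example): as written, the loop never
# services pygame's event queue, so the window becomes unresponsive and cannot
# be closed cleanly. A common variant of the same loop that fixes this is:
#
#     while True:
#         for event in pygame.event.get():
#             if event.type == pygame.QUIT:
#                 pygame.quit()
#                 raise SystemExit
#         pygame.draw.circle(window, pygame.Color(255, 0, 0),
#                            (width // 2, height // 2), radius, fill)
#         pygame.display.update()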
|
the-stack_106_14368
|
#!/usr/bin/env python
'''
Modules to build graphs out of sessions of visited topics
and cluster them based on ancestors in the Philosophy tree.
'''
import igraph as ig
import tagspace as ts
import itertools
import operator
from math import log10
class ClusterTopics:
''' Class containing subroutines to build a full graph,
assign edge weights and apply Newman clustering. '''
def __init__(self, topics):
self.topics = topics
self.graph = self.make_complete_graph()
self.assign_edge_weights()
self.cluster = self.cluster()
def make_complete_graph(self):
''' Make a complete graph of topics with initial unit weights. '''
graph = ig.Graph.Full(len(self.topics))
graph.vs["name"] = self.topics
graph.es["weight"] = 1.0
return graph
def assign_edge_weights(self):
''' Assign edge weights between nodes based on common ancestors. '''
vertices = self.graph.vs["name"]
vertex_pairs = list(itertools.combinations(vertices, 2))
for pair in vertex_pairs:
self.graph[pair[0], pair[1]] = len(find_intersection(pair[0], pair[1]))
return self.graph
def cluster(self):
''' Run Newman clustering on the graph. '''
return self.graph.community_fastgreedy()
class TopicRelation:
''' Class containing subroutines to segment article sessions
and associate sessions with tags. '''
def __init__(self, queries, session_interval):
self.queries = segment_sessions(queries, session_interval)
self.article_paths = []
for session in self.queries:
self.article_paths.append(self.build_paths(session))
self.topic_weights = {}
for path in self.article_paths:
self.topic_weights.update(self.most_common_ancestor(path))
def build_paths(self, articles):
''' Build paths to philosophy for a set of articles. '''
article_paths = {}
for article in articles:
article_paths[article] = ts.IterateArticles(article).traverse()
return article_paths
def most_common_ancestor(self, article_paths):
''' Find the most common ancestor of several article paths
based on the path to Philosophy. '''
ancestors = {}
article_path_list = article_paths.values()
for path in article_path_list:
for topic_idx in range(1, len(path)):
if path[topic_idx] in ancestors:
ancestors[path[topic_idx]] += 3.0 / topic_idx
else:
ancestors[path[topic_idx]] = 3.0 / topic_idx
        max_topic = max(ancestors.items(), key=operator.itemgetter(1))[0]
return {max_topic: ancestors[max_topic]}
def segment_sessions(queries, session_interval, accumulator=[]):
''' Segment a list of queries into sessions, each
spanning a session interval. '''
end_time = queries[0][1] + session_interval
session_list = []
for query_idx in range(len(queries)):
if queries[query_idx][1] <= end_time:
session_list.append(queries[query_idx][0])
else:
return segment_sessions(queries[query_idx:],
session_interval,
accumulator=accumulator + [session_list])
return accumulator + [session_list]
def find_intersection(topic_x, topic_y):
''' Find the intersection of two article paths to Philosophy. '''
results_x = ts.IterateArticles(topic_x).traverse()
results_y = ts.IterateArticles(topic_y).traverse()
return set(results_x).intersection(results_y)
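# A minimal usage sketch for segment_sessions (an editorial addition; the
# query format is assumed to be (topic, timestamp) tuples, matching how the
# function indexes queries[i][0] and queries[i][1] above):
if __name__ == '__main__':
    example_queries = [('Epistemology', 0), ('Logic', 120),
                       ('Botany', 4000), ('Fern', 4100)]
    # With a 1800-second interval the first two queries fall into one session
    # and the last two into another.
    print(segment_sessions(example_queries, session_interval=1800))
    # -> [['Epistemology', 'Logic'], ['Botany', 'Fern']]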
|
the-stack_106_14372
|
#!/usr/bin/env python3
"""Alta3 Research | RZFeeser
CHALLENGE 01 - Solution"""
def main():
user_input = input("Please enter an IPv4 IP address: ")
## the line below creates a single string that is passed to print()
# print("You told me the IPv4 address is:" + user_input)
## print() can be given a series of objects separated by a comma
print("You told me the IPv4 address is:", user_input)
# asking user for 'vendor name'
vendor = input("Please input the vendor name: ")
print(vendor)
main()
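# A possible extension (an editorial addition, not part of the challenge
# solution): validating the reply with the standard library's ipaddress module.
def is_valid_ipv4(address):
    import ipaddress
    try:
        ipaddress.IPv4Address(address)
        return True
    except ipaddress.AddressValueError:
        return False
# e.g. is_valid_ipv4("10.0.0.1") -> True, is_valid_ipv4("999.1.1.1") -> False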
|
the-stack_106_14374
|
from optimizers.darts.operations import *
from optimizers.darts.utils import drop_path
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
print(C_prev_prev, C_prev, C)
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if reduction:
op_names, indices = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
op_names, indices = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, concat, reduction)
def _compile(self, C, op_names, indices, concat, reduction):
assert len(op_names) == len(indices)
self._steps = len(op_names) // 2
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for name, index in zip(op_names, indices):
stride = 2 if reduction and index < 2 else 1
op = OPS[name](C, stride, True)
self._ops += [op]
self._indices = indices
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[2 * i]]
h2 = states[self._indices[2 * i + 1]]
op1 = self._ops[2 * i]
op2 = self._ops[2 * i + 1]
h1 = op1(h1)
h2 = op2(h2)
if self.training and drop_prob > 0.:
if not isinstance(op1, Identity):
h1 = drop_path(h1, drop_prob)
if not isinstance(op2, Identity):
h2 = drop_path(h2, drop_prob)
s = h1 + h2
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 8x8"""
super(AuxiliaryHeadCIFAR, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
class AuxiliaryHeadImageNet(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 14x14"""
super(AuxiliaryHeadImageNet, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
# NOTE: This batchnorm was omitted in my earlier implementation due to a typo.
# Commenting it out for consistency with the experiments in the paper.
# nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
class NetworkCIFAR(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(NetworkCIFAR, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
stem_multiplier = 3
C_curr = stem_multiplier * C
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers // 3, 2 * layers // 3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
if i == 2 * layers // 3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
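    # NOTE (editorial): self.drop_path_prob is not set in __init__; callers
    # (typically the training script) are expected to assign it on the model
    # before forward() is invoked, since forward() reads it unconditionally.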
def forward(self, input):
logits_aux = None
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
if i == 2 * self._layers // 3:
if self._auxiliary and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), -1))
return logits, logits_aux
class NetworkImageNet(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(NetworkImageNet, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
self.stem0 = nn.Sequential(
nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C // 2),
nn.ReLU(inplace=True),
nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C),
)
self.stem1 = nn.Sequential(
nn.ReLU(inplace=True),
nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C),
)
C_prev_prev, C_prev, C_curr = C, C, C
self.cells = nn.ModuleList()
reduction_prev = True
for i in range(layers):
if i in [layers // 3, 2 * layers // 3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
if i == 2 * layers // 3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
self.global_pooling = nn.AvgPool2d(7)
self.classifier = nn.Linear(C_prev, num_classes)
def forward(self, input):
logits_aux = None
s0 = self.stem0(input)
s1 = self.stem1(s0)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
if i == 2 * self._layers // 3:
if self._auxiliary and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), -1))
return logits, logits_aux
|
the-stack_106_14377
|
import json
import logging
import time
import win32com.client
from typing import Any, Optional
from thefuzz import fuzz, process
class SapObject:
def __init__(self, session: win32com.client.CDispatch, id: Optional[str] = None) -> None:
self.session: win32com.client.CDispatch = session
self.tree: dict = json.loads(session.getObjectTree(session.id))
self.id_list: list = []
self.enumerate_object_tree(tree=self.tree)
self.id: str | None = id if id is not None else None
self.element: win32com.client.CDispatch | None
self.is_container: bool
self.name: str
self.parent: win32com.client.CDispatch | None
self.type: str
self.type_as_number: int
if self.id is not None:
self.get_element()
def enumerate_object_tree(self, tree: Any) -> None:
if isinstance(tree, dict):
for key, tl_tree in tree.items():
if key.upper() == "CHILDREN":
self.enumerate_object_tree(tree=tl_tree)
elif key.upper() == "PROPERTIES":
self.enumerate_object_tree(tree=tl_tree)
elif key.upper() == "ID":
self.id_list.append(tl_tree)
elif isinstance(tree, list):
for item in tree:
self.enumerate_object_tree(tree=item)
elif isinstance(tree, str):
self.id_list.append(tree)
def get_element(self) -> tuple[str, win32com.client.CDispatch | None]:
try:
self.element = self.session.findById(self.id)
self.is_container = self.element.containerType
self.name = self.element.name
self.parent = self.element.parent
self.type = self.element.type
self.type_as_number = self.element.typeAsNumber
return ("", self.element)
except Exception as err:
return (err, None)
def visualize(self, delay: Optional[float] = 1.0) -> None:
try:
self.element.Visualize(True)
time.sleep(delay)
self.element.Visualize(False)
        except Exception as err:
            # The original referenced an undefined ``self.logger``; fall back to the
            # standard logging module so the failure is still reported.
            logging.getLogger(__name__).error(
                "Unhandled error during call to SapObject.visualize | %s", err
            )
def find_element(self, search: str) -> dict:
result = process.extractOne(search, self.id_list, scorer=fuzz.partial_token_sort_ratio)
return {"Search": search, "ID": result[0], "Score": result[1]}
def get_element_history(self) -> list:
if hasattr(self.element, "historyList"):
return [i for i in self.element.historyList]
return []
|
the-stack_106_14379
|
import pymysql
class MySql(object):
def __init__(self, host=None,db=None,user=None,pwd=None, port=0):
self.host = host
self.db = db
self.user = user
self.pwd = pwd
self.login(host, db, user, pwd, port)
def login(self, host,db, user, pwd, port):
        self.conn = pymysql.connect(host=host, user=user, password=pwd, port=port, charset='utf8mb4')
self.cursor = self.conn.cursor()
cmd = f'create database if not exists {db}'
self.cursor.execute(cmd)
self.cursor.execute(f'use {db}')
# self.conn.commit()
print("已连接到MySql")
def close(self):
if self.cursor is not None:
self.cursor.close()
if self.conn is not None:
self.conn.close()
print("MySql连接已断开")
def test():
mysql = MySql('47.94.99.0', 'cone', 'cone', '3.1415926')
mysql.close()
if __name__ == '__main__':
test()
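# A minimal usage sketch (an editorial addition; host, credentials and the
# table are placeholders and assume a reachable MySQL server):
def example_usage():
    db = MySql(host='localhost', db='demo', user='root', pwd='secret', port=3306)
    db.cursor.execute('create table if not exists person (id int primary key, name varchar(32))')
    db.cursor.execute('replace into person (id, name) values (%s, %s)', (1, 'alice'))
    db.conn.commit()
    db.cursor.execute('select id, name from person')
    print(db.cursor.fetchall())
    db.close()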
|
the-stack_106_14381
|
#Code for Merge Sort in Python
#Rishabh Pathak
def mergeSort(lst):
if len(lst) > 1:
mid = len(lst) // 2
#dividing the list into left and right halves
left = lst[:mid]
right = lst[mid:]
#recursive call for further divisions
mergeSort(left)
mergeSort(right)
#iterators for left and right halves
i = 0
j = 0
#iterator for main list
k = 0
while i < len(left) and j < len(right):
#comparing values from left half with the values from right half and inserting the smaller value in the main list
if left[i] < right[j]:
lst[k] = left[i]
i += 1 #incrementing value of iterator for left half
else:
lst[k] = right[j]
j += 1 #incrementing value of iterator for right half
k += 1 #incrementing value of iterator for main list
#for all the remaining values
while i < len(left):
lst[k] = left[i]
i += 1
k += 1
while j < len(right):
lst[k] = right[j]
j += 1
k += 1
#an example
myList = [5, 7, 6, 1, 9]
mergeSort(myList)
print(myList)
#Time complexity: O(nlogn)
|
the-stack_106_14387
|
from django.contrib import admin
from django.urls import path, include
from . import views
app_name = "room"
urlpatterns = [
#
path('room/<slug:slug>/', views.room, name="room"),
path('room/delete/<slug:slug>/', views.deleteRoom, name="delete-room"),
path('create-room/', views.createRoom, name="create-room"),
path('joined<int:room_id>/',views.joinedRoom ,name="joined-room"),
path('quitter<int:room_id>/',views.quitterRoom ,name="quitter-room"),
]
|
the-stack_106_14388
|
# -*- coding: utf-8 -*-
from wemake_python_styleguide.violations.complexity import (
OverusedStringViolation,
)
from wemake_python_styleguide.visitors.ast.builtins import WrongStringVisitor
string_actions = """
first = {0}
second({0})
third[{0}]
'new' + {0}
"""
string_value = '"same-string"'
def test_string_overuse_settings(
assert_errors,
parse_ast_tree,
options,
):
"""Ensures that settings for string over-use work."""
tree = parse_ast_tree(string_actions.format('"same-string"'))
option_values = options(max_string_usages=4)
visitor = WrongStringVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [])
def test_string_overuse(
assert_errors,
assert_error_text,
parse_ast_tree,
default_options,
):
"""Ensures that over-used strings raise violations."""
tree = parse_ast_tree(string_actions.format(string_value))
visitor = WrongStringVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [OverusedStringViolation])
assert_error_text(visitor, string_value.replace('"', ''))
|
the-stack_106_14389
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion controllers."""
from constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import state_domain
from core.domain import suggestion_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(suggestion_models, feedback_models) = models.Registry.import_models([
models.NAMES.suggestion, models.NAMES.feedback])
class SuggestionUnitTests(test_utils.GenericTestBase):
EXP_ID = 'exp1'
TRANSLATION_LANGUAGE_CODE = 'en'
AUTHOR_EMAIL = '[email protected]'
AUTHOR_EMAIL_2 = '[email protected]'
NORMAL_USER_EMAIL = '[email protected]'
def setUp(self):
super(SuggestionUnitTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.signup(self.AUTHOR_EMAIL_2, 'author2')
self.signup(self.NORMAL_USER_EMAIL, 'normalUser')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.author_id_2 = self.get_user_id_from_email(self.AUTHOR_EMAIL_2)
self.reviewer_id = self.editor_id
self.set_admins([self.ADMIN_USERNAME])
self.editor = user_services.UserActionsInfo(self.editor_id)
# Login and create exploration and suggestions.
self.login(self.EDITOR_EMAIL)
exploration = (
self.save_new_linear_exp_with_state_names_and_interactions(
self.EXP_ID, self.editor_id, ['State 1', 'State 2', 'State 3'],
['TextInput'], category='Algebra'))
self.old_content = state_domain.SubtitledHtml(
'content', 'old content html').to_dict()
exploration.states['State 1'].update_content(self.old_content)
exploration.states['State 2'].update_content(self.old_content)
exploration.states['State 3'].update_content(self.old_content)
exp_services._save_exploration(self.editor_id, exploration, '', []) # pylint: disable=protected-access
rights_manager.publish_exploration(self.editor, self.EXP_ID)
rights_manager.assign_role_for_exploration(
self.editor, self.EXP_ID, self.owner_id,
rights_manager.ROLE_EDITOR)
self.new_content = state_domain.SubtitledHtml(
'content', 'new content html').to_dict()
self.resubmit_change_content = state_domain.SubtitledHtml(
'content', 'resubmit change content html').to_dict()
self.logout()
self.login(self.AUTHOR_EMAIL)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 1',
'final_reviewer_id': self.reviewer_id,
}, csrf_token=csrf_token)
self.logout()
self.login(self.AUTHOR_EMAIL_2)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 2',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 2',
'final_reviewer_id': self.reviewer_id,
}, csrf_token=csrf_token)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 3',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 3',
'final_reviewer_id': self.reviewer_id,
}, csrf_token=csrf_token)
self.logout()
def test_create_suggestion(self):
self.login(self.AUTHOR_EMAIL_2)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
exploration = exp_services.get_exploration_by_id(self.EXP_ID)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 3',
'new_value': self.new_content
},
'description': 'change again to state 3',
}, csrf_token=csrf_token)
suggestions = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions']
self.assertEqual(len(suggestions), 3)
self.logout()
def test_accept_suggestion(self):
exploration = exp_services.get_exploration_by_id(self.EXP_ID)
# Test editor can accept successfully.
self.login(self.EDITOR_EMAIL)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
exploration = exp_services.get_exploration_by_id(self.EXP_ID)
self.assertEqual(
exploration.states[suggestion_to_accept[
'change']['state_name']].content.html,
suggestion_to_accept['change']['new_value']['html'])
self.logout()
# Testing user without permissions cannot accept.
self.login(self.NORMAL_USER_EMAIL)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token, expected_status_int=401)
self.logout()
# Testing that author cannot accept own suggestion.
self.login(self.AUTHOR_EMAIL_2)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token, expected_status_int=401)
# Testing users with scores above threshold can accept.
self.login(self.AUTHOR_EMAIL)
suggestion_services.increment_score_for_user(
self.author_id, 'content.Algebra', 15)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
self.logout()
# Testing admins can accept suggestions.
self.login(self.ADMIN_EMAIL)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][1]
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][1]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
self.logout()
def test_suggestion_list_handler(self):
suggestions = self.get_json(
'%s?author_id=%s&target_type=%s&target_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2,
suggestion_models.TARGET_TYPE_EXPLORATION, self.EXP_ID)
)['suggestions']
self.assertEqual(len(suggestions), 2)
def test_resubmit_rejected_suggestion(self):
self.login(self.EDITOR_EMAIL)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])[0]
suggestion_services.reject_suggestion(
suggestion, self.reviewer_id, 'reject message')
self.logout()
self.login(self.AUTHOR_EMAIL)
response = self.get_html_response('/explore/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('%s/resubmit/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion.suggestion_id), {
'summary_message': 'summary message',
'action': u'resubmit',
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': self.resubmit_change_content,
'old_value': self.old_content
}
}, csrf_token=csrf_token)
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])[0]
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(
suggestion.change.new_value['html'],
self.resubmit_change_content['html'])
self.assertEqual(
suggestion.change.cmd, exp_domain.CMD_EDIT_STATE_PROPERTY)
self.assertEqual(
suggestion.change.property_name, exp_domain.STATE_PROPERTY_CONTENT)
self.assertEqual(
suggestion.change.state_name, 'State 1')
self.logout()
class QuestionSuggestionTests(test_utils.GenericTestBase):
AUTHOR_EMAIL = '[email protected]'
AUTHOR_EMAIL_2 = '[email protected]'
# Needs to be 12 characters long.
SKILL_ID = 'skill1234567'
SKILL_DESCRIPTION = 'skill to link question to'
TOPIC_ID = 'topic'
def setUp(self):
super(QuestionSuggestionTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.save_new_skill(
self.SKILL_ID, self.admin_id, self.SKILL_DESCRIPTION)
self.question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_schema_version': (
feconf.CURRENT_STATES_SCHEMA_VERSION)
}
self.login(self.AUTHOR_EMAIL)
response = self.get_html_response(feconf.CREATOR_DASHBOARD_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_TOPIC,
'target_id': self.TOPIC_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': None
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token)
self.logout()
def test_query_question_suggestions(self):
suggestions = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions']
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertEqual(
suggestion['suggestion_type'],
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
self.assertEqual(suggestion['target_id'], self.TOPIC_ID)
self.assertEqual(
suggestion['target_type'], suggestion_models.TARGET_TYPE_TOPIC)
self.assertEqual(
suggestion['change']['cmd'],
question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION)
def test_accept_question_suggestion(self):
suggestion_to_accept = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions'][0]
self.login(self.ADMIN_EMAIL)
response = self.get_html_response(feconf.CREATOR_DASHBOARD_URL)
csrf_token = self.get_csrf_token_from_response(response)
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
self.put_json('%s/topic/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'This looks good!',
'skill_id': self.SKILL_ID
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
questions, skill_descriptions, _ = (
question_services.get_question_summaries_and_skill_descriptions(
1, [self.SKILL_ID], ''))
self.assertEqual(len(questions), 1)
self.assertEqual(questions[0].creator_id, self.author_id)
self.assertEqual(skill_descriptions[0], self.SKILL_DESCRIPTION)
self.assertEqual(
questions[0].question_content,
self.question_dict['question_state_data']['content']['html']
)
thread_messages = feedback_services.get_messages(
suggestion_to_accept['suggestion_id'])
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(last_message.text, 'This looks good!')
|
the-stack_106_14390
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2016-2017 ARM Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import argparse
import json
import logging
import os
import sys
from raas_helper import RaasProvider
class StoreRaasJsonConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
prospective_file = values
try:
with open(prospective_file, 'r') as fh:
data = json.load(fh)
setattr(namespace, 'host', data['host'])
setattr(namespace, 'port', data['port'])
setattr(namespace, 'username', data['username'])
setattr(namespace, 'password', data['password'])
except KeyError as e:
raise argparse.ArgumentTypeError('malformed JSON file "{0}" - {1} not found'.format(prospective_file, e))
except ValueError as e:
raise argparse.ArgumentTypeError('malformed JSON file "{0}" - {1}'.format(prospective_file, e))
except IOError as e:
            raise argparse.ArgumentTypeError('failed to read "{0}" - {1}'.format(prospective_file, e))
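# An example of the JSON layout this action expects (an editorial sketch; the
# host value is a placeholder, the other values mirror the CLI defaults below):
#
#     {
#         "host": "raas.example.com",
#         "port": 8000,
#         "username": "user",
#         "password": "user"
#     }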
def get_parser():
parser = argparse.ArgumentParser(
description='RAAS test runner',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'image',
help='binary image file to be flashed to a target device',
type=argparse.FileType('r')
)
parser.add_argument(
'-l',
'--serial-log',
metavar='FILE',
help='file name to store log captured from the serial interface',
type=argparse.FileType('wt')
)
raas_cfg = parser.add_argument_group('RAAS config')
m_group = raas_cfg.add_mutually_exclusive_group(required=True)
m_group.add_argument(
'--host',
metavar='IP',
help='RAAS server host (IP or DNS name)',
)
m_group.add_argument(
'--raas-json',
metavar='FILE',
action=StoreRaasJsonConfig,
help='RAAS config JSON file with preset values. Take precedence over other RAAS config options'
)
raas_cfg.add_argument(
'--port',
default=8000,
metavar='N',
type=int,
help='RAAS server network port',
)
raas_cfg.add_argument(
'--username',
metavar='NAME',
default='user',
help='RAAS server login username',
)
raas_cfg.add_argument(
'--password',
metavar='PWD',
default='user',
help='RAAS server login password',
)
dut_cfg = parser.add_argument_group('DUT config')
dut_cfg.add_argument(
'-p',
'--platform-type',
metavar='NAME',
default='K64F',
help='DUT platform type'
)
dut_cfg.add_argument(
'-b',
'--baud-rate',
type=int,
metavar='N',
default=115200,
help='DUT serial connection baud-rate'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='set verbose mode'
)
parser.add_argument(
'--no-color',
action='store_true',
help='disable color printing'
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
alloc_timeout = 5000
logging.basicConfig(
level=logging.DEBUG if args.verbose else logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
stream=sys.stdout
)
if not args.no_color:
logging.addLevelName(logging.WARNING, '\033[1;35m{}\033[0m'.format(logging.getLevelName(logging.WARNING)))
logging.addLevelName(logging.ERROR, '\033[1;31m{}\033[0m'.format(logging.getLevelName(logging.ERROR)))
logging.addLevelName(logging.INFO, '\033[1;36m{}\033[0m'.format(logging.getLevelName(logging.INFO)))
if not args.serial_log:
args.serial_log = open(os.path.splitext(args.image.name)[0] + '.log', 'wt')
logging.getLogger('raas-test-runner').info('serial log file name %s', args.serial_log.name)
with RaasProvider(args.host, args.port, args.username, args.password) as raas:
with raas.allocate_device(platform_type=args.platform_type, alloc_timeout=alloc_timeout) as dut:
dut.load_bin(args.image.name)
with dut.open_connection(args.baud_rate, args.serial_log) as dut_connection:
dut.reset()
dut_connection.wait()
if __name__ == '__main__':
main()
|
the-stack_106_14391
|
import KratosMultiphysics
def Factory(settings, Model):
    if not isinstance(settings, KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return CheckAndPrepareSelfweightModelProcess(Model, settings["Parameters"])
## All the processes python should be derived from "Process"
class CheckAndPrepareSelfweightModelProcess(KratosMultiphysics.Process):
"""Prepare the computing model part.
The computing model part is created if it does not exist. Nodes and elements
from the domain sub model parts are added to the computing model part.
Conditions are added from the processes sub model parts.
"""
def __init__(self, main_model_part, Parameters ):
KratosMultiphysics.Process.__init__(self)
self.main_model_part = main_model_part
self.mechanical_model_part_name = Parameters["mechanical_model_part_name"].GetString()
self.mechanical_domain_sub_model_part_list = Parameters["mechanical_domain_sub_model_part_list"]
self.mechanical_loads_sub_model_part_list = Parameters["mechanical_loads_sub_model_part_list"]
self.body_domain_sub_model_part_list = Parameters["body_domain_sub_model_part_list"]
self.body_domain_sub_sub_model_part_list = Parameters["body_domain_sub_sub_model_part_list"]
self.loads_sub_model_part_list = Parameters["loads_sub_model_part_list"]
self.loads_sub_sub_model_part_list = Parameters["loads_sub_sub_model_part_list"]
def Execute(self):
## Construct the mechanical model part:
mechanical_parts = []
for i in range(self.mechanical_domain_sub_model_part_list.size()):
mechanical_parts.append(self.main_model_part.GetSubModelPart(self.mechanical_domain_sub_model_part_list[i].GetString()))
self.main_model_part.CreateSubModelPart(self.mechanical_model_part_name)
mechanical_model_part = self.main_model_part.GetSubModelPart(self.mechanical_model_part_name)
mechanical_model_part.ProcessInfo = self.main_model_part.ProcessInfo
mechanical_model_part.Properties = self.main_model_part.Properties
mechanical_model_part.Set(KratosMultiphysics.ACTIVE)
print("Adding Nodes to Mechanical Model Part")
list_of_ids = set()
for part in mechanical_parts:
for node in part.Nodes:
list_of_ids.add(node.Id)
mechanical_model_part.AddNodes(list(list_of_ids))
print("Adding Elements to Mechanical Model Part")
list_of_ids = set()
for part in mechanical_parts:
for elem in part.Elements:
list_of_ids.add(elem.Id)
mechanical_model_part.AddElements(list(list_of_ids))
# Mechanical Conditions
print("Adding Conditions to Mechanical Model Part")
mechanical_conditions = []
for i in range(self.mechanical_loads_sub_model_part_list.size()):
mechanical_conditions.append(self.main_model_part.GetSubModelPart(self.mechanical_loads_sub_model_part_list[i].GetString()))
list_of_ids = set()
for part in mechanical_conditions:
for cond in part.Conditions:
list_of_ids.add(cond.Id)
mechanical_model_part.AddConditions(list(list_of_ids))
print("Adding Mechanical Sub Sub Model Parts")
# Sub sub model parts
# Body - Joints
for i in range(self.body_domain_sub_model_part_list.size()):
body_sub_model_part = self.main_model_part.GetSubModelPart(self.body_domain_sub_model_part_list[i].GetString())
mechanical_model_part.CreateSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
body_sub_sub_model_part = mechanical_model_part.GetSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
list_of_ids = set()
for node in body_sub_model_part.Nodes:
list_of_ids.add(node.Id)
body_sub_sub_model_part.AddNodes(list(list_of_ids))
list_of_ids = set()
for elem in body_sub_model_part.Elements:
list_of_ids.add(elem.Id)
body_sub_sub_model_part.AddElements(list(list_of_ids))
# Arc-length
for i in range(self.loads_sub_model_part_list.size()):
load_sub_model_part = self.main_model_part.GetSubModelPart(self.loads_sub_model_part_list[i].GetString())
mechanical_model_part.CreateSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
load_sub_sub_model_part = mechanical_model_part.GetSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
list_of_ids = set()
for node in load_sub_model_part.Nodes:
list_of_ids.add(node.Id)
load_sub_sub_model_part.AddNodes(list(list_of_ids))
print(mechanical_model_part)
|
the-stack_106_14392
|
#!/usr/bin/env python
# Run with:
# python -m unittest -v Common.babi.babi_utils_test
import os
import unittest
import babi_utils
from babi_utils import StoryLine, QALine
import utils
def testdata_dir():
this_dir = os.path.dirname(__file__)
return os.path.join(this_dir, "testdata")
EXPECTED_EXAMPLES = [
[StoryLine(1, "Foo bar baz ."),
StoryLine(2, "This is the second sentence ."),
QALine(3, "What is a baz ?", "Foo", [1])],
[StoryLine(1, "Spam and eggs ."),
StoryLine(2, "Eggs and spam ."),
QALine(3, "What goes with eggs ?", "Spam", [1, 2])]
]
EXPECTED_RAW_SENTS = [
"Foo bar baz .",
"This is the second sentence .",
"What is a baz ?", "Foo",
"Spam and eggs .",
"Eggs and spam .",
"What goes with eggs ?", "Spam"
]
EXPECTED_SENTS = [
["foo", "bar", "baz", "."],
["this", "is", "the", "second", "sentence", "."],
["what", "is", "a", "baz", "?"], ["foo"],
["spam", "and", "eggs", "."],
["eggs", "and", "spam", "."],
["what", "goes", "with", "eggs", "?"], ["spam"]
]
class TestCorpus(unittest.TestCase):
def setUp(self):
self.tokenizer = lambda s: s.lower().split()
file_list = ["qa42_fake_data.txt", "doesnt_exist.txt"]
self.corpus = babi_utils.BabiTaskCorpusReader(
testdata_dir(), file_list=file_list,
tokenizer=self.tokenizer)
def test_raw_sents(self):
raw_sents = list(self.corpus.raw_sents())
self.assertEqual(raw_sents, EXPECTED_RAW_SENTS)
def test_sents(self):
sents = list(self.corpus.sents())
self.assertEqual(sents, EXPECTED_SENTS)
def test_words(self):
words = list(self.corpus.words())
expected_words = list(utils.flatten(EXPECTED_SENTS))
self.assertEqual(words, expected_words)
def test_examples(self):
examples = list(self.corpus.examples(tokenize=False))
self.assertEqual(examples, EXPECTED_EXAMPLES)
|
the-stack_106_14393
|
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from typing import Any
from unittest import mock
from unittest.mock import patch
import pandas as pd
import pytest
import tornado.testing
import tornado.web
from streamlit import StreamlitAPIException
from streamlit.components.v1 import component_arrow
from streamlit.components.v1.components import (
ComponentRegistry,
ComponentRequestHandler,
CustomComponent,
declare_component,
)
import streamlit.components.v1 as components
from streamlit.errors import DuplicateWidgetID
from streamlit.proto.Components_pb2 import SpecialArg
from streamlit.type_util import to_bytes
from tests import testutil
from tests.testutil import DeltaGeneratorTestCase
import streamlit as st
URL = "http://not.a.real.url:3001"
PATH = "not/a/real/path"
def _serialize_dataframe_arg(key: str, value: Any) -> SpecialArg:
special_arg = SpecialArg()
special_arg.key = key
component_arrow.marshall(special_arg.arrow_dataframe.data, value)
return special_arg
def _serialize_bytes_arg(key: str, value: Any) -> SpecialArg:
special_arg = SpecialArg()
special_arg.key = key
special_arg.bytes = to_bytes(value)
return special_arg
class DeclareComponentTest(unittest.TestCase):
"""Test component declaration."""
def tearDown(self) -> None:
ComponentRegistry._instance = None
def test_name(self):
"""Test component name generation"""
# Test a component defined in a module with no package
component = components.declare_component("foo", url=URL)
self.assertEqual("components_test.foo", component.name)
# Test a component defined in __init__.py
from component_test_data import component as init_component
self.assertEqual(
"component_test_data.foo",
init_component.name,
)
# Test a component defined in a module within a package
from component_test_data.outer_module import component as outer_module_component
self.assertEqual(
"component_test_data.outer_module.foo",
outer_module_component.name,
)
# Test a component defined in module within a nested package
from component_test_data.nested.inner_module import (
component as inner_module_component,
)
self.assertEqual(
"component_test_data.nested.inner_module.foo",
inner_module_component.name,
)
def test_only_path(self):
"""Succeed when a path is provided."""
def isdir(path):
return path == PATH or path == os.path.abspath(PATH)
with mock.patch(
"streamlit.components.v1.components.os.path.isdir", side_effect=isdir
):
component = components.declare_component("test", path=PATH)
self.assertEqual(PATH, component.path)
self.assertIsNone(component.url)
self.assertEqual(
ComponentRegistry.instance().get_component_path(component.name),
component.abspath,
)
def test_only_url(self):
"""Succeed when a URL is provided."""
component = components.declare_component("test", url=URL)
self.assertEqual(URL, component.url)
self.assertIsNone(component.path)
self.assertEqual(
ComponentRegistry.instance().get_component_path("components_test"),
component.abspath,
)
def test_path_and_url(self):
"""Fail if path AND url are provided."""
with pytest.raises(StreamlitAPIException) as exception_message:
components.declare_component("test", path=PATH, url=URL)
self.assertEqual(
"Either 'path' or 'url' must be set, but not both.",
str(exception_message.value),
)
def test_no_path_and_no_url(self):
"""Fail if neither path nor url is provided."""
with pytest.raises(StreamlitAPIException) as exception_message:
components.declare_component("test", path=None, url=None)
self.assertEqual(
"Either 'path' or 'url' must be set, but not both.",
str(exception_message.value),
)
class ComponentRegistryTest(unittest.TestCase):
"""Test component registration."""
def tearDown(self) -> None:
ComponentRegistry._instance = None
def test_register_component_with_path(self):
"""Registering a component should associate it with its path."""
test_path = "/a/test/component/directory"
def isdir(path):
return path == test_path
registry = ComponentRegistry.instance()
with mock.patch(
"streamlit.components.v1.components.os.path.isdir", side_effect=isdir
):
registry.register_component(
CustomComponent("test_component", path=test_path)
)
self.assertEqual(test_path, registry.get_component_path("test_component"))
def test_register_component_no_path(self):
"""It's not an error to register a component without a path."""
registry = ComponentRegistry.instance()
# Return None when the component hasn't been registered
self.assertIsNone(registry.get_component_path("test_component"))
# And also return None when the component doesn't have a path
registry.register_component(
CustomComponent("test_component", url="http://not.a.url")
)
self.assertIsNone(registry.get_component_path("test_component"))
def test_register_invalid_path(self):
"""We raise an exception if a component is registered with a
non-existent path.
"""
test_path = "/a/test/component/directory"
registry = ComponentRegistry.instance()
with self.assertRaises(StreamlitAPIException) as ctx:
registry.register_component(CustomComponent("test_component", test_path))
self.assertIn("No such component directory", ctx.exception)
def test_register_duplicate_path(self):
"""It's not an error to re-register a component.
(This can happen during development).
"""
test_path_1 = "/a/test/component/directory"
test_path_2 = "/another/test/component/directory"
def isdir(path):
return path in (test_path_1, test_path_2)
registry = ComponentRegistry.instance()
with mock.patch(
"streamlit.components.v1.components.os.path.isdir", side_effect=isdir
):
registry.register_component(CustomComponent("test_component", test_path_1))
registry.register_component(CustomComponent("test_component", test_path_1))
self.assertEqual(test_path_1, registry.get_component_path("test_component"))
registry.register_component(CustomComponent("test_component", test_path_2))
self.assertEqual(test_path_2, registry.get_component_path("test_component"))
class InvokeComponentTest(DeltaGeneratorTestCase):
"""Test invocation of a custom component object."""
def setUp(self):
super().setUp()
self.test_component = components.declare_component("test", url=URL)
def test_only_json_args(self):
"""Test that component with only json args is marshalled correctly."""
self.test_component(foo="bar")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertEqual(self.test_component.name, proto.component_name)
self.assertJSONEqual(
{"foo": "bar", "key": None, "default": None}, proto.json_args
)
self.assertEqual("[]", str(proto.special_args))
def test_only_df_args(self):
"""Test that component with only dataframe args is marshalled correctly."""
raw_data = {
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
}
df = pd.DataFrame(raw_data, columns=["First Name", "Last Name", "Age"])
self.test_component(df=df)
proto = self.get_delta_from_queue().new_element.component_instance
self.assertEqual(self.test_component.name, proto.component_name)
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
self.assertEqual(1, len(proto.special_args))
self.assertEqual(_serialize_dataframe_arg("df", df), proto.special_args[0])
def test_only_list_args(self):
"""Test that component with only list args is marshalled correctly."""
self.test_component(data=["foo", "bar", "baz"])
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"data": ["foo", "bar", "baz"], "key": None, "default": None},
proto.json_args,
)
self.assertEqual("[]", str(proto.special_args))
def test_no_args(self):
"""Test that component with no args is marshalled correctly."""
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
self.assertEqual(self.test_component.name, proto.component_name)
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
self.assertEqual("[]", str(proto.special_args))
def test_bytes_args(self):
self.test_component(foo=b"foo", bar=b"bar")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
self.assertEqual(2, len(proto.special_args))
self.assertEqual(
_serialize_bytes_arg("foo", b"foo"),
proto.special_args[0],
)
self.assertEqual(
_serialize_bytes_arg("bar", b"bar"),
proto.special_args[1],
)
def test_mixed_args(self):
"""Test marshalling of a component with varied arg types."""
df = pd.DataFrame(
{
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
},
columns=["First Name", "Last Name", "Age"],
)
self.test_component(string_arg="string", df_arg=df, bytes_arg=b"bytes")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertEqual(self.test_component.name, proto.component_name)
self.assertJSONEqual(
{"string_arg": "string", "key": None, "default": None},
proto.json_args,
)
self.assertEqual(2, len(proto.special_args))
self.assertEqual(_serialize_dataframe_arg("df_arg", df), proto.special_args[0])
self.assertEqual(
_serialize_bytes_arg("bytes_arg", b"bytes"), proto.special_args[1]
)
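    # The assertions above imply a simple split of keyword arguments when a
    # component call is marshalled: JSON-serializable values travel in
    # proto.json_args, while bytes and DataFrame values become special_args.
    # A rough sketch of that split (illustrative, not the production code):
    #
    #     json_args, special_args = {}, []
    #     for name, value in kwargs.items():
    #         if isinstance(value, bytes):
    #             special_args.append(_serialize_bytes_arg(name, value))
    #         elif isinstance(value, pd.DataFrame):
    #             special_args.append(_serialize_dataframe_arg(name, value))
    #         else:
    #             json_args[name] = value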
def test_duplicate_key(self):
"""Two components with the same `key` should throw DuplicateWidgetID exception"""
self.test_component(foo="bar", key="baz")
with self.assertRaises(DuplicateWidgetID):
self.test_component(key="baz")
def test_key_sent_to_frontend(self):
"""We send the 'key' param to the frontend (even if it's None)."""
# Test a string key
self.test_component(key="baz")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": "baz", "default": None}, proto.json_args)
# Test an empty key
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
def test_simple_default(self):
"""Test the 'default' param with a JSON value."""
return_value = self.test_component(default="baz")
self.assertEqual("baz", return_value)
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": "baz"}, proto.json_args)
def test_bytes_default(self):
"""Test the 'default' param with a bytes value."""
return_value = self.test_component(default=b"bytes")
self.assertEqual(b"bytes", return_value)
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None}, proto.json_args)
self.assertEqual(
_serialize_bytes_arg("default", b"bytes"),
proto.special_args[0],
)
def test_df_default(self):
"""Test the 'default' param with a DataFrame value."""
df = pd.DataFrame(
{
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
},
columns=["First Name", "Last Name", "Age"],
)
return_value = self.test_component(default=df)
self.assertTrue(df.equals(return_value), "df != return_value")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None}, proto.json_args)
self.assertEqual(
_serialize_dataframe_arg("default", df),
proto.special_args[0],
)
def assertJSONEqual(self, a, b):
"""Asserts that two JSON dicts are equal. If either arg is a string,
it will be first converted to a dict with json.loads()."""
# Ensure both objects are dicts.
dict_a = a if isinstance(a, dict) else json.loads(a)
dict_b = b if isinstance(b, dict) else json.loads(b)
self.assertEqual(dict_a, dict_b)
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
self.assertEqual(proto.form_id, "")
@patch("streamlit._is_running_with_streamlit", new=True)
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("foo"):
self.test_component()
# 2 elements will be created: form block, widget
self.assertEqual(len(self.get_all_deltas_from_queue()), 2)
form_proto = self.get_delta_from_queue(0).add_block
component_instance_proto = self.get_delta_from_queue(
1
).new_element.component_instance
self.assertEqual(component_instance_proto.form_id, form_proto.form.form_id)
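# The endpoint tests below rely only on behaviour made observable by their
# assertions: a registered component whose file opens successfully is served
# with HTTP 200 and the raw file bytes, an unknown component name yields a
# 404 with body b"not found", and an OSError while reading yields a 404 with
# body b"read error".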
class ComponentRequestHandlerTest(tornado.testing.AsyncHTTPTestCase):
"""Test /component endpoint."""
def tearDown(self) -> None:
ComponentRegistry._instance = None
def get_app(self):
self.registry = ComponentRegistry()
return tornado.web.Application(
[
(
"/component/(.*)",
ComponentRequestHandler,
dict(registry=self.registry.instance()),
)
]
)
def _request_component(self, path):
return self.fetch("/component/%s" % path, method="GET")
def test_success_request(self):
"""Test request success when valid parameters are provided."""
with mock.patch("streamlit.components.v1.components.os.path.isdir"):
# We don't need the return value in this case.
declare_component("test", path=PATH)
with mock.patch(
"streamlit.components.v1.components.open",
mock.mock_open(read_data="Test Content"),
):
response = self._request_component("components_test.test")
self.assertEqual(200, response.code)
self.assertEqual(b"Test Content", response.body)
def test_invalid_component_request(self):
"""Test request failure when invalid component name is provided."""
response = self._request_component("invalid_component")
self.assertEqual(404, response.code)
self.assertEqual(b"not found", response.body)
def test_invalid_content_request(self):
"""Test request failure when invalid content (file) is provided."""
with mock.patch("streamlit.components.v1.components.os.path.isdir"):
declare_component("test", path=PATH)
with mock.patch("streamlit.components.v1.components.open") as m:
m.side_effect = OSError("Invalid content")
response = self._request_component("components_test.test")
self.assertEqual(404, response.code)
self.assertEqual(
b"read error",
response.body,
)
def test_support_binary_files_request(self):
"""Test support for binary files reads."""
def _open_read(m, payload):
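            # Emulate open(): inspect the positional mode and the encoding
            # keyword that the handler passed to the mocked open() call, then
            # hand back a binary (BytesIO) or text (TextIOWrapper) stream over
            # `payload` accordingly.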
is_binary = False
args, kwargs = m.call_args
if len(args) > 1:
if "b" in args[1]:
is_binary = True
encoding = "utf-8"
if "encoding" in kwargs:
encoding = kwargs["encoding"]
if is_binary:
from io import BytesIO
return BytesIO(payload)
else:
from io import TextIOWrapper
return TextIOWrapper(str(payload, encoding=encoding))
with mock.patch("streamlit.components.v1.components.os.path.isdir"):
declare_component("test", path=PATH)
payload = b"\x00\x01\x00\x00\x00\x0D\x00\x80" # binary non utf-8 payload
with mock.patch("streamlit.components.v1.components.open") as m:
m.return_value.__enter__ = lambda _: _open_read(m, payload)
response = self._request_component("components_test.test")
self.assertEqual(200, response.code)
self.assertEqual(
payload,
response.body,
)
class IFrameTest(testutil.DeltaGeneratorTestCase):
def test_iframe(self):
"""Test components.iframe"""
components.iframe("http://not.a.url", width=200, scrolling=True)
el = self.get_delta_from_queue().new_element
self.assertEqual(el.iframe.src, "http://not.a.url")
self.assertEqual(el.iframe.srcdoc, "")
self.assertEqual(el.iframe.width, 200)
self.assertTrue(el.iframe.has_width)
self.assertTrue(el.iframe.scrolling)
def test_html(self):
"""Test components.html"""
html = r"<html><body>An HTML string!</body></html>"
components.html(html, width=200, scrolling=True)
el = self.get_delta_from_queue().new_element
self.assertEqual(el.iframe.src, "")
self.assertEqual(el.iframe.srcdoc, html)
self.assertEqual(el.iframe.width, 200)
self.assertTrue(el.iframe.has_width)
self.assertTrue(el.iframe.scrolling)
|
the-stack_106_14395
|
# -*- coding: utf-8 -*-
r"""
Directed graphs
This module implements functions and operations involving directed
graphs. Here is what they can do
**Graph basic operations:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.layout_acyclic_dummy` | Compute a (dummy) ranked layout so that all edges point upward.
:meth:`~DiGraph.layout_acyclic` | Compute a ranked layout so that all edges point upward.
:meth:`~DiGraph.reverse` | Return a copy of the digraph with edges reversed in direction.
:meth:`~DiGraph.reverse_edge` | Reverse an edge.
:meth:`~DiGraph.reverse_edges` | Reverse a list of edges.
:meth:`~DiGraph.out_degree_sequence` | Return the outdegree sequence.
:meth:`~DiGraph.out_degree_iterator` | Same as degree_iterator, but for out degree.
:meth:`~DiGraph.out_degree` | Same as degree, but for out degree.
:meth:`~DiGraph.in_degree_sequence` | Return the indegree sequence of this digraph.
:meth:`~DiGraph.in_degree_iterator` | Same as degree_iterator, but for in degree.
:meth:`~DiGraph.in_degree` | Same as degree, but for in-degree.
:meth:`~DiGraph.neighbors_out` | Return the list of the out-neighbors of a given vertex.
:meth:`~DiGraph.neighbor_out_iterator` | Return an iterator over the out-neighbors of a given vertex.
:meth:`~DiGraph.neighbors_in` | Return the list of the in-neighbors of a given vertex.
:meth:`~DiGraph.neighbor_in_iterator` | Return an iterator over the in-neighbors of vertex.
:meth:`~DiGraph.outgoing_edges` | Return a list of edges departing from vertices.
:meth:`~DiGraph.outgoing_edge_iterator` | Return an iterator over all edges departing from vertices.
:meth:`~DiGraph.incoming_edges` | Return a list of edges arriving at vertices.
:meth:`~DiGraph.incoming_edge_iterator` | Return an iterator over all edges arriving at vertices.
:meth:`~DiGraph.sources` | Return the list of all sources (vertices without incoming edges) of this digraph.
:meth:`~DiGraph.sinks` | Return the list of all sinks (vertices without outgoing edges) of this digraph.
:meth:`~DiGraph.to_undirected` | Return an undirected version of the graph.
:meth:`~DiGraph.to_directed` | Since the graph is already directed, simply returns a copy of itself.
:meth:`~DiGraph.is_directed` | Since digraph is directed, returns True.
:meth:`~DiGraph.dig6_string` | Return the ``dig6`` representation of the digraph as an ASCII string.
**Distances:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.eccentricity` | Return the eccentricity of vertex (or vertices) ``v``.
:meth:`~DiGraph.radius` | Return the radius of the DiGraph.
:meth:`~DiGraph.diameter` | Return the diameter of the DiGraph.
:meth:`~DiGraph.center` | Return the set of vertices in the center of the DiGraph.
:meth:`~DiGraph.periphery` | Return the set of vertices in the periphery of the DiGraph.
**Paths and cycles:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.all_paths_iterator` | Return an iterator over the paths of ``self``.
:meth:`~DiGraph.all_simple_paths` | Return a list of all the simple paths of ``self`` starting with one of the given vertices.
:meth:`~DiGraph.all_cycles_iterator` | Return an iterator over all the cycles of ``self`` starting with one of the given vertices.
:meth:`~DiGraph.all_simple_cycles` | Return a list of all simple cycles of ``self``.
**Representation theory:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.path_semigroup` | Return the (partial) semigroup formed by the paths of the digraph.
**Connectivity:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.is_strongly_connected` | Check whether the current ``DiGraph`` is strongly connected.
:meth:`~DiGraph.strongly_connected_components_digraph` | Return the digraph of the strongly connected components
:meth:`~DiGraph.strongly_connected_components_subgraphs` | Return the strongly connected components as a list of subgraphs.
:meth:`~DiGraph.strongly_connected_component_containing_vertex` | Return the strongly connected component containing a given vertex
:meth:`~DiGraph.strongly_connected_components` | Return the list of strongly connected components.
:meth:`~DiGraph.immediate_dominators` | Return the immediate dominators of all vertices reachable from `root`.
:meth:`~DiGraph.strong_articulation_points` | Return the strong articulation points of this digraph.
**Acyclicity:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.is_directed_acyclic` | Check whether the digraph is acyclic or not.
:meth:`~DiGraph.is_transitive` | Check whether the digraph is transitive or not.
:meth:`~DiGraph.is_aperiodic` | Check whether the digraph is aperiodic or not.
:meth:`~DiGraph.is_tournament` | Check whether the digraph is a tournament.
:meth:`~DiGraph.period` | Return the period of the digraph.
:meth:`~DiGraph.level_sets` | Return the level set decomposition of the digraph.
:meth:`~DiGraph.topological_sort_generator` | Return a list of all topological sorts of the digraph if it is acyclic
:meth:`~DiGraph.topological_sort` | Return a topological sort of the digraph if it is acyclic
**Hard stuff:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.feedback_edge_set` | Compute the minimum feedback edge (arc) set of a digraph
**Miscellaneous:**
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`~DiGraph.flow_polytope` | Compute the flow polytope of a digraph
:meth:`~DiGraph.degree_polynomial` | Return the generating polynomial of degrees of vertices in ``self``.
:meth:`~DiGraph.out_branchings` | Return an iterator over the out branchings rooted at given vertex in ``self``.
:meth:`~DiGraph.in_branchings` | Return an iterator over the in branchings rooted at given vertex in ``self``.
Methods
-------
"""
# ****************************************************************************
# Copyright (C) 2010 Alexandre Blondin Masse <alexandre.blondin.masse at gmail.com>
# Carl Witty <[email protected]>
# Gregory McWhirter <[email protected]>
# Minh Van Nguyen <[email protected]>
# 2010-2011 Robert L. Miller <[email protected]>
# 2010-2015 Nathann Cohen <[email protected]>
# Nicolas M. Thiery <[email protected]>
# 2011 Johannes Klaus Fichte <[email protected]>
# 2012 Javier López Peña <[email protected]>
# 2012 Jim Stark <[email protected]>
# 2012 Karl-Dieter Crisman <[email protected]>
# 2012 Keshav Kini <[email protected]>
# 2012 Lukas Lansky <[email protected]>
# 2012-2015 Volker Braun <[email protected]>
# 2012-2017 Jeroen Demeyer <[email protected]>
# 2012-2018 David Coudert <[email protected]>
# 2013 Emily Gunawan <[email protected]>
# 2013 Gregg Musiker <[email protected]>
# 2013 Mathieu Guay-Paquet <[email protected]>
# 2013-2014 Simon King <[email protected]>
# 2014 Clemens Heuberger <[email protected]>
# Erik Massop <[email protected]>
# R. Andrew Ohana <[email protected]>
# Wilfried Luebbe <[email protected]>
# 2014-2015 André Apitzsch <[email protected]>
# Darij Grinberg <[email protected]>
# Travis Scrimshaw <tscrim at ucdavis.edu>
# Vincent Delecroix <[email protected]>
# 2014-2017 Frédéric Chapoton <[email protected]>
# 2015 Michele Borassi <[email protected]>
# 2015-2017 John H. Palmieri <[email protected]>
# Jori Mäntysalo <[email protected]>
# 2016 Dima Pasechnik <[email protected]>
# 2018 Meghana M Reddy <[email protected]>
# Julian Rüth <[email protected]>
# 2019 Rajat Mittal <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from copy import copy
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from itertools import product
import sage.graphs.generic_graph_pyx as generic_graph_pyx
from sage.graphs.generic_graph import GenericGraph
from sage.graphs.dot2tex_utils import have_dot2tex
from sage.graphs.views import EdgesView
class DiGraph(GenericGraph):
r"""
Directed graph.
A digraph or directed graph is a set of vertices connected by oriented
edges. See also the :wikipedia:`Directed_graph`. For a collection of
pre-defined digraphs, see the :mod:`~sage.graphs.digraph_generators` module.
A :class:`DiGraph` object has many methods whose list can be obtained by
typing ``g.<tab>`` (i.e. hit the 'tab' key) or by reading the documentation
of :mod:`~sage.graphs.digraph`, :mod:`~sage.graphs.generic_graph`, and
:mod:`~sage.graphs.graph`.
INPUT:
By default, a :class:`DiGraph` object is simple (i.e. no *loops* nor
*multiple edges*) and unweighted. This can be easily tuned with the
appropriate flags (see below).
- ``data`` -- can be any of the following (see the ``format`` argument):
#. ``DiGraph()`` -- build a digraph on 0 vertices
#. ``DiGraph(5)`` -- return an edgeless digraph on the 5 vertices 0,...,4
#. ``DiGraph([list_of_vertices, list_of_edges])`` -- return a digraph with
given vertices/edges
To bypass auto-detection, prefer the more explicit
``DiGraph([V, E], format='vertices_and_edges')``.
#. ``DiGraph(list_of_edges)`` -- return a digraph with a given list of
edges (see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.add_edges`).
To bypass auto-detection, prefer the more explicit
``DiGraph(L, format='list_of_edges')``.
#. ``DiGraph({1: [2,3,4], 3: [4]})`` -- return a digraph by associating to
each vertex the list of its out-neighbors.
To bypass auto-detection, prefer the more explicit
``DiGraph(D, format='dict_of_lists')``.
#. ``DiGraph({1: {2: 'a', 3: 'b'}, 3: {2: 'c'}})`` -- return a digraph by
associating a list of out-neighbors to each vertex and providing its
edge label.
To bypass auto-detection, prefer the more explicit
``DiGraph(D, format='dict_of_dicts')``.
For digraphs with multiple edges, you can provide a list of labels
instead, e.g.: ``DiGraph({1: {2: ['a1', 'a2'], 3:['b']},
3:{2:['c']}})``.
#. ``DiGraph(a_matrix)`` -- return a digraph with given (weighted)
adjacency matrix (see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.adjacency_matrix`).
To bypass auto-detection, prefer the more explicit ``DiGraph(M,
format='adjacency_matrix')``. To take weights into account, use
``format='weighted_adjacency_matrix'`` instead.
#. ``DiGraph(a_nonsquare_matrix)`` -- return a digraph with given
incidence matrix (see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.incidence_matrix`).
To bypass auto-detection, prefer the more explicit ``DiGraph(M,
format='incidence_matrix')``.
#. ``DiGraph([V, f])`` -- return a digraph with a vertex set ``V`` and an
edge `u,v` whenever `f(u, v)` is ``True``. Example: ``DiGraph([
[1..10], lambda x,y: abs(x - y).is_square()])``
#. ``DiGraph('FOC@?OC@_?')`` -- return a digraph from a ``dig6`` string
(see documentation of :meth:`~dig6_string`).
#. ``DiGraph(another_digraph)`` -- return a digraph from a Sage (di)graph,
`pygraphviz <https://pygraphviz.github.io/>`__ digraph, `NetworkX
<https://networkx.github.io/>`__ digraph, or `igraph
<http://igraph.org/python/>`__ digraph.
- ``pos`` -- dict (default: ``None``); a positioning dictionary. For
example, the spring layout from NetworkX for the 5-cycle is::
{0: [-0.91679746, 0.88169588],
1: [ 0.47294849, 1.125 ],
2: [ 1.125 ,-0.12867615],
3: [ 0.12743933,-1.125 ],
4: [-1.125 ,-0.50118505]}
- ``name`` -- string (default: ``None``); gives the graph a name (e.g.,
name="complete")
- ``loops`` -- boolean (default: ``None``); whether to allow loops (ignored
if data is an instance of the DiGraph class)
- ``multiedges`` -- boolean (default: ``None``); whether to allow multiple
edges (ignored if data is an instance of the DiGraph class)
- ``weighted`` -- boolean (default: ``None``); whether digraph thinks of
itself as weighted or not. See ``self.weighted()``
- ``format`` -- string (default: ``None``); if set to ``None``,
:class:`DiGraph` tries to guess input's format. To avoid this possibly
time-consuming step, one of the following values can be specified (see
description above): ``"int"``, ``"dig6"``, ``"rule"``,
``"list_of_edges"``, ``"dict_of_lists"``, ``"dict_of_dicts"``,
``"adjacency_matrix"``, ``"weighted_adjacency_matrix"``,
``"incidence_matrix"``, ``"NX"``, ``"igraph"``.
- ``sparse`` -- boolean (default: ``True``); ``sparse=True`` is an alias for
``data_structure="sparse"``, and ``sparse=False`` is an alias for
``data_structure="dense"``
- ``data_structure`` -- string (default: ``"sparse"``); one of the following
(for more information, see :mod:`~sage.graphs.base.overview`):
* ``"dense"`` -- selects the :mod:`~sage.graphs.base.dense_graph` backend
* ``"sparse"`` -- selects the :mod:`~sage.graphs.base.sparse_graph`
backend
* ``"static_sparse"`` -- selects the
:mod:`~sage.graphs.base.static_sparse_backend` (this backend is faster
than the sparse backend and smaller in memory, and it is immutable, so
that the resulting graphs can be used as dictionary keys).
- ``immutable`` -- boolean (default: ``False``); whether to create an
immutable digraph. Note that ``immutable=True`` is actually a shortcut for
``data_structure='static_sparse'``.
- ``vertex_labels`` -- boolean (default: ``True``); whether to allow any
object as a vertex (slower), or only the integers `0,...,n-1`, where `n`
is the number of vertices.
- ``convert_empty_dict_labels_to_None`` -- boolean (default: ``None``); this
argument determines whether the default edge labels used by NetworkX (empty
dictionaries) are replaced by ``None``, the default Sage edge label. It is
set to ``True`` iff a NetworkX graph is given as input.
EXAMPLES:
#. A dictionary of dictionaries::
sage: g = DiGraph({0: {1: 'x', 2: 'z', 3: 'a'}, 2: {5: 'out'}}); g
Digraph on 5 vertices
The labels ('x', 'z', 'a', 'out') are labels for edges. For example,
'out' is the label for the edge from 2 to 5. Labels can be used as
weights, if all the labels share some common parent.
#. A dictionary of lists (or iterables)::
sage: g = DiGraph({0: [1, 2, 3], 2: [4]}); g
Digraph on 5 vertices
sage: g = DiGraph({0: (1, 2, 3), 2: (4,)}); g
Digraph on 5 vertices
#. A list of vertices and a function describing adjacencies. Note that the
list of vertices and the function must be enclosed in a list (i.e.,
``[list of vertices, function]``).
We construct a graph on the integers 1 through 12 such that there is a
directed edge from `i` to `j` if and only if `i` divides `j`::
sage: g = DiGraph([[1..12], lambda i,j: i != j and i.divides(j)])
sage: g.vertices()
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
sage: g.adjacency_matrix()
[0 1 1 1 1 1 1 1 1 1 1 1]
[0 0 0 1 0 1 0 1 0 1 0 1]
[0 0 0 0 0 1 0 0 1 0 0 1]
[0 0 0 0 0 0 0 1 0 0 0 1]
[0 0 0 0 0 0 0 0 0 1 0 0]
[0 0 0 0 0 0 0 0 0 0 0 1]
[0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0]
#. A Sage matrix: Note: If format is not specified, then Sage assumes a
square matrix is an adjacency matrix, and a nonsquare matrix is an
incidence matrix.
- an adjacency matrix::
sage: M = Matrix([[0, 1, 1, 1, 0],[0, 0, 0, 0, 0],[0, 0, 0, 0, 1],[0, 0, 0, 0, 0],[0, 0, 0, 0, 0]]); M
[0 1 1 1 0]
[0 0 0 0 0]
[0 0 0 0 1]
[0 0 0 0 0]
[0 0 0 0 0]
sage: DiGraph(M)
Digraph on 5 vertices
sage: M = Matrix([[0,1,-1],[-1,0,-1/2],[1,1/2,0]]); M
[ 0 1 -1]
[ -1 0 -1/2]
[ 1 1/2 0]
sage: G = DiGraph(M,sparse=True,weighted=True); G
Digraph on 3 vertices
sage: G.weighted()
True
- an incidence matrix::
sage: M = Matrix(6, [-1,0,0,0,1, 1,-1,0,0,0, 0,1,-1,0,0, 0,0,1,-1,0, 0,0,0,1,-1, 0,0,0,0,0]); M
[-1 0 0 0 1]
[ 1 -1 0 0 0]
[ 0 1 -1 0 0]
[ 0 0 1 -1 0]
[ 0 0 0 1 -1]
[ 0 0 0 0 0]
sage: DiGraph(M)
Digraph on 6 vertices
#. A ``dig6`` string: Sage automatically recognizes whether a string is in
``dig6`` format, which is a directed version of ``graph6``::
sage: D = DiGraph('IRAaDCIIOWEOKcPWAo')
sage: D
Digraph on 10 vertices
sage: D = DiGraph('IRAaDCIIOEOKcPWAo')
Traceback (most recent call last):
...
RuntimeError: the string (IRAaDCIIOEOKcPWAo) seems corrupt: for n = 10, the string is too short
sage: D = DiGraph("IRAaDCI'OWEOKcPWAo")
Traceback (most recent call last):
...
RuntimeError: the string seems corrupt: valid characters are
?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
#. A NetworkX XDiGraph::
sage: import networkx
sage: g = networkx.MultiDiGraph({0: [1, 2, 3], 2: [4]})
sage: DiGraph(g)
Digraph on 5 vertices
#. A NetworkX digraph::
sage: import networkx
sage: g = networkx.DiGraph({0: [1, 2, 3], 2: [4]})
sage: DiGraph(g)
Digraph on 5 vertices
#. An igraph directed Graph (see also
:meth:`~sage.graphs.generic_graph.GenericGraph.igraph_graph`)::
sage: import igraph # optional - python_igraph
sage: g = igraph.Graph([(0,1),(0,2)], directed=True) # optional - python_igraph
sage: DiGraph(g) # optional - python_igraph
Digraph on 3 vertices
If ``vertex_labels`` is ``True``, the names of the vertices are given by
the vertex attribute ``'name'``, if available::
sage: g = igraph.Graph([(0,1),(0,2)], directed=True, vertex_attrs={'name':['a','b','c']}) # optional - python_igraph
sage: DiGraph(g).vertices() # optional - python_igraph
['a', 'b', 'c']
sage: g = igraph.Graph([(0,1),(0,2)], directed=True, vertex_attrs={'label':['a','b','c']}) # optional - python_igraph
sage: DiGraph(g).vertices() # optional - python_igraph
[0, 1, 2]
If the igraph Graph has edge attributes, they are used as edge labels::
sage: g = igraph.Graph([(0,1),(0,2)], directed=True, edge_attrs={'name':['a','b'], 'weight':[1,3]}) # optional - python_igraph
sage: DiGraph(g).edges() # optional - python_igraph
[(0, 1, {'name': 'a', 'weight': 1}), (0, 2, {'name': 'b', 'weight': 3})]
TESTS::
sage: DiGraph({0:[1,2,3], 2:[4]}).edges()
[(0, 1, None), (0, 2, None), (0, 3, None), (2, 4, None)]
sage: DiGraph({0:(1,2,3), 2:(4,)}).edges()
[(0, 1, None), (0, 2, None), (0, 3, None), (2, 4, None)]
sage: DiGraph({0:Set([1,2,3]), 2:Set([4])}).edges()
[(0, 1, None), (0, 2, None), (0, 3, None), (2, 4, None)]
Demonstrate that digraphs using the static backend are equal to mutable
graphs but can be used as dictionary keys::
sage: import networkx
sage: g = networkx.DiGraph({0:[1,2,3], 2:[4]})
sage: G = DiGraph(g)
sage: G_imm = DiGraph(G, data_structure="static_sparse")
sage: H_imm = DiGraph(G, data_structure="static_sparse")
sage: H_imm is G_imm
False
sage: H_imm == G_imm == G
True
sage: {G_imm:1}[H_imm]
1
sage: {G_imm:1}[G]
Traceback (most recent call last):
...
TypeError: This graph is mutable, and thus not hashable. Create an
immutable copy by `g.copy(immutable=True)`
The error message states that one can also create immutable graphs by
specifying the ``immutable`` optional argument (not only by
``data_structure='static_sparse'`` as above)::
sage: J_imm = DiGraph(G, immutable=True)
sage: J_imm == G_imm
True
sage: type(J_imm._backend) == type(G_imm._backend)
True
From a list of vertices and a list of edges::
sage: G = DiGraph([[1,2,3],[(1,2)]]); G
Digraph on 3 vertices
sage: G.edges()
[(1, 2, None)]
Check that :trac:`27505` is fixed::
sage: DiGraph(DiGraph().networkx_graph(), weighted=None, format='NX')
Digraph on 0 vertices
"""
_directed = True
def __init__(self, data=None, pos=None, loops=None, format=None,
weighted=None, data_structure="sparse",
vertex_labels=True, name=None,
multiedges=None, convert_empty_dict_labels_to_None=None,
sparse=True, immutable=False):
"""
TESTS::
sage: D = DiGraph()
sage: loads(dumps(D)) == D
True
sage: a = matrix(2,2,[1,2,0,1])
sage: DiGraph(a,sparse=True).adjacency_matrix() == a
True
sage: a = matrix(2,2,[3,2,0,1])
sage: DiGraph(a,sparse=True).adjacency_matrix() == a
True
The positions are copied when the DiGraph is built from another DiGraph
or from a Graph ::
sage: g = DiGraph(graphs.PetersenGraph())
sage: h = DiGraph(g)
sage: g.get_pos() == h.get_pos()
True
sage: g.get_pos() == graphs.PetersenGraph().get_pos()
True
The position dictionary is not the input one (:trac:`22424`)::
sage: my_pos = {0:(0,0), 1:(1,1)}
sage: D = DiGraph([[0,1], [(0,1)]], pos=my_pos)
sage: my_pos == D._pos
True
sage: my_pos is D._pos
False
Detection of multiple edges::
sage: DiGraph({1:{2:[0,1]}})
Multi-digraph on 2 vertices
sage: DiGraph({1:{2:0}})
Digraph on 2 vertices
An empty list or dictionary defines a simple graph (:trac:`10441` and
:trac:`12910`)::
sage: DiGraph([])
Digraph on 0 vertices
sage: DiGraph({})
Digraph on 0 vertices
sage: # not "Multi-digraph on 0 vertices"
Problem with weighted adjacency matrix (:trac:`13919`)::
sage: B = {0:{1:2,2:5,3:4},1:{2:2,4:7},2:{3:1,4:4,5:3},3:{5:4},4:{5:1,6:5},5:{4:1,6:7,5:1}}
sage: grafo3 = DiGraph(B, weighted=True)
sage: matad = grafo3.weighted_adjacency_matrix()
sage: grafo4 = DiGraph(matad, format="adjacency_matrix", weighted=True)
sage: grafo4.shortest_path(0, 6, by_weight=True)
[0, 1, 2, 5, 4, 6]
Building a DiGraph with ``immutable=False`` returns a mutable graph::
sage: g = graphs.PetersenGraph()
sage: g = DiGraph(g.edges(), immutable=False)
sage: g.add_edge("Hey", "Heyyyyyyy")
sage: {g:1}[g]
Traceback (most recent call last):
...
TypeError: This graph is mutable, and thus not hashable. Create an immutable copy by `g.copy(immutable=True)`
sage: copy(g) is g
False
sage: {g.copy(immutable=True):1}[g.copy(immutable=True)]
1
But building it with ``immutable=True`` returns an immutable graph::
sage: g = DiGraph(graphs.PetersenGraph(), immutable=True)
sage: g.add_edge("Hey", "Heyyyyyyy")
Traceback (most recent call last):
...
ValueError: graph is immutable; please change a copy instead (use function copy())
sage: {g:1}[g]
1
sage: copy(g) is g # copy is mutable again
False
Unknown input format::
sage: DiGraph(4, format="HeyHeyHey")
Traceback (most recent call last):
...
ValueError: unknown input format 'HeyHeyHey'
Sage DiGraph from igraph undirected graph::
sage: import igraph # optional - python_igraph
sage: DiGraph(igraph.Graph()) # optional - python_igraph
Traceback (most recent call last):
...
ValueError: a *directed* igraph graph was expected. To build an undirected graph, call the Graph constructor
Vertex labels are retained in the graph (:trac:`14708`)::
sage: g = DiGraph()
sage: g.add_vertex(0)
sage: g.set_vertex(0, 'foo')
sage: g.get_vertices()
{0: 'foo'}
sage: DiGraph(g).get_vertices()
{0: 'foo'}
"""
msg = ''
GenericGraph.__init__(self)
from sage.structure.element import is_Matrix
if sparse is False:
if data_structure != "sparse":
raise ValueError("the 'sparse' argument is an alias for "
"'data_structure', please do not define both")
data_structure = "dense"
if multiedges or weighted:
if data_structure == "dense":
raise RuntimeError("multiedge and weighted c_graphs must be sparse")
if immutable:
data_structure = 'static_sparse'
# If the data structure is static_sparse, we first build a graph
# using the sparse data structure, then re-encode the resulting graph
# as a static sparse graph.
from sage.graphs.base.sparse_graph import SparseGraphBackend
from sage.graphs.base.dense_graph import DenseGraphBackend
if data_structure in ["sparse", "static_sparse"]:
CGB = SparseGraphBackend
elif data_structure == "dense":
CGB = DenseGraphBackend
else:
raise ValueError("data_structure must be equal to 'sparse', "
"'static_sparse' or 'dense'")
self._backend = CGB(0, directed=True)
if format is None and isinstance(data, str):
format = 'dig6'
if data[:8] == ">>dig6<<":
data = data[8:]
if format is None and is_Matrix(data):
if data.is_square():
format = 'adjacency_matrix'
else:
format = 'incidence_matrix'
msg += "Non-symmetric or non-square matrix assumed to be an incidence matrix: "
if format is None and isinstance(data, DiGraph):
format = 'DiGraph'
from sage.graphs.all import Graph
if format is None and isinstance(data, Graph):
data = data.to_directed()
format = 'DiGraph'
if format is None and isinstance(data,list) and \
len(data) >= 2 and callable(data[1]):
format = 'rule'
if (format is None and
isinstance(data, list) and
len(data) == 2 and
isinstance(data[0], list) and # a list of two lists, the second of
((isinstance(data[1], list) and # which contains iterables (the edges)
(not data[1] or callable(getattr(data[1][0], "__iter__", None)))) or
(isinstance(data[1], EdgesView)))):
format = "vertices_and_edges"
if format is None and isinstance(data, dict):
if not data:
format = 'dict_of_dicts'
else:
val = next(iter(data.values()))
if isinstance(val, dict):
format = 'dict_of_dicts'
else:
format = 'dict_of_lists'
if format is None and hasattr(data, 'adj'):
import networkx
if isinstance(data, (networkx.Graph, networkx.MultiGraph)):
data = data.to_directed()
format = 'NX'
elif isinstance(data, (networkx.DiGraph, networkx.MultiDiGraph)):
format = 'NX'
if (format is None and
hasattr(data, 'vcount') and
hasattr(data, 'get_edgelist')):
try:
import igraph
except ImportError:
raise ImportError("the data seems to be a igraph object, but "
"igraph is not installed in Sage. To install "
"it, run 'sage -i python_igraph'")
if format is None and isinstance(data, igraph.Graph):
format = 'igraph'
if format is None and isinstance(data, (int, Integer)):
format = 'int'
if format is None and data is None:
format = 'int'
data = 0
# Input is a list of edges or an EdgesView
if format is None and isinstance(data, (list, EdgesView)):
format = "list_of_edges"
if weighted is None:
weighted = False
if format == 'weighted_adjacency_matrix':
if weighted is False:
raise ValueError("format was weighted_adjacency_matrix but weighted was False")
if weighted is None:
weighted = True
if multiedges is None:
multiedges = False
format = 'adjacency_matrix'
if format is None:
raise ValueError("This input cannot be turned into a graph")
# At this point, format has been set. We build the graph
if format == 'dig6':
if weighted is None:
self._weighted = False
self.allow_loops(True if loops else False, check=False)
self.allow_multiple_edges(True if multiedges else False, check=False)
from .graph_input import from_dig6
from_dig6(self, data)
elif format == 'adjacency_matrix':
from .graph_input import from_adjacency_matrix
from_adjacency_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'incidence_matrix':
from .graph_input import from_oriented_incidence_matrix
from_oriented_incidence_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'DiGraph':
if loops is None:
loops = data.allows_loops()
elif not loops and data.has_loops():
raise ValueError("the digraph was built with loops=False but input data has a loop")
if multiedges is None:
multiedges = data.allows_multiple_edges()
elif not multiedges:
e = data.edges(labels=False, sort=False)
if len(e) != len(set(e)):
raise ValueError("no multiple edges but input digraph"
" has multiple edges")
self.allow_multiple_edges(multiedges, check=False)
self.allow_loops(loops, check=False)
if weighted is None:
weighted = data.weighted()
if data.get_pos() is not None:
pos = data.get_pos()
self.set_vertices(data.get_vertices())
data._backend.subgraph_given_vertices(self._backend, data)
self.name(data.name())
elif format == 'rule':
f = data[1]
if loops is None:
loops = any(f(v,v) for v in data[0])
if weighted is None:
weighted = False
self.allow_multiple_edges(True if multiedges else False, check=False)
self.allow_loops(loops,check=False)
self.add_vertices(data[0])
self.add_edges((u, v) for u in data[0] for v in data[0] if f(u, v))
elif format == "vertices_and_edges":
self.allow_multiple_edges(bool(multiedges), check=False)
self.allow_loops(bool(loops), check=False)
self.add_vertices(data[0])
self.add_edges(data[1])
elif format == 'dict_of_dicts':
from .graph_input import from_dict_of_dicts
from_dict_of_dicts(self, data, loops=loops, multiedges=multiedges, weighted=weighted,
convert_empty_dict_labels_to_None=False if convert_empty_dict_labels_to_None is None else convert_empty_dict_labels_to_None)
elif format == 'dict_of_lists':
from .graph_input import from_dict_of_lists
from_dict_of_lists(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'NX':
# adjust for empty dicts instead of None in NetworkX default edge
# labels
if convert_empty_dict_labels_to_None is None:
convert_empty_dict_labels_to_None = (format == 'NX')
if weighted is None:
import networkx
if isinstance(data, networkx.DiGraph):
weighted = False
if multiedges is None:
multiedges = False
if loops is None:
loops = False
else:
weighted = True
if multiedges is None:
multiedges = data.multiedges
if loops is None:
loops = data.selfloops
if convert_empty_dict_labels_to_None:
r = lambda x: None if x == {} else x
else:
r = lambda x: x
self.allow_multiple_edges(multiedges, check=False)
self.allow_loops(loops, check=False)
self.add_vertices(data.nodes())
self.add_edges((u, v, r(l)) for u, v, l in data.edges(data=True))
elif format == 'igraph':
if not data.is_directed():
raise ValueError("a *directed* igraph graph was expected. To "
"build an undirected graph, call the Graph "
"constructor")
self.add_vertices(range(data.vcount()))
self.add_edges((e.source, e.target, e.attributes()) for e in data.es())
if vertex_labels and 'name' in data.vertex_attributes():
vs = data.vs()
self.relabel({v: vs[v]['name'] for v in self})
elif format == 'int':
if weighted is None:
weighted = False
self.allow_loops(True if loops else False, check=False)
self.allow_multiple_edges(True if multiedges else False,
check=False)
if data < 0:
raise ValueError("the number of vertices cannot be strictly negative")
elif data:
self.add_vertices(range(data))
elif format == 'list_of_edges':
self.allow_multiple_edges(True if multiedges else False,
check=False)
self.allow_loops(True if loops else False, check=False)
self.add_edges(data)
else:
raise ValueError("unknown input format '{}'".format(format))
# weighted, multiedges, loops, verts and num_verts should now be set
self._weighted = weighted
self._pos = copy(pos)
if format != 'DiGraph' or name is not None:
self.name(name)
if data_structure == "static_sparse":
from sage.graphs.base.static_sparse_backend import StaticSparseBackend
ib = StaticSparseBackend(self,
loops = self.allows_loops(),
multiedges = self.allows_multiple_edges())
self._backend = ib
self._immutable = True
### Formats
def dig6_string(self):
r"""
Return the ``dig6`` representation of the digraph as an ASCII string.
This is only valid for single (no multiple edges) digraphs on at most
`2^{18} - 1 = 262143` vertices.
.. NOTE::
As the ``dig6`` format only handles graphs with vertex set `\{0,
\ldots, n-1\}`, a :meth:`relabelled copy
<sage.graphs.generic_graph.GenericGraph.relabel>` will be encoded,
if necessary.
.. SEEALSO::
* :meth:`~sage.graphs.graph.Graph.graph6_string` -- a similar string
format for undirected graphs
EXAMPLES::
sage: D = DiGraph({0: [1, 2], 1: [2], 2: [3], 3: [0]})
sage: D.dig6_string()
'CW`_'
TESTS::
sage: DiGraph().dig6_string()
'?'
"""
n = self.order()
if n > 262143:
raise ValueError('dig6 format supports graphs on 0 to 262143 vertices only')
elif self.has_multiple_edges():
raise ValueError('dig6 format does not support multiple edges')
else:
return generic_graph_pyx.small_integer_to_graph6(n) + generic_graph_pyx.binary_string_to_graph6(self._bit_vector())
### Attributes
def is_directed(self):
"""
Since digraph is directed, return ``True``.
EXAMPLES::
sage: DiGraph().is_directed()
True
"""
return True
### Properties
def is_directed_acyclic(self, certificate=False):
"""
Return whether the digraph is acyclic or not.
A directed graph is acyclic if for any vertex `v`, there is no directed
path that starts and ends at `v`. Every directed acyclic graph (DAG)
corresponds to a partial ordering of its vertices; however, multiple DAGs
may lead to the same partial ordering.
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return a
certificate
OUTPUT:
* When ``certificate=False``, returns a boolean value.
* When ``certificate=True``:
* If the graph is acyclic, returns a pair ``(True, ordering)`` where
``ordering`` is a list of the vertices such that ``u`` appears
before ``v`` in ``ordering`` if ``u, v`` is an edge.
* Else, returns a pair ``(False, cycle)`` where ``cycle`` is a list of
vertices representing a circuit in the graph.
EXAMPLES:
At first, the following graph is acyclic::
sage: D = DiGraph({0:[1, 2, 3], 4:[2, 5], 1:[8], 2:[7], 3:[7], 5:[6,7], 7:[8], 6:[9], 8:[10], 9:[10]})
sage: D.plot(layout='circular').show()
sage: D.is_directed_acyclic()
True
Adding an edge from `9` to `7` does not change it::
sage: D.add_edge(9, 7)
sage: D.is_directed_acyclic()
True
We can obtain as a proof an ordering of the vertices such that `u`
appears before `v` if `uv` is an edge of the graph::
sage: D.is_directed_acyclic(certificate=True)
(True, [4, 5, 6, 9, 0, 1, 2, 3, 7, 8, 10])
Adding an edge from 7 to 4, though, makes a difference::
sage: D.add_edge(7, 4)
sage: D.is_directed_acyclic()
False
Indeed, it creates a circuit `7, 4, 5`::
sage: D.is_directed_acyclic(certificate=True)
(False, [7, 4, 5])
Checking acyclic graphs are indeed acyclic ::
sage: def random_acyclic(n, p):
....: g = graphs.RandomGNP(n, p)
....: h = DiGraph()
....: h.add_edges(((u, v) if u < v else (v, u)) for u, v in g.edge_iterator(labels=False))
....: return h
...
sage: all(random_acyclic(100, .2).is_directed_acyclic() # long time
....: for i in range(50)) # long time
True
TESTS:
What about loops? ::
sage: g = digraphs.ButterflyGraph(3)
sage: g.allow_loops(True)
sage: g.is_directed_acyclic()
True
sage: g.add_edge(0, 0)
sage: g.is_directed_acyclic()
False
"""
return self._backend.is_directed_acyclic(certificate=certificate)
def to_directed(self):
"""
Since the graph is already directed, simply returns a copy of itself.
EXAMPLES::
sage: DiGraph({0: [1, 2, 3], 4: [5, 1]}).to_directed()
Digraph on 6 vertices
"""
return self.copy()
def to_undirected(self, data_structure=None, sparse=None):
"""
Return an undirected version of the graph.
Every directed edge becomes an edge.
INPUT:
- ``data_structure`` -- string (default: ``None``); one of
``"sparse"``, ``"static_sparse"``, or ``"dense"``. See the
documentation of :class:`Graph` or :class:`DiGraph`.
- ``sparse`` -- boolean (default: ``None``); ``sparse=True`` is an
alias for ``data_structure="sparse"``, and ``sparse=False`` is an
alias for ``data_structure="dense"``.
EXAMPLES::
sage: D = DiGraph({0: [1, 2], 1: [0]})
sage: G = D.to_undirected()
sage: D.edges(labels=False)
[(0, 1), (0, 2), (1, 0)]
sage: G.edges(labels=False)
[(0, 1), (0, 2)]
TESTS:
Immutable graphs yield immutable graphs (:trac:`17005`)::
sage: DiGraph([[1, 2]], immutable=True).to_undirected()._backend
<sage.graphs.base.static_sparse_backend.StaticSparseBackend object at ...>
Vertex labels will be retained (:trac:`14708`)::
sage: D.set_vertex(0, 'foo')
sage: G = D.to_undirected()
sage: D.get_vertices()
{0: 'foo', 1: None, 2: None}
sage: G.get_vertices()
{0: 'foo', 1: None, 2: None}
"""
if sparse is not None:
if data_structure is not None:
raise ValueError("the 'sparse' argument is an alias for "
"'data_structure'. Please do not define both")
data_structure = "sparse" if sparse else "dense"
if data_structure is None:
from sage.graphs.base.dense_graph import DenseGraphBackend
from sage.graphs.base.sparse_graph import SparseGraphBackend
if isinstance(self._backend, DenseGraphBackend):
data_structure = "dense"
elif isinstance(self._backend, SparseGraphBackend):
data_structure = "sparse"
else:
data_structure = "static_sparse"
from sage.graphs.all import Graph
G = Graph(name = self.name(),
pos = self._pos,
multiedges = self.allows_multiple_edges(),
loops = self.allows_loops(),
data_structure = (data_structure if data_structure!="static_sparse"
else "sparse")) # we need a mutable copy first
G.add_vertices(self.vertex_iterator())
G.set_vertices(self.get_vertices())
G.add_edges(self.edge_iterator())
if hasattr(self, '_embedding'):
G._embedding = copy(self._embedding)
G._weighted = self._weighted
if data_structure == "static_sparse":
G = G.copy(data_structure=data_structure)
return G
### Edge Handlers
def incoming_edge_iterator(self, vertices, labels=True):
"""
Return an iterator over all edges arriving at vertices.
INPUT:
- ``vertices`` -- a vertex or a list of vertices
- ``labels`` -- boolean (default: ``True``); whether to return edges as
pairs of vertices, or as triples containing the labels
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: for a in D.incoming_edge_iterator([0]):
....: print(a)
(1, 0, None)
(4, 0, None)
"""
if vertices is None:
vertices = self
elif vertices in self:
vertices = [vertices]
else:
vertices = [v for v in vertices if v in self]
return self._backend.iterator_in_edges(vertices, labels)
def incoming_edges(self, vertices, labels=True):
"""
Return a list of edges arriving at vertices.
INPUT:
- ``vertices`` -- a vertex or a list of vertices
- ``labels`` -- boolean (default: ``True``); whether to return edges as
pairs of vertices, or as triples containing the labels.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.incoming_edges([0])
[(1, 0, None), (4, 0, None)]
"""
return list(self.incoming_edge_iterator(vertices, labels=labels))
def outgoing_edge_iterator(self, vertices, labels=True):
"""
Return an iterator over all edges departing from vertices.
INPUT:
- ``vertices`` -- a vertex or a list of vertices
- ``labels`` -- boolean (default: ``True``); whether to return edges as
pairs of vertices, or as triples containing the labels.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: for a in D.outgoing_edge_iterator([0]):
....: print(a)
(0, 1, None)
(0, 2, None)
(0, 3, None)
"""
if vertices is None:
vertices = self
elif vertices in self:
vertices = [vertices]
else:
vertices = [v for v in vertices if v in self]
return self._backend.iterator_out_edges(vertices, labels)
def outgoing_edges(self, vertices, labels=True):
"""
Return a list of edges departing from vertices.
INPUT:
- ``vertices`` -- a vertex or a list of vertices
- ``labels`` -- boolean (default: ``True``); whether to return edges as
pairs of vertices, or as triples containing the labels.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.outgoing_edges([0])
[(0, 1, None), (0, 2, None), (0, 3, None)]
"""
return list(self.outgoing_edge_iterator(vertices, labels=labels))
def neighbor_in_iterator(self, vertex):
"""
Return an iterator over the in-neighbors of ``vertex``.
A vertex `u` is an in-neighbor of a vertex `v` if `uv` is an edge.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: for a in D.neighbor_in_iterator(0):
....: print(a)
1
4
"""
return iter(set(self._backend.iterator_in_nbrs(vertex)))
def neighbors_in(self, vertex):
"""
Return the list of the in-neighbors of a given vertex.
A vertex `u` is an in-neighbor of a vertex `v` if `uv` is an edge.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.neighbors_in(0)
[1, 4]
"""
return list(self.neighbor_in_iterator(vertex))
def neighbor_out_iterator(self, vertex):
"""
Return an iterator over the out-neighbors of a given vertex.
A vertex `u` is an out-neighbor of a vertex `v` if `vu` is an edge.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: for a in D.neighbor_out_iterator(0):
....: print(a)
1
2
3
"""
return iter(set(self._backend.iterator_out_nbrs(vertex)))
def neighbors_out(self, vertex):
"""
Return the list of the out-neighbors of a given vertex.
A vertex `u` is an out-neighbor of a vertex `v` if `vu` is an edge.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.neighbors_out(0)
[1, 2, 3]
"""
return list(self.neighbor_out_iterator(vertex))
### Degree functions
def in_degree(self, vertices=None, labels=False):
"""
Same as degree, but for in degree.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.in_degree(vertices=[0, 1, 2], labels=True)
{0: 2, 1: 2, 2: 2}
sage: D.in_degree()
[2, 2, 2, 2, 1, 1]
sage: G = graphs.PetersenGraph().to_directed()
sage: G.in_degree(0)
3
"""
if vertices in self:
return self._backend.in_degree(vertices)
elif labels:
return {v: d for v, d in self.in_degree_iterator(vertices, labels=labels)}
else:
return list(self.in_degree_iterator(vertices, labels=labels))
def in_degree_iterator(self, vertices=None, labels=False):
"""
Same as degree_iterator, but for in degree.
EXAMPLES::
sage: D = graphs.Grid2dGraph(2,4).to_directed()
sage: sorted(D.in_degree_iterator())
[2, 2, 2, 2, 3, 3, 3, 3]
sage: sorted(D.in_degree_iterator(labels=True))
[((0, 0), 2),
((0, 1), 3),
((0, 2), 3),
((0, 3), 2),
((1, 0), 2),
((1, 1), 3),
((1, 2), 3),
((1, 3), 2)]
"""
if vertices is None:
vertices = self.vertex_iterator()
if labels:
for v in vertices:
yield (v, self.in_degree(v))
else:
for v in vertices:
yield self.in_degree(v)
def in_degree_sequence(self):
r"""
Return the in-degree sequence.
EXAMPLES:
The in-degree sequences of two digraphs::
sage: g = DiGraph({1: [2, 5, 6], 2: [3, 6], 3: [4, 6], 4: [6], 5: [4, 6]})
sage: g.in_degree_sequence()
[5, 2, 1, 1, 1, 0]
::
sage: V = [2, 3, 5, 7, 8, 9, 10, 11]
sage: E = [[], [8, 10], [11], [8, 11], [9], [], [], [2, 9, 10]]
sage: g = DiGraph(dict(zip(V, E)))
sage: g.in_degree_sequence()
[2, 2, 2, 2, 1, 0, 0, 0]
"""
return sorted(self.in_degree_iterator(), reverse=True)
def out_degree(self, vertices=None, labels=False):
"""
Same as degree, but for out degree.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.out_degree(vertices=[0, 1 ,2], labels=True)
{0: 3, 1: 2, 2: 1}
sage: D.out_degree()
[3, 2, 1, 1, 2, 1]
sage: D.out_degree(2)
1
"""
if vertices in self:
return self._backend.out_degree(vertices)
elif labels:
return {v: d for v, d in self.out_degree_iterator(vertices, labels=labels)}
else:
return list(self.out_degree_iterator(vertices, labels=labels))
def out_degree_iterator(self, vertices=None, labels=False):
"""
Same as degree_iterator, but for out degree.
EXAMPLES::
sage: D = graphs.Grid2dGraph(2,4).to_directed()
sage: sorted(D.out_degree_iterator())
[2, 2, 2, 2, 3, 3, 3, 3]
sage: sorted(D.out_degree_iterator(labels=True))
[((0, 0), 2),
((0, 1), 3),
((0, 2), 3),
((0, 3), 2),
((1, 0), 2),
((1, 1), 3),
((1, 2), 3),
((1, 3), 2)]
"""
if vertices is None:
vertices = self.vertex_iterator()
if labels:
for v in vertices:
yield (v, self.out_degree(v))
else:
for v in vertices:
yield self.out_degree(v)
def out_degree_sequence(self):
r"""
Return the outdegree sequence of this digraph.
EXAMPLES:
The outdegree sequences of two digraphs::
sage: g = DiGraph({1: [2, 5, 6], 2: [3, 6], 3: [4, 6], 4: [6], 5: [4, 6]})
sage: g.out_degree_sequence()
[3, 2, 2, 2, 1, 0]
::
sage: V = [2, 3, 5, 7, 8, 9, 10, 11]
sage: E = [[], [8, 10], [11], [8, 11], [9], [], [], [2, 9, 10]]
sage: g = DiGraph(dict(zip(V, E)))
sage: g.out_degree_sequence()
[3, 2, 2, 1, 1, 0, 0, 0]
"""
return sorted(self.out_degree_iterator(), reverse=True)
def sources(self):
r"""
Return a list of sources of the digraph.
OUTPUT:
- list of the vertices of the digraph that have no edges going into them
EXAMPLES::
sage: G = DiGraph({1: {3: ['a']}, 2: {3: ['b']}})
sage: G.sources()
[1, 2]
sage: T = DiGraph({1: {}})
sage: T.sources()
[1]
"""
return [x for x in self if not self.in_degree(x)]
def sinks(self):
"""
Return a list of sinks of the digraph.
OUTPUT:
- list of the vertices of the digraph that have no edges beginning at them
EXAMPLES::
sage: G = DiGraph({1: {3: ['a']}, 2: {3: ['b']}})
sage: G.sinks()
[3]
sage: T = DiGraph({1: {}})
sage: T.sinks()
[1]
"""
return [x for x in self if not self.out_degree(x)]
def degree_polynomial(self):
r"""
Return the generating polynomial of degrees of vertices in ``self``.
This is the sum
.. MATH::
\sum_{v \in G} x^{\operatorname{in}(v)} y^{\operatorname{out}(v)},
where ``in(v)`` and ``out(v)`` are the number of incoming and outgoing
edges at vertex `v` in the digraph `G`.
Because this polynomial is multiplicative over Cartesian products of
digraphs, it can help detect whether the digraph is isomorphic to a
Cartesian product.
.. SEEALSO::
:meth:`num_verts` for the value at `(x, y) = (1, 1)`
EXAMPLES::
sage: G = posets.PentagonPoset().hasse_diagram()
sage: G.degree_polynomial()
x^2 + 3*x*y + y^2
sage: G = posets.BooleanLattice(4).hasse_diagram()
sage: G.degree_polynomial().factor()
(x + y)^4
"""
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
R = PolynomialRing(ZZ, 'x,y')
x, y = R.gens()
return R.sum(x ** self.in_degree(v) * y ** self.out_degree(v) for v in self)
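    # Illustrative check of the multiplicativity claim made in the docstring
    # above (a sketch, not part of the doctests; ``digraphs.Path`` and
    # ``cartesian_product`` are assumed from the standard Sage graph API):
    #
    #     sage: G = digraphs.Path(2); H = digraphs.Path(3)
    #     sage: P = G.cartesian_product(H)
    #     sage: P.degree_polynomial() == G.degree_polynomial() * H.degree_polynomial()
    #     True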
def feedback_edge_set(self, constraint_generation=True, value_only=False,
solver=None, verbose=0, *, integrality_tolerance=1e-3):
r"""
Compute the minimum feedback edge set of a digraph (also called
feedback arc set).
The minimum feedback edge set of a digraph is a set of edges that
intersect all the circuits of the digraph. Equivalently, a minimum
feedback arc set of a DiGraph is a set `S` of arcs such that the digraph
`G - S` is acyclic. For more information, see the
:wikipedia:`Feedback_arc_set`.
INPUT:
- ``value_only`` -- boolean (default: ``False``)
- When set to ``True``, only the cardinality of a minimum feedback edge
  set is returned.
- When set to ``False``, a minimum feedback edge set is returned as a
  ``Set`` of edges.
- ``constraint_generation`` -- boolean (default: ``True``); whether to
use constraint generation when solving the Mixed Integer Linear
Program.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
ALGORITHM:
This problem is solved using Linear Programming, in two different
ways. The first one is to solve the following formulation:
.. MATH::
\mbox{Minimize : }&\sum_{(u,v)\in G} b_{(u,v)}\\
\mbox{Such that : }&\\
&\forall (u,v)\in G, d_u-d_v+ n \cdot b_{(u,v)}\geq 0\\
&\forall u\in G, 0\leq d_u\leq |G|\\
An explanation:
An acyclic digraph can be seen as a poset, and every poset has a linear
extension. This means that in any acyclic digraph the vertices can be
ordered with a total order `<` in such a way that if `(u,v) \in G`, then
`u < v`.
Thus, this linear program is built in order to assign to each vertex `v`
a number `d_v \in [0,\dots,n-1]` such that if there exists an edge
`(u, v) \in G` such that `d_v < d_u`, then the edge `(u,v)` is removed.
The number of edges removed is then minimized, which is the objective.
(Constraint Generation)
If the parameter ``constraint_generation`` is enabled, a more efficient
formulation is used:
.. MATH::
\mbox{Minimize : }&\sum_{(u,v)\in G} b_{(u,v)}\\
\mbox{Such that : }&\\
&\forall C\text{ circuits }\subseteq G, \sum_{uv\in C}b_{(u,v)}\geq 1\\
As the number of circuits contained in a graph is exponential, this LP
is solved through constraint generation. This means that the solver is
sequentially asked to solve the problem, knowing only a portion of the
circuits contained in `G`, each time adding to its list of constraints
a circuit that its last answer had left intact.
EXAMPLES:
If the digraph is created from a graph, and hence is symmetric (if `uv`
is an edge, then `vu` is an edge too), then obviously the cardinality of
its feedback arc set is the number of edges in the first graph::
sage: cycle=graphs.CycleGraph(5)
sage: dcycle=DiGraph(cycle)
sage: cycle.size()
5
sage: dcycle.feedback_edge_set(value_only=True)
5
And in this situation, for any edge `uv` of the first graph, `uv` or
`vu` is in the returned feedback arc set::
sage: g = graphs.RandomGNP(5,.3)
sage: while not g.num_edges():
....: g = graphs.RandomGNP(5,.3)
sage: dg = DiGraph(g)
sage: feedback = dg.feedback_edge_set()
sage: u,v,l = next(g.edge_iterator())
sage: (u,v) in feedback or (v,u) in feedback
True
TESTS:
Comparing with/without constraint generation. Also double-checks ticket
:trac:`12833`::
sage: for i in range(20):
....: g = digraphs.RandomDirectedGNP(10, .3)
....: x = g.feedback_edge_set(value_only=True)
....: y = g.feedback_edge_set(value_only=True,
....: constraint_generation=False)
....: if x != y:
....: print("Oh my, oh my !")
....: break
Loops are part of the feedback edge set (:trac:`23989`)::
sage: D = digraphs.DeBruijn(2, 2)
sage: sorted(D.loops(labels=None))
[('00', '00'), ('11', '11')]
sage: FAS = D.feedback_edge_set(value_only=False)
sage: all(l in FAS for l in D.loops(labels=None))
True
sage: FAS2 = D.feedback_edge_set(value_only=False, constraint_generation=False)
sage: len(FAS) == len(FAS2)
True
Check that multi-edges are properly taken into account::
sage: cycle = graphs.CycleGraph(5)
sage: dcycle = DiGraph(cycle)
sage: dcycle.feedback_edge_set(value_only=True)
5
sage: dcycle.allow_multiple_edges(True)
sage: dcycle.add_edges(dcycle.edges())
sage: dcycle.feedback_edge_set(value_only=True)
10
sage: dcycle.feedback_edge_set(value_only=True, constraint_generation=False)
10
Strongly connected components are well handled (:trac:`23989`)::
sage: g = digraphs.Circuit(3) * 2
sage: g.add_edge(0, 3)
sage: g.feedback_edge_set(value_only=True)
2
"""
# It would be a pity to start a LP if the digraph is already acyclic
if self.is_directed_acyclic():
return 0 if value_only else []
if self.has_loops():
# We solve the problem on a copy without loops of the digraph
D = DiGraph(self.edges(sort=False), multiedges=self.allows_multiple_edges(), loops=True)
D.allow_loops(False)
FAS = D.feedback_edge_set(constraint_generation=constraint_generation,
value_only=value_only, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
if value_only:
return FAS + self.number_of_loops()
else:
return FAS + self.loops(labels=None)
if not self.is_strongly_connected():
# If the digraph is not strongly connected, we solve the problem on
# each of its strongly connected components
FAS = 0 if value_only else []
for h in self.strongly_connected_components_subgraphs():
if value_only:
FAS += h.feedback_edge_set(constraint_generation=constraint_generation,
value_only=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
else:
FAS.extend( h.feedback_edge_set(constraint_generation=constraint_generation,
value_only=False, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance) )
return FAS
from sage.numerical.mip import MixedIntegerLinearProgram
########################################
# Constraint Generation Implementation #
########################################
if constraint_generation:
p = MixedIntegerLinearProgram(constraint_generation=True,
maximization=False, solver=solver)
# A variable for each edge
b = p.new_variable(binary=True)
# Variables are binary, and their coefficient in the objective is
# the number of occurrences of the corresponding edge, so 1 if the
# graph is simple
p.set_objective(p.sum(b[u, v] for u, v in self.edge_iterator(labels=False)))
p.solve(log=verbose)
# For as long as we do not break because the digraph is acyclic....
while True:
# Building the graph without the edges removed by the MILP
val = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
h = DiGraph([e for e in self.edge_iterator(labels=False) if not val[e]],
format='list_of_edges')
# Is the digraph acyclic ?
isok, certificate = h.is_directed_acyclic(certificate=True)
# If so, we are done !
if isok:
break
# There is a circuit left. Let's add the corresponding
# constraint !
while not isok:
if verbose:
print("Adding a constraint on circuit : {}".format(certificate))
edges = zip(certificate, certificate[1:] + [certificate[0]])
p.add_constraint(p.sum(b[u, v] for u, v in edges), min=1)
# Is there another edge disjoint circuit ?
# for python3, we need to recreate the zip iterator
edges = zip(certificate, certificate[1:] + [certificate[0]])
h.delete_edges(edges)
isok, certificate = h.is_directed_acyclic(certificate=True)
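# All circuits found in this round have been added as constraints;
# re-solve the MILP before testing acyclicity again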
obj = p.solve(log=verbose)
if value_only:
return Integer(round(obj))
else:
# listing the edges contained in the MFAS
val = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
return [e for e in self.edge_iterator(labels=False) if val[e]]
######################################
# Ordering-based MILP Implementation #
######################################
else:
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
b = p.new_variable(binary=True)
d = p.new_variable(integer=True, nonnegative=True)
n = self.order()
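# Each vertex v gets an integer position d[v]; an edge (u, v) can be
# kept (b[u,v] == 0) only when d[u] >= d[v] + 1, so the kept edges
# cannot form a circuit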
for u,v in self.edge_iterator(labels=None):
p.add_constraint(d[u] - d[v] + n * b[u,v], min=1)
for v in self:
p.add_constraint(d[v] <= n)
p.set_objective(p.sum(b[u,v] for u,v in self.edge_iterator(labels=None)))
if value_only:
return Integer(round(p.solve(objective_only=True, log=verbose)))
else:
p.solve(log=verbose)
b_sol = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
return [e for e in self.edge_iterator(labels=None) if b_sol[e]]
### Construction
def reverse(self):
"""
Return a copy of digraph with edges reversed in direction.
EXAMPLES::
sage: D = DiGraph({0: [1,2,3], 1: [0,2], 2: [3], 3: [4], 4: [0,5], 5: [1]})
sage: D.reverse()
Reverse of (): Digraph on 6 vertices
"""
H = DiGraph(multiedges=self.allows_multiple_edges(), loops=self.allows_loops())
H.add_vertices(self)
H.add_edges((v, u, d) for u, v, d in self.edge_iterator())
name = self.name()
if name is None:
name = ''
H.name("Reverse of (%s)"%name)
return H
def reverse_edge(self, u, v=None, label=None, inplace=True, multiedges=None):
"""
Reverse the edge from `u` to `v`.
INPUT:
- ``inplace`` -- boolean (default: ``True``); if ``False``, a new
digraph is created and returned as output, otherwise ``self`` is
modified.
- ``multiedges`` -- boolean (default: ``None``); how to decide what
should be done in case of doubt (for instance when edge `(1,2)` is to
be reversed in a graph while `(2,1)` already exists):
- If set to ``True``, input graph will be forced to allow parallel
edges if necessary and edge `(1,2)` will appear twice in the graph.
- If set to ``False``, only one edge `(1,2)` will remain in the graph
after `(2,1)` is reversed. Besides, the label of edge `(1,2)` will
be overwritten with the label of edge `(2,1)`.
The default behaviour (``multiedges = None``) will raise an exception
each time a subjective decision (setting ``multiedges`` to ``True`` or
``False``) is necessary to perform the operation.
The following forms are all accepted:
- D.reverse_edge( 1, 2 )
- D.reverse_edge( (1, 2) )
- D.reverse_edge( [1, 2] )
- D.reverse_edge( 1, 2, 'label' )
- D.reverse_edge( ( 1, 2, 'label') )
- D.reverse_edge( [1, 2, 'label'] )
- D.reverse_edge( ( 1, 2), label='label' )
EXAMPLES:
If ``inplace`` is ``True`` (default value), ``self`` is modified::
sage: D = DiGraph([(0, 1 ,2)])
sage: D.reverse_edge(0, 1)
sage: D.edges()
[(1, 0, 2)]
If ``inplace`` is ``False``, ``self`` is not modified and a new digraph
is returned::
sage: D = DiGraph([(0, 1, 2)])
sage: re = D.reverse_edge(0, 1, inplace=False)
sage: re.edges()
[(1, 0, 2)]
sage: D.edges()
[(0, 1, 2)]
If ``multiedges`` is ``True``, ``self`` will be forced to allow parallel
edges when and only when it is necessary::
sage: D = DiGraph([(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)])
sage: D.reverse_edge(1, 2, multiedges=True)
sage: D.edges()
[(2, 1, 'A'), (2, 1, 'A'), (2, 3, None)]
sage: D.allows_multiple_edges()
True
Even if ``multiedges`` is ``True``, ``self`` will not be forced to allow
parallel edges when it is not necessary::
sage: D = DiGraph( [(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)] )
sage: D.reverse_edge(2, 3, multiedges=True)
sage: D.edges()
[(1, 2, 'A'), (2, 1, 'A'), (3, 2, None)]
sage: D.allows_multiple_edges()
False
If user specifies ``multiedges = False``, ``self`` will not be forced to
allow parallel edges and a parallel edge will get deleted::
sage: D = DiGraph( [(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)] )
sage: D.edges()
[(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)]
sage: D.reverse_edge(1, 2, multiedges=False)
sage: D.edges()
[(2, 1, 'A'), (2, 3, None)]
Note that in the following graph, specifying ``multiedges = False`` will
result in overwriting the label of `(1, 2)` with the label of `(2, 1)`::
sage: D = DiGraph( [(1, 2, 'B'), (2, 1, 'A'), (2, 3, None)] )
sage: D.edges()
[(1, 2, 'B'), (2, 1, 'A'), (2, 3, None)]
sage: D.reverse_edge(2, 1, multiedges=False)
sage: D.edges()
[(1, 2, 'A'), (2, 3, None)]
If the input edge of the digraph has a weight/label, then the weight/label
is preserved in the output digraph. The user does not need to specify the
weight/label when calling the function::
sage: D = DiGraph([[0, 1, 2], [1, 2, 1]], weighted=True)
sage: D.reverse_edge(0, 1)
sage: D.edges()
[(1, 0, 2), (1, 2, 1)]
sage: re = D.reverse_edge([1, 2], inplace=False)
sage: re.edges()
[(1, 0, 2), (2, 1, 1)]
If ``self`` has multiple copies (parallel edges) of the input edge, only
1 of the parallel edges is reversed::
sage: D = DiGraph([(0, 1, '01'), (0, 1, '01'), (0, 1, 'cat'), (1, 2, '12')], weighted=True, multiedges=True)
sage: re = D.reverse_edge([0, 1, '01'], inplace=False)
sage: re.edges()
[(0, 1, '01'), (0, 1, 'cat'), (1, 0, '01'), (1, 2, '12')]
If ``self`` has multiple copies (parallel edges) of the input edge but
with distinct labels and no input label is specified, only 1 of the
parallel edges is reversed (the edge that is labeled by the first label
on the list returned by :meth:`.edge_label`)::
sage: D = DiGraph([(0, 1, 'A'), (0, 1, 'B'), (0, 1, 'mouse'), (0, 1, 'cat')], multiedges=true)
sage: D.edge_label(0, 1)
['cat', 'mouse', 'B', 'A']
sage: D.reverse_edge(0, 1)
sage: D.edges()
[(0, 1, 'A'), (0, 1, 'B'), (0, 1, 'mouse'), (1, 0, 'cat')]
Finally, an exception is raised when Sage does not know how to choose
between allowing multiple edges and losing some data::
sage: D = DiGraph([(0, 1, 'A'), (1, 0, 'B')])
sage: D.reverse_edge(0, 1)
Traceback (most recent call last):
...
ValueError: reversing the given edge is about to create two parallel
edges but input digraph doesn't allow them - User needs to specify
multiedges is True or False.
The following syntax is supported, but note that you must use the
``label`` keyword::
sage: D = DiGraph()
sage: D.add_edge((1, 2), label='label')
sage: D.edges()
[(1, 2, 'label')]
sage: D.reverse_edge((1, 2), label='label')
sage: D.edges()
[(2, 1, 'label')]
sage: D.add_edge((1, 2), 'label')
sage: D.edges(sort=False)
[((1, 2), 'label', None), (2, 1, 'label')]
sage: D.reverse_edge((1, 2), 'label')
sage: D.edges(sort=False)
[('label', (1, 2), None), (2, 1, 'label')]
TESTS::
sage: D = DiGraph([(0, 1, None)])
sage: D.reverse_edge(0, 1, 'mylabel')
Traceback (most recent call last):
...
ValueError: input edge must exist in the digraph
"""
# Assigns the expected values to u,v, and label depending on the input.
if label is None:
if v is None:
try:
u, v, label = u
except Exception:
try:
u, v = u
except Exception:
pass
else:
if v is None:
try:
u, v = u
except Exception:
pass
if not self.has_edge(u,v,label):
raise ValueError("input edge must exist in the digraph")
tempG = self if inplace else copy(self)
if label is None:
if not tempG.allows_multiple_edges():
label = tempG.edge_label(u, v)
else:
# If digraph has parallel edges for input edge, pick the first
# from the labels on the list
label = tempG.edge_label(u, v)[0]
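# Reversing (u, v) would create a second copy of (v, u) in a digraph
# that does not allow parallel edges; ``multiedges`` decides how to proceed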
if ((not tempG.allows_multiple_edges()) and (tempG.has_edge(v, u))):
# If user wants to force digraph to allow parallel edges
if multiedges:
tempG.allow_multiple_edges(True)
tempG.delete_edge(u, v, label)
tempG.add_edge(v, u, label)
# If user does not want to force digraph to allow parallel edges,
# we delete edge u to v and overwrite v,u with the label of u,v
elif multiedges is False:
tempG.delete_edge(u,v,label)
tempG.set_edge_label(v,u,label)
# User is supposed to specify multiedges True or False
else:
raise ValueError("reversing the given edge is about to "
"create two parallel edges but input digraph "
"doesn't allow them - User needs to specify "
"multiedges is True or False.")
else:
tempG.delete_edge(u, v, label)
tempG.add_edge(v, u, label)
if not inplace:
return tempG
def reverse_edges(self, edges, inplace=True, multiedges=None):
"""
Reverse a list of edges.
INPUT:
- ``edges`` -- a list of edges in the DiGraph.
- ``inplace`` -- boolean (default: ``True``); if ``False``, a new
digraph is created and returned as output, otherwise ``self`` is
modified.
- ``multiedges`` -- boolean (default: ``None``); if ``True``, input
graph will be forced to allow parallel edges when necessary (for more
information see the documentation of :meth:`~DiGraph.reverse_edge`)
.. SEEALSO::
:meth:`~DiGraph.reverse_edge` - Reverses a single edge.
EXAMPLES:
If ``inplace`` is ``True`` (default value), ``self`` is modified::
sage: D = DiGraph({ 0: [1, 1, 3], 2: [3, 3], 4: [1, 5]}, multiedges=true)
sage: D.reverse_edges([[0, 1], [0, 3]])
sage: D.reverse_edges([(2, 3), (4, 5)])
sage: D.edges()
[(0, 1, None), (1, 0, None), (2, 3, None), (3, 0, None),
(3, 2, None), (4, 1, None), (5, 4, None)]
If ``inplace`` is ``False``, ``self`` is not modified and a new digraph
is returned::
sage: D = DiGraph([(0, 1, 'A'), (1, 0, 'B'), (1, 2, 'C')])
sage: re = D.reverse_edges([(0, 1), (1, 2)],
....: inplace=False,
....: multiedges=True)
sage: re.edges()
[(1, 0, 'A'), (1, 0, 'B'), (2, 1, 'C')]
sage: D.edges()
[(0, 1, 'A'), (1, 0, 'B'), (1, 2, 'C')]
sage: D.allows_multiple_edges()
False
sage: re.allows_multiple_edges()
True
If ``multiedges`` is ``True``, ``self`` will be forced to allow parallel
edges when and only when it is necessary::
sage: D = DiGraph([(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)])
sage: D.reverse_edges([(1, 2), (2, 3)], multiedges=True)
sage: D.edges()
[(2, 1, 'A'), (2, 1, 'A'), (3, 2, None)]
sage: D.allows_multiple_edges()
True
Even if ``multiedges`` is ``True``, ``self`` will not be forced to allow
parallel edges when it is not necessary::
sage: D = DiGraph([(1, 2, 'A'), (2, 1, 'A'), (2, 3, None)])
sage: D.reverse_edges([(2, 3)], multiedges=True)
sage: D.edges()
[(1, 2, 'A'), (2, 1, 'A'), (3, 2, None)]
sage: D.allows_multiple_edges()
False
If ``multiedges`` is ``False``, ``self`` will not be forced to allow
parallel edges and an edge will get deleted::
sage: D = DiGraph([(1, 2), (2, 1)])
sage: D.edges()
[(1, 2, None), (2, 1, None)]
sage: D.reverse_edges([(1, 2)], multiedges=False)
sage: D.edges()
[(2, 1, None)]
If the input edge of the digraph has a weight/label, then the weight/label
is preserved in the output digraph. The user does not need to specify the
weight/label when calling the function::
sage: D = DiGraph([(0, 1, '01'), (1, 2, 1), (2, 3, '23')], weighted=True)
sage: D.reverse_edges([(0, 1, '01'), (1, 2), (2, 3)])
sage: D.edges()
[(1, 0, '01'), (2, 1, 1), (3, 2, '23')]
TESTS::
sage: D = digraphs.Circuit(6)
sage: D.reverse_edges(D.edges(), inplace=False).edges()
[(0, 5, None), (1, 0, None), (2, 1, None),
(3, 2, None), (4, 3, None), (5, 4, None)]
sage: D = digraphs.Kautz(2, 3)
sage: Dr = D.reverse_edges(D.edges(), inplace=False, multiedges=True)
sage: Dr.edges() == D.reverse().edges()
True
"""
tempG = self if inplace else copy(self)
for e in edges:
tempG.reverse_edge(e, inplace=True, multiedges=multiedges)
if not inplace:
return tempG
### Distances
def eccentricity(self, v=None, by_weight=False, algorithm=None,
weight_function=None, check_weight=True, dist_dict=None,
with_labels=False):
"""
Return the eccentricity of vertex (or vertices) ``v``.
The eccentricity of a vertex is the maximum distance to any other
vertex.
For more information and examples on how to use input variables, see
:meth:`~GenericGraph.shortest_path_all_pairs`,
:meth:`~GenericGraph.shortest_path_lengths` and
:meth:`~GenericGraph.shortest_paths`
INPUT:
- ``v`` -- either a single vertex or a list of vertices. If it is not
specified, then it is taken to be all vertices.
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if False, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); one of the following
algorithms:
- ``'BFS'`` - the computation is done through a BFS centered on each
vertex successively. Works only if ``by_weight==False``.
- ``'Floyd-Warshall-Cython'`` - a Cython implementation of the
Floyd-Warshall algorithm. Works only if ``by_weight==False`` and
either ``v is None`` or ``v`` contains all vertices of ``self``.
- ``'Floyd-Warshall-Python'`` - a Python implementation of the
Floyd-Warshall algorithm. Works also with weighted graphs, even with
negative weights (but no negative cycle is allowed). However, ``v``
must be ``None`` or contain all vertices of ``self``.
- ``'Dijkstra_NetworkX'`` - the Dijkstra algorithm, implemented in
NetworkX. It works with weighted graphs, but no negative weight is
allowed.
- ``'Dijkstra_Boost'`` - the Dijkstra algorithm, implemented in Boost
(works only with positive weights).
- ``'Johnson_Boost'`` - the Johnson algorithm, implemented in
Boost (works also with negative weights, if there is no negative
cycle). Works only if ``v is None`` or ``v`` contains all
vertices of ``self``.
- ``'From_Dictionary'`` - uses the (already computed) distances that
are provided by the input variable ``dist_dict``.
- ``None`` (default): Sage chooses the best algorithm:
``'From_Dictionary'`` if ``dist_dict`` is not None, ``'BFS'`` for
unweighted graphs, ``'Dijkstra_Boost'`` if all weights are
positive, ``'Johnson_Boost'`` otherwise.
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l``, if ``l``
is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
- ``dist_dict`` -- a dictionary (default: ``None``); a dict of dicts of
distances (used only if ``algorithm=='From_Dictionary'``)
- ``with_labels`` -- boolean (default: ``False``); whether to return a
list or a dictionary keyed by vertices.
EXAMPLES::
sage: G = graphs.KrackhardtKiteGraph().to_directed()
sage: G.eccentricity()
[4, 4, 4, 4, 4, 3, 3, 2, 3, 4]
sage: G.vertices()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
sage: G.eccentricity(7)
2
sage: G.eccentricity([7,8,9])
[2, 3, 4]
sage: G.eccentricity([7,8,9], with_labels=True) == {8: 3, 9: 4, 7: 2}
True
sage: G = DiGraph(3)
sage: G.eccentricity(with_labels=True)
{0: +Infinity, 1: +Infinity, 2: +Infinity}
sage: G = DiGraph({0:[]})
sage: G.eccentricity(with_labels=True)
{0: 0}
sage: G = DiGraph([(0,1,2), (1,2,3), (2,0,2)])
sage: G.eccentricity(algorithm = 'BFS')
[2, 2, 2]
sage: G.eccentricity(algorithm = 'Floyd-Warshall-Cython')
[2, 2, 2]
sage: G.eccentricity(by_weight = True, algorithm = 'Dijkstra_NetworkX')
[5, 5, 4]
sage: G.eccentricity(by_weight = True, algorithm = 'Dijkstra_Boost')
[5, 5, 4]
sage: G.eccentricity(by_weight = True, algorithm = 'Johnson_Boost')
[5, 5, 4]
sage: G.eccentricity(by_weight = True, algorithm = 'Floyd-Warshall-Python')
[5, 5, 4]
sage: G.eccentricity(dist_dict = G.shortest_path_all_pairs(by_weight = True)[0])
[5, 5, 4]
TESTS:
A non-implemented algorithm::
sage: G.eccentricity(algorithm = 'boh')
Traceback (most recent call last):
...
ValueError: unknown algorithm "boh"
An algorithm that does not work with edge weights::
sage: G.eccentricity(by_weight = True, algorithm = 'BFS')
Traceback (most recent call last):
...
ValueError: algorithm 'BFS' does not work with weights
sage: G.eccentricity(by_weight = True, algorithm = 'Floyd-Warshall-Cython')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Cython' does not work with weights
An algorithm that computes the all-pair-shortest-paths when not all
vertices are needed::
sage: G.eccentricity(0, algorithm = 'Floyd-Warshall-Cython')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Cython' works only if all eccentricities are needed
sage: G.eccentricity(0, algorithm = 'Floyd-Warshall-Python')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Python' works only if all eccentricities are needed
sage: G.eccentricity(0, algorithm = 'Johnson_Boost')
Traceback (most recent call last):
...
ValueError: algorithm 'Johnson_Boost' works only if all eccentricities are needed
"""
if weight_function is not None:
by_weight = True
elif by_weight:
def weight_function(e):
return 1 if e[2] is None else e[2]
if algorithm is None:
if dist_dict is not None:
algorithm = 'From_Dictionary'
elif not by_weight:
algorithm = 'BFS'
else:
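# Scan the edge weights once: a negative weight forces 'Johnson_Boost',
# otherwise 'Dijkstra_Boost' can be used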
for e in self.edge_iterator():
try:
if float(weight_function(e)) < 0:
algorithm = 'Johnson_Boost'
break
except (ValueError, TypeError):
raise ValueError("the weight function cannot find the"
" weight of " + str(e))
if algorithm is None:
algorithm = 'Dijkstra_Boost'
if v is not None and not isinstance(v, list):
v = [v]
if v is None or all(u in v for u in self):
if v is None:
v = list(self)
# If we want to use BFS, we use the Cython routine
if algorithm == 'BFS':
if by_weight:
raise ValueError("algorithm 'BFS' does not work with weights")
from sage.graphs.distances_all_pairs import eccentricity
algo = 'standard'
if with_labels:
return dict(zip(v, eccentricity(self, algorithm=algo, vertex_list=v)))
else:
return eccentricity(self, algorithm=algo)
if algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost']:
dist_dict = self.shortest_path_all_pairs(by_weight, algorithm,
weight_function,
check_weight)[0]
algorithm = 'From_Dictionary'
elif algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost']:
raise ValueError("algorithm '" + algorithm + "' works only if all" +
" eccentricities are needed")
ecc = {}
from sage.rings.infinity import Infinity
for u in v:
if algorithm == 'From_Dictionary':
length = dist_dict[u]
else:
# If algorithm is wrong, the error is raised by the
# shortest_path_lengths function
length = self.shortest_path_lengths(u, by_weight=by_weight,
algorithm=algorithm,
weight_function=weight_function,
check_weight=check_weight)
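# A vertex that cannot reach every other vertex has infinite eccentricity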
if len(length) != self.num_verts():
ecc[u] = Infinity
else:
ecc[u] = max(length.values())
if with_labels:
return ecc
else:
if len(ecc) == 1:
# return single value
v, = ecc.values()
return v
return [ecc[u] for u in v]
def radius(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the radius of the DiGraph.
The radius is defined to be the minimum eccentricity of any vertex,
where the eccentricity is the maximum distance to any other
vertex. For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~DiGraph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if False, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); see method
:meth:`eccentricity` for the list of available algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l``, if ``l``
is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES:
The more symmetric a DiGraph is, the smaller (diameter - radius) is::
sage: G = graphs.BarbellGraph(9, 3).to_directed()
sage: G.radius()
3
sage: G.diameter()
6
::
sage: G = digraphs.Circuit(9)
sage: G.radius()
8
sage: G.diameter()
8
TESTS::
sage: G = DiGraph()
sage: G.radius()
Traceback (most recent call last):
...
ValueError: radius is not defined for the empty DiGraph
"""
if not self.order():
raise ValueError("radius is not defined for the empty DiGraph")
if weight_function is not None:
by_weight = True
if by_weight and not weight_function:
def weight_function(e):
return 1 if e[2] is None else e[2]
return min(self.eccentricity(v=None, by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight,
algorithm=algorithm))
def diameter(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the diameter of the DiGraph.
The diameter is defined to be the maximum distance between two vertices.
It is infinite if the DiGraph is not strongly connected.
For more information and examples on how to use input variables, see
:meth:`~GenericGraph.shortest_paths` and
:meth:`~DiGraph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if False, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); one of the following
algorithms:
- ``'BFS'``: the computation is done through a BFS centered on each
vertex successively. Works only if ``by_weight==False``. It computes
all the eccentricities and returns the maximum value.
- ``'Floyd-Warshall-Cython'``: a Cython implementation of the
Floyd-Warshall algorithm. Works only if ``by_weight==False``. It
computes all the eccentricities and returns the maximum value.
- ``'Floyd-Warshall-Python'``: a Python implementation of the
Floyd-Warshall algorithm. Works also with weighted graphs, even with
negative weights (but no negative cycle is allowed). It computes all
the eccentricities and returns the maximum value.
- ``'Dijkstra_NetworkX'``: the Dijkstra algorithm, implemented in
NetworkX. It works with weighted graphs, but no negative weight is
allowed. It computes all the eccentricities and returns the maximum
value.
- ``'DiFUB'``, ``'2Dsweep'``: these algorithms are
implemented in :func:`sage.graphs.distances_all_pairs.diameter` and
:func:`sage.graphs.base.boost_graph.diameter`. ``'2Dsweep'`` returns
a lower bound on the diameter, while ``'DiFUB'`` returns the exact
computed diameter. They also work with negative weights, if there is
no negative cycle. See the functions documentation for more
information.
- ``'standard'``: the standard algorithm is implemented in
:func:`sage.graphs.distances_all_pairs.diameter`. It works only
if ``by_weight==False``. See the function documentation for more
information. It computes all the eccentricities and returns the
maximum value.
- ``'Dijkstra_Boost'``: the Dijkstra algorithm, implemented in Boost
(works only with positive weights). It computes all the
eccentricities and returns the maximum value.
- ``'Johnson_Boost'``: the Johnson algorithm, implemented in
Boost (works also with negative weights, if there is no negative
cycle). It computes all the eccentricities and returns the maximum
value.
- ``None`` (default): Sage chooses the best algorithm: ``'DiFUB'``.
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l``, if ``l``
is not ``None``, else ``1`` as weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES::
sage: G = digraphs.DeBruijn(5,4)
sage: G.diameter()
4
sage: G = digraphs.GeneralizedDeBruijn(9, 3)
sage: G.diameter()
2
TESTS::
sage: G = graphs.RandomGNP(40, 0.4).to_directed()
sage: d1 = G.diameter(algorithm='DiFUB', by_weight=True)
sage: d2 = max(G.eccentricity(algorithm='Dijkstra_Boost', by_weight=True))
sage: d1 == d2
True
sage: G = digraphs.Path(5)
sage: G.diameter(algorithm = 'DiFUB')
+Infinity
sage: G = DiGraph([(1,2,4), (2,1,7)])
sage: G.diameter(algorithm='2Dsweep', by_weight=True)
7.0
sage: G.delete_edge(2,1,7); G.add_edge(2,1,-5);
sage: G.diameter(algorithm='2Dsweep', by_weight=True)
Traceback (most recent call last):
...
ValueError: the graph contains a negative cycle
sage: G = DiGraph()
sage: G.diameter()
Traceback (most recent call last):
...
ValueError: diameter is not defined for the empty DiGraph
:trac:`32095` is fixed::
sage: g6 = 'guQOUOQCW[IaDBCVP_IE\\RfxV@WMSaeHgheEIA@tfOJkB~@EpGLCrs'
sage: g6 += 'aPIpwgQI_`Abs_x?VWxNJAo@w\\hffCDAW]bYGMIZGC_PYOrIw[Gp['
sage: g6 += '@FTgc_O}E?fXAnGCB{gSaUcD'
sage: G = Graph(g6).to_directed()
sage: G.diameter(algorithm='DiFUB', by_weight=False)
3
sage: G.diameter(algorithm='DiFUB', by_weight=True)
3.0
"""
if not self.order():
raise ValueError("diameter is not defined for the empty DiGraph")
if weight_function is not None:
by_weight = True
if by_weight and not weight_function:
def weight_function(e):
return 1 if e[2] is None else e[2]
if algorithm is None:
algorithm = 'DiFUB'
elif algorithm == 'BFS':
algorithm = 'standard'
if algorithm in ['2Dsweep', 'DiFUB']:
if not by_weight:
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
else:
from sage.graphs.base.boost_graph import diameter
return diameter(self, algorithm=algorithm,
weight_function=weight_function,
check_weight=check_weight)
if algorithm == 'standard':
if by_weight:
raise ValueError("algorithm '" + algorithm + "' does not work" +
" on weighted DiGraphs")
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
return max(self.eccentricity(v=None, by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight,
algorithm=algorithm))
def center(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the set of vertices in the center of the DiGraph.
The center is the set of vertices whose eccentricity is equal to the
radius of the DiGraph, i.e., achieving the minimum eccentricity.
For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~DiGraph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if False, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); see method
:meth:`eccentricity` for the list of available algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES:
Every vertex is a center in a Circuit-DiGraph::
sage: G = digraphs.Circuit(9)
sage: G.center()
[0, 1, 2, 3, 4, 5, 6, 7, 8]
The center can be the whole graph::
sage: G.subgraph(G.center()) == G
True
Some other graphs::
sage: G = digraphs.Path(5)
sage: G.center()
[0]
sage: G = DiGraph([(0,1,2), (1,2,3), (2,0,2)])
sage: G.center(by_weight=True)
[2]
TESTS::
sage: G = DiGraph()
sage: G.center()
[]
sage: G = DiGraph(3)
sage: G.center()
[0, 1, 2]
"""
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
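# An empty digraph yields no eccentricities, hence an empty center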
try:
r = min(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == r]
def periphery(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the set of vertices in the periphery of the DiGraph.
The periphery is the set of vertices whose eccentricity is equal to the
diameter of the DiGraph, i.e., achieving the maximum eccentricity.
For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~DiGraph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if False, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); see method
:meth:`eccentricity` for the list of available algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES::
sage: G = graphs.DiamondGraph().to_directed()
sage: G.periphery()
[0, 3]
sage: P = digraphs.Path(5)
sage: P.periphery()
[1, 2, 3, 4]
sage: G = digraphs.Complete(5)
sage: G.subgraph(G.periphery()) == G
True
TESTS::
sage: G = DiGraph()
sage: G.periphery()
[]
sage: G.add_vertex()
0
sage: G.periphery()
[0]
"""
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
try:
d = max(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == d]
### Paths and cycles iterators
def _all_cycles_iterator_vertex(self, vertex, starting_vertices=None, simple=False,
rooted=False, max_length=None, trivial=False,
remove_acyclic_edges=True):
r"""
Return an iterator over the cycles of ``self`` starting with the given
vertex.
INPUT:
- ``vertex`` -- the starting vertex of the cycle
- ``starting_vertices`` -- iterable (default: ``None``); vertices from
which the cycles must start. If ``None``, then all vertices of the
graph can be starting points. This argument is necessary if ``rooted``
is set to ``True``.
- ``simple`` -- boolean (default: ``False``); if set to ``True``, then
only simple cycles are considered. A cycle is simple if the only
vertex occurring twice in it is the starting and ending one.
- ``rooted`` -- boolean (default: ``False``); if set to False, then
cycles differing only by their starting vertex are considered the same
(e.g. ``['a', 'b', 'c', 'a']`` and ``['b', 'c', 'a',
'b']``). Otherwise, all cycles are enumerated.
- ``max_length`` -- non negative integer (default: ``None``); the
maximum length of the enumerated paths. If set to ``None``, then all
lengths are allowed.
- ``trivial`` -- boolean (default: ``False``); if set to ``True``, then
the empty paths are also enumerated.
- ``remove_acyclic_edges`` -- boolean (default: ``True``); whether
acyclic edges must be removed from the graph. Used to avoid
recomputing it for each vertex
OUTPUT:
iterator
EXAMPLES::
sage: g = DiGraph({'a': ['a', 'b'], 'b': ['c'], 'c': ['d'], 'd': ['c']}, loops=True)
sage: it = g._all_cycles_iterator_vertex('a', simple=False, max_length=None)
sage: for i in range(5): print(next(it))
['a', 'a']
['a', 'a', 'a']
['a', 'a', 'a', 'a']
['a', 'a', 'a', 'a', 'a']
['a', 'a', 'a', 'a', 'a', 'a']
sage: it = g._all_cycles_iterator_vertex('c', simple=False, max_length=None)
sage: for i in range(5): print(next(it))
['c', 'd', 'c']
['c', 'd', 'c', 'd', 'c']
['c', 'd', 'c', 'd', 'c', 'd', 'c']
['c', 'd', 'c', 'd', 'c', 'd', 'c', 'd', 'c']
['c', 'd', 'c', 'd', 'c', 'd', 'c', 'd', 'c', 'd', 'c']
sage: it = g._all_cycles_iterator_vertex('d', simple=False, max_length=None)
sage: for i in range(5): print(next(it))
['d', 'c', 'd']
['d', 'c', 'd', 'c', 'd']
['d', 'c', 'd', 'c', 'd', 'c', 'd']
['d', 'c', 'd', 'c', 'd', 'c', 'd', 'c', 'd']
['d', 'c', 'd', 'c', 'd', 'c', 'd', 'c', 'd', 'c', 'd']
It is possible to set a maximum length so that the number of cycles is
finite::
sage: it = g._all_cycles_iterator_vertex('d', simple=False, max_length=6)
sage: list(it)
[['d', 'c', 'd'], ['d', 'c', 'd', 'c', 'd'], ['d', 'c', 'd', 'c', 'd', 'c', 'd']]
When ``simple`` is set to True, the number of cycles is finite since no vertex
but the first one can occur more than once::
sage: it = g._all_cycles_iterator_vertex('d', simple=True, max_length=None)
sage: list(it)
[['d', 'c', 'd']]
By default, the empty cycle is not enumerated::
sage: it = g._all_cycles_iterator_vertex('d', simple=True, trivial=True)
sage: list(it)
[['d'], ['d', 'c', 'd']]
"""
if starting_vertices is None:
starting_vertices = [vertex]
# First enumerate the empty cycle
if trivial:
yield [vertex]
# Then we remove vertices and edges that are not part of any cycle
if remove_acyclic_edges:
sccs = self.strongly_connected_components()
d = {}
for id, component in enumerate(sccs):
for v in component:
d[v] = id
h = copy(self)
h.delete_edges((u, v) for u, v in h.edge_iterator(labels=False) if d[u] != d[v])
else:
h = self
queue = [[vertex]]
if max_length is None:
from sage.rings.infinity import Infinity
max_length = Infinity
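# Breadth-first exploration of the paths starting at ``vertex``: pop the
# oldest path, report it when it closes a cycle, then extend it with
# every out-neighbor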
while queue:
path = queue.pop(0)
# Checks if a cycle has been found
if len(path) > 1 and path[0] == path[-1]:
yield path
# Makes sure that the current cycle is not too long
# Also, if a cycle has been encountered and only simple cycles are
# allowed, then it discards the current path
if len(path) <= max_length and (not simple or path.count(path[-1]) == 1):
for neighbor in h.neighbor_out_iterator(path[-1]):
# If cycles are not rooted, makes sure to keep only the
# minimum cycle according to the lexicographic order
if rooted or neighbor not in starting_vertices or path[0] <= neighbor:
queue.append(path + [neighbor])
def all_cycles_iterator(self, starting_vertices=None, simple=False,
rooted=False, max_length=None, trivial=False):
r"""
Return an iterator over all the cycles of ``self`` starting with one of
the given vertices.
The cycles are enumerated in increasing length order.
INPUT:
- ``starting_vertices`` -- iterable (default: ``None``); vertices from
which the cycles must start. If ``None``, then all vertices of the
graph can be starting points. This argument is necessary if ``rooted``
is set to ``True``.
- ``simple`` -- boolean (default: ``False``); if set to ``True``, then
only simple cycles are considered. A cycle is simple if the only
vertex occurring twice in it is the starting and ending one.
- ``rooted`` -- boolean (default: ``False``); if set to False, then
cycles differing only by their starting vertex are considered the same
(e.g. ``['a', 'b', 'c', 'a']`` and ``['b', 'c', 'a',
'b']``). Otherwise, all cycles are enumerated.
- ``max_length`` -- non negative integer (default: ``None``); the
maximum length of the enumerated paths. If set to ``None``, then all
lengths are allowed.
- ``trivial`` -- boolean (default: ``False``); if set to ``True``, then
the empty paths are also enumerated.
OUTPUT:
iterator
.. SEEALSO::
- :meth:`all_simple_cycles`
AUTHOR:
Alexandre Blondin Masse
EXAMPLES::
sage: g = DiGraph({'a': ['a', 'b'], 'b': ['c'], 'c': ['d'], 'd': ['c']}, loops=True)
sage: it = g.all_cycles_iterator()
sage: for _ in range(7): print(next(it))
['a', 'a']
['a', 'a', 'a']
['c', 'd', 'c']
['a', 'a', 'a', 'a']
['a', 'a', 'a', 'a', 'a']
['c', 'd', 'c', 'd', 'c']
['a', 'a', 'a', 'a', 'a', 'a']
There are no cycles in the empty graph and in acyclic graphs::
sage: g = DiGraph()
sage: it = g.all_cycles_iterator()
sage: list(it)
[]
sage: g = DiGraph({0:[1]})
sage: it = g.all_cycles_iterator()
sage: list(it)
[]
It is possible to restrict the starting vertices of the cycles::
sage: g = DiGraph({'a': ['a', 'b'], 'b': ['c'], 'c': ['d'], 'd': ['c']}, loops=True)
sage: it = g.all_cycles_iterator(starting_vertices=['b', 'c'])
sage: for _ in range(3): print(next(it))
['c', 'd', 'c']
['c', 'd', 'c', 'd', 'c']
['c', 'd', 'c', 'd', 'c', 'd', 'c']
Also, one can bound the length of the cycles::
sage: it = g.all_cycles_iterator(max_length=3)
sage: list(it)
[['a', 'a'], ['a', 'a', 'a'], ['c', 'd', 'c'],
['a', 'a', 'a', 'a']]
By default, cycles differing only by their starting point are not all
enumerated, but this may be parametrized::
sage: it = g.all_cycles_iterator(max_length=3, rooted=False)
sage: list(it)
[['a', 'a'], ['a', 'a', 'a'], ['c', 'd', 'c'],
['a', 'a', 'a', 'a']]
sage: it = g.all_cycles_iterator(max_length=3, rooted=True)
sage: list(it)
[['a', 'a'], ['a', 'a', 'a'], ['c', 'd', 'c'], ['d', 'c', 'd'],
['a', 'a', 'a', 'a']]
One may prefer to enumerate simple cycles, i.e. cycles such that the only
vertex occurring twice in it is the starting and ending one (see also
:meth:`all_simple_cycles`)::
sage: it = g.all_cycles_iterator(simple=True)
sage: list(it)
[['a', 'a'], ['c', 'd', 'c']]
sage: g = digraphs.Circuit(4)
sage: list(g.all_cycles_iterator(simple=True))
[[0, 1, 2, 3, 0]]
"""
if starting_vertices is None:
starting_vertices = self
# Since a cycle is always included in a given strongly connected
# component, we may remove edges from the graph
sccs = self.strongly_connected_components()
d = {}
for id, component in enumerate(sccs):
for v in component:
d[v] = id
h = copy(self)
h.delete_edges((u, v) for u, v in h.edge_iterator(labels=False) if d[u] != d[v])
# We create one cycles iterator per vertex. This is necessary if we
# want to iterate over cycles with increasing length.
vertex_iterators = {v: h._all_cycles_iterator_vertex(v,
starting_vertices=starting_vertices,
simple=simple,
rooted=rooted,
max_length=max_length,
trivial=trivial,
remove_acyclic_edges=False)
for v in starting_vertices}
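# Prime each iterator with its first cycle; (length, cycle) pairs let the
# heap always yield the shortest available cycle first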
cycles = []
for vi in vertex_iterators.values():
try:
cycle = next(vi)
cycles.append((len(cycle), cycle))
except StopIteration:
pass
# Since we always extract a shortest path, using a heap
# can speed up the algorithm
from heapq import heapify, heappop, heappush
heapify(cycles)
while cycles:
# We choose the shortest available cycle
_, shortest_cycle = heappop(cycles)
yield shortest_cycle
# We update the cycle iterator to its next available cycle if it
# exists
try:
cycle = next(vertex_iterators[shortest_cycle[0]])
heappush(cycles, (len(cycle), cycle))
except StopIteration:
pass
def all_simple_cycles(self, starting_vertices=None, rooted=False,
max_length=None, trivial=False):
r"""
Return a list of all simple cycles of ``self``.
INPUT:
- ``starting_vertices`` -- iterable (default: ``None``); vertices from
which the cycles must start. If ``None``, then all vertices of the
graph can be starting points. This argument is necessary if ``rooted``
is set to ``True``.
- ``rooted`` -- boolean (default: ``False``); if set to False, then
cycles differing only by their starting vertex are considered the same
(e.g. ``['a', 'b', 'c', 'a']`` and ``['b', 'c', 'a',
'b']``). Otherwise, all cycles are enumerated.
- ``max_length`` -- non negative integer (default: ``None``); the
maximum length of the enumerated paths. If set to ``None``, then all
lengths are allowed.
- ``trivial`` -- boolean (default: ``False``); if set to ``True``, then
the empty paths are also enumerated.
OUTPUT:
list
.. NOTE::
Although the number of simple cycles of a finite graph is always
finite, computing all its cycles may take a very long time.
EXAMPLES::
sage: g = DiGraph({'a': ['a', 'b'], 'b': ['c'], 'c': ['d'], 'd': ['c']}, loops=True)
sage: g.all_simple_cycles()
[['a', 'a'], ['c', 'd', 'c']]
The directed version of the Petersen graph::
sage: g = graphs.PetersenGraph().to_directed()
sage: g.all_simple_cycles(max_length=4)
[[0, 1, 0], [0, 4, 0], [0, 5, 0], [1, 2, 1], [1, 6, 1], [2, 3, 2],
[2, 7, 2], [3, 8, 3], [3, 4, 3], [4, 9, 4], [5, 8, 5], [5, 7, 5],
[6, 8, 6], [6, 9, 6], [7, 9, 7]]
sage: g.all_simple_cycles(max_length=6)
[[0, 1, 0], [0, 4, 0], [0, 5, 0], [1, 2, 1], [1, 6, 1], [2, 3, 2],
[2, 7, 2], [3, 8, 3], [3, 4, 3], [4, 9, 4], [5, 8, 5], [5, 7, 5],
[6, 8, 6], [6, 9, 6], [7, 9, 7], [0, 1, 2, 3, 4, 0],
[0, 1, 2, 7, 5, 0], [0, 1, 6, 8, 5, 0], [0, 1, 6, 9, 4, 0],
[0, 4, 9, 6, 1, 0], [0, 4, 9, 7, 5, 0], [0, 4, 3, 8, 5, 0],
[0, 4, 3, 2, 1, 0], [0, 5, 8, 3, 4, 0], [0, 5, 8, 6, 1, 0],
[0, 5, 7, 9, 4, 0], [0, 5, 7, 2, 1, 0], [1, 2, 3, 8, 6, 1],
[1, 2, 7, 9, 6, 1], [1, 6, 8, 3, 2, 1], [1, 6, 9, 7, 2, 1],
[2, 3, 8, 5, 7, 2], [2, 3, 4, 9, 7, 2], [2, 7, 9, 4, 3, 2],
[2, 7, 5, 8, 3, 2], [3, 8, 6, 9, 4, 3], [3, 4, 9, 6, 8, 3],
[5, 8, 6, 9, 7, 5], [5, 7, 9, 6, 8, 5], [0, 1, 2, 3, 8, 5, 0],
[0, 1, 2, 7, 9, 4, 0], [0, 1, 6, 8, 3, 4, 0],
[0, 1, 6, 9, 7, 5, 0], [0, 4, 9, 6, 8, 5, 0],
[0, 4, 9, 7, 2, 1, 0], [0, 4, 3, 8, 6, 1, 0],
[0, 4, 3, 2, 7, 5, 0], [0, 5, 8, 3, 2, 1, 0],
[0, 5, 8, 6, 9, 4, 0], [0, 5, 7, 9, 6, 1, 0],
[0, 5, 7, 2, 3, 4, 0], [1, 2, 3, 4, 9, 6, 1],
[1, 2, 7, 5, 8, 6, 1], [1, 6, 8, 5, 7, 2, 1],
[1, 6, 9, 4, 3, 2, 1], [2, 3, 8, 6, 9, 7, 2],
[2, 7, 9, 6, 8, 3, 2], [3, 8, 5, 7, 9, 4, 3],
[3, 4, 9, 7, 5, 8, 3]]
The complete graph (without loops) on `4` vertices::
sage: g = graphs.CompleteGraph(4).to_directed()
sage: g.all_simple_cycles()
[[0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 2, 1], [1, 3, 1], [2, 3, 2],
[0, 1, 2, 0], [0, 1, 3, 0], [0, 2, 1, 0], [0, 2, 3, 0],
[0, 3, 1, 0], [0, 3, 2, 0], [1, 2, 3, 1], [1, 3, 2, 1],
[0, 1, 2, 3, 0], [0, 1, 3, 2, 0], [0, 2, 1, 3, 0],
[0, 2, 3, 1, 0], [0, 3, 1, 2, 0], [0, 3, 2, 1, 0]]
If the graph contains a large number of cycles, one can bound the length
of the cycles, or simply restrict the possible starting vertices of the
cycles::
sage: g = graphs.CompleteGraph(20).to_directed()
sage: g.all_simple_cycles(max_length=2)
[[0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0], [0, 5, 0], [0, 6, 0],
[0, 7, 0], [0, 8, 0], [0, 9, 0], [0, 10, 0], [0, 11, 0],
[0, 12, 0], [0, 13, 0], [0, 14, 0], [0, 15, 0], [0, 16, 0],
[0, 17, 0], [0, 18, 0], [0, 19, 0], [1, 2, 1], [1, 3, 1],
[1, 4, 1], [1, 5, 1], [1, 6, 1], [1, 7, 1], [1, 8, 1], [1, 9, 1],
[1, 10, 1], [1, 11, 1], [1, 12, 1], [1, 13, 1], [1, 14, 1],
[1, 15, 1], [1, 16, 1], [1, 17, 1], [1, 18, 1], [1, 19, 1],
[2, 3, 2], [2, 4, 2], [2, 5, 2], [2, 6, 2], [2, 7, 2], [2, 8, 2],
[2, 9, 2], [2, 10, 2], [2, 11, 2], [2, 12, 2], [2, 13, 2],
[2, 14, 2], [2, 15, 2], [2, 16, 2], [2, 17, 2], [2, 18, 2],
[2, 19, 2], [3, 4, 3], [3, 5, 3], [3, 6, 3], [3, 7, 3], [3, 8, 3],
[3, 9, 3], [3, 10, 3], [3, 11, 3], [3, 12, 3], [3, 13, 3],
[3, 14, 3], [3, 15, 3], [3, 16, 3], [3, 17, 3], [3, 18, 3],
[3, 19, 3], [4, 5, 4], [4, 6, 4], [4, 7, 4], [4, 8, 4], [4, 9, 4],
[4, 10, 4], [4, 11, 4], [4, 12, 4], [4, 13, 4], [4, 14, 4],
[4, 15, 4], [4, 16, 4], [4, 17, 4], [4, 18, 4], [4, 19, 4],
[5, 6, 5], [5, 7, 5], [5, 8, 5], [5, 9, 5], [5, 10, 5],
[5, 11, 5], [5, 12, 5], [5, 13, 5], [5, 14, 5], [5, 15, 5],
[5, 16, 5], [5, 17, 5], [5, 18, 5], [5, 19, 5], [6, 7, 6],
[6, 8, 6], [6, 9, 6], [6, 10, 6], [6, 11, 6], [6, 12, 6],
[6, 13, 6], [6, 14, 6], [6, 15, 6], [6, 16, 6], [6, 17, 6],
[6, 18, 6], [6, 19, 6], [7, 8, 7], [7, 9, 7], [7, 10, 7],
[7, 11, 7], [7, 12, 7], [7, 13, 7], [7, 14, 7], [7, 15, 7],
[7, 16, 7], [7, 17, 7], [7, 18, 7], [7, 19, 7], [8, 9, 8],
[8, 10, 8], [8, 11, 8], [8, 12, 8], [8, 13, 8], [8, 14, 8],
[8, 15, 8], [8, 16, 8], [8, 17, 8], [8, 18, 8], [8, 19, 8],
[9, 10, 9], [9, 11, 9], [9, 12, 9], [9, 13, 9], [9, 14, 9],
[9, 15, 9], [9, 16, 9], [9, 17, 9], [9, 18, 9], [9, 19, 9],
[10, 11, 10], [10, 12, 10], [10, 13, 10], [10, 14, 10],
[10, 15, 10], [10, 16, 10], [10, 17, 10], [10, 18, 10],
[10, 19, 10], [11, 12, 11], [11, 13, 11], [11, 14, 11],
[11, 15, 11], [11, 16, 11], [11, 17, 11], [11, 18, 11],
[11, 19, 11], [12, 13, 12], [12, 14, 12], [12, 15, 12],
[12, 16, 12], [12, 17, 12], [12, 18, 12], [12, 19, 12],
[13, 14, 13], [13, 15, 13], [13, 16, 13], [13, 17, 13],
[13, 18, 13], [13, 19, 13], [14, 15, 14], [14, 16, 14],
[14, 17, 14], [14, 18, 14], [14, 19, 14], [15, 16, 15],
[15, 17, 15], [15, 18, 15], [15, 19, 15], [16, 17, 16],
[16, 18, 16], [16, 19, 16], [17, 18, 17], [17, 19, 17],
[18, 19, 18]]
sage: g = graphs.CompleteGraph(20).to_directed()
sage: g.all_simple_cycles(max_length=2, starting_vertices=[0])
[[0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0], [0, 5, 0], [0, 6, 0],
[0, 7, 0], [0, 8, 0], [0, 9, 0], [0, 10, 0], [0, 11, 0],
[0, 12, 0], [0, 13, 0], [0, 14, 0], [0, 15, 0], [0, 16, 0],
[0, 17, 0], [0, 18, 0], [0, 19, 0]]
One may prefer to distinguish equivalent cycles having distinct starting
vertices (compare the following examples)::
sage: g = graphs.CompleteGraph(4).to_directed()
sage: g.all_simple_cycles(max_length=2, rooted=False)
[[0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 2, 1], [1, 3, 1], [2, 3, 2]]
sage: g.all_simple_cycles(max_length=2, rooted=True)
[[0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 1], [1, 2, 1], [1, 3, 1],
[2, 0, 2], [2, 1, 2], [2, 3, 2], [3, 0, 3], [3, 1, 3], [3, 2, 3]]
"""
return list(self.all_cycles_iterator(starting_vertices=starting_vertices,
simple=True, rooted=rooted,
max_length=max_length, trivial=trivial))
def path_semigroup(self):
"""
The partial semigroup formed by the paths of this quiver.
EXAMPLES::
sage: Q = DiGraph({1: {2: ['a', 'c']}, 2: {3: ['b']}})
sage: F = Q.path_semigroup(); F
Partial semigroup formed by the directed paths of Multi-digraph on 3 vertices
sage: list(F)
[e_1, e_2, e_3, a, c, b, a*b, c*b]
"""
from sage.quivers.path_semigroup import PathSemigroup
return PathSemigroup(self)
### Directed Acyclic Graphs (DAGs)
def topological_sort(self, implementation="default"):
"""
Return a topological sort of the digraph if it is acyclic.
If the digraph contains a directed cycle, a ``TypeError`` is raised. As
topological sorts are not necessarily unique, different implementations
may yield different results.
A topological sort is an ordering of the vertices of the digraph such
that each vertex comes before all of its successors. That is, if `u`
comes before `v` in the sort, then there may be a directed path from `u`
to `v`, but there will be no directed path from `v` to `u`.
INPUT:
- ``implementation`` -- string (default: ``"default"``); either use the
default Cython implementation, or the NetworkX library
(``implementation = "NetworkX"``)
.. SEEALSO::
- :meth:`is_directed_acyclic` -- Tests whether a directed graph is
acyclic (can also return a certificate -- a topological sort or a
circuit in the graph).
EXAMPLES::
sage: D = DiGraph({0: [1, 2, 3], 4: [2, 5], 1: [8], 2: [7], 3: [7],
....: 5: [6, 7], 7: [8], 6: [9], 8: [10], 9: [10]})
sage: D.plot(layout='circular').show()
sage: D.topological_sort()
[4, 5, 6, 9, 0, 1, 2, 3, 7, 8, 10]
::
sage: D.add_edge(9, 7)
sage: D.topological_sort()
[4, 5, 6, 9, 0, 1, 2, 3, 7, 8, 10]
Using the NetworkX implementation ::
sage: s = list(D.topological_sort(implementation="NetworkX")); s # random
[0, 4, 1, 3, 2, 5, 6, 9, 7, 8, 10]
sage: all(s.index(u) < s.index(v) for u, v in D.edges(labels=False))
True
::
sage: D.add_edge(7, 4)
sage: D.topological_sort()
Traceback (most recent call last):
...
TypeError: digraph is not acyclic; there is no topological sort
TESTS:
A wrong value for the ``implementation`` keyword::
sage: D.topological_sort(implementation = "cloud-reading")
Traceback (most recent call last):
...
ValueError: implementation must be set to one of "default" or "NetworkX"
"""
if implementation == "default":
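# The backend returns a pair (is_acyclic, certificate); when the digraph
# is acyclic, the certificate is a topological ordering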
b, ordering = self._backend.is_directed_acyclic(certificate=True)
if b:
return ordering
else:
raise TypeError('digraph is not acyclic; there is no topological sort')
elif implementation == "NetworkX":
import networkx
S = networkx.topological_sort(self.networkx_graph())
if S is None:
raise TypeError('digraph is not acyclic; there is no topological sort')
else:
return S
else:
raise ValueError("implementation must be set to one of \"default\" or \"NetworkX\"")
def topological_sort_generator(self):
"""
Return an iterator over all topological sorts of the digraph if
it is acyclic.
If the digraph contains a directed cycle, a ``TypeError`` is raised.
A topological sort is an ordering of the vertices of the digraph such
that each vertex comes before all of its successors. That is, if u comes
before v in the sort, then there may be a directed path from u to v, but
there will be no directed path from v to u. See also
:meth:`topological_sort`.
AUTHORS:
- Mike Hansen - original implementation
- Robert L. Miller: wrapping, documentation
REFERENCE:
- [1] Pruesse, Gara and Ruskey, Frank. Generating Linear
Extensions Fast. SIAM J. Comput., Vol. 23 (1994), no. 2, pp.
373-386.
EXAMPLES::
sage: D = DiGraph({0: [1, 2], 1: [3], 2: [3, 4]})
sage: D.plot(layout='circular').show()
sage: list(D.topological_sort_generator())
[[0, 1, 2, 3, 4], [0, 2, 1, 3, 4], [0, 2, 1, 4, 3], [0, 2, 4, 1, 3], [0, 1, 2, 4, 3]]
::
sage: for sort in D.topological_sort_generator():
....: for u, v in D.edge_iterator(labels=False):
....: if sort.index(u) > sort.index(v):
....: print("this should never happen")
"""
from sage.combinat.posets.posets import Poset
return Poset(self).linear_extensions()
### Visualization
def layout_acyclic(self, rankdir="up", **options):
"""
Return a ranked layout so that all edges point upward.
To this end, the heights of the vertices are set according to the level
set decomposition of the graph (see :meth:`.level_sets`).
This is achieved by calling ``graphviz`` and ``dot2tex`` if available
(see :meth:`.layout_graphviz`), and using a spring layout with fixed
vertical placement of the vertices otherwise (see
:meth:`.layout_acyclic_dummy` and
:meth:`~sage.graphs.generic_graph.GenericGraph.layout_ranked`).
Non-acyclic graphs are partially supported by ``graphviz``, which then
chooses some edges to point down.
INPUT:
- ``rankdir`` -- string (default: ``'up'``); indicates which direction
the edges should point toward among ``'up'``, ``'down'``, ``'left'``,
or ``'right'``
- ``**options`` -- passed down to
:meth:`~sage.graphs.generic_graph.GenericGraph.layout_ranked` or
:meth:`~sage.graphs.generic_graph.GenericGraph.layout_graphviz`
EXAMPLES::
sage: H = DiGraph({0: [1, 2], 1: [3], 2: [3], 3: [], 5: [1, 6], 6: [2, 3]})
The actual layout computed depends on whether dot2tex and graphviz are
installed, so we don't test its relative values::
sage: H.layout_acyclic()
{0: [..., ...], 1: [..., ...], 2: [..., ...], 3: [..., ...], 5: [..., ...], 6: [..., ...]}
sage: H = DiGraph({0: [1]})
sage: pos = H.layout_acyclic(rankdir='up')
sage: pos[1][1] > pos[0][1] + .5
True
sage: pos = H.layout_acyclic(rankdir='down')
sage: pos[1][1] < pos[0][1] - .5
True
sage: pos = H.layout_acyclic(rankdir='right')
sage: pos[1][0] > pos[0][0] + .5
True
sage: pos = H.layout_acyclic(rankdir='left')
sage: pos[1][0] < pos[0][0] - .5
True
"""
if have_dot2tex():
return self.layout_graphviz(rankdir=rankdir, **options)
else:
return self.layout_acyclic_dummy(rankdir=rankdir, **options)
def layout_acyclic_dummy(self, heights=None, rankdir='up', **options):
"""
Return a ranked layout so that all edges point upward.
To this end, the heights of the vertices are set according to the level
set decomposition of the graph (see :meth:`level_sets`). This is
achieved by a spring layout with fixed vertical placement of the
vertices (see
:meth:`~sage.graphs.generic_graph.GenericGraph.layout_ranked`).
INPUT:
- ``rankdir`` -- string (default: ``'up'``); indicates which direction
the edges should point toward among ``'up'``, ``'down'``, ``'left'``,
or ``'right'``
- ``**options`` -- passed down to
:meth:`~sage.graphs.generic_graph.GenericGraph.layout_ranked`
EXAMPLES::
sage: H = DiGraph({0: [1, 2], 1: [3], 2: [3], 3: [], 5: [1, 6], 6: [2, 3]})
sage: H.layout_acyclic_dummy()
{0: [1.0..., 0], 1: [1.0..., 1], 2: [1.5..., 2], 3: [1.5..., 3], 5: [2.0..., 0], 6: [2.0..., 1]}
sage: H = DiGraph({0: [1]})
sage: H.layout_acyclic_dummy(rankdir='up')
{0: [0.5..., 0], 1: [0.5..., 1]}
sage: H.layout_acyclic_dummy(rankdir='down')
{0: [0.5..., 1], 1: [0.5..., 0]}
sage: H.layout_acyclic_dummy(rankdir='left')
{0: [1, 0.5...], 1: [0, 0.5...]}
sage: H.layout_acyclic_dummy(rankdir='right')
{0: [0, 0.5...], 1: [1, 0.5...]}
sage: H = DiGraph({0: [1, 2], 1: [3], 2: [3], 3: [1], 5: [1, 6], 6: [2, 3]})
sage: H.layout_acyclic_dummy()
Traceback (most recent call last):
...
ValueError: `self` should be an acyclic graph
TESTS:
:trac:`31681` is fixed::
sage: H = DiGraph({0: [1], 'X': [1]}, format='dict_of_lists')
sage: pos = H.layout_acyclic_dummy(rankdir='up')
sage: pos['X'][1] == 0 and pos[0][1] == 0
True
sage: pos[1][1] == 1
True
"""
if heights is None:
if not self.is_directed_acyclic():
raise ValueError("`self` should be an acyclic graph")
levels = self.level_sets()
# Sort vertices in each level in best effort mode
for i in range(len(levels)):
try:
l = sorted(levels[i])
levels[i] = l
                except TypeError:
continue
            if rankdir == 'down' or rankdir == 'left':
levels.reverse()
heights = {i: levels[i] for i in range(len(levels))}
positions = self.layout_ranked(heights=heights, **options)
if rankdir == 'left' or rankdir == 'right':
for coordinates in positions.values():
coordinates.reverse()
return positions
def level_sets(self):
r"""
Return the level set decomposition of the digraph.
OUTPUT:
        - a list of non-empty lists of vertices of this graph
The level set decomposition of the digraph is a list `l` such that the
level `l[i]` contains all the vertices having all their predecessors in
the levels `l[j]` for `j < i`, and at least one in level `l[i-1]`
(unless `i = 0`).
The level decomposition contains exactly the vertices not occurring in
any cycle of the graph. In particular, the graph is acyclic if and only
if the decomposition forms a set partition of its vertices, and we
recover the usual level set decomposition of the corresponding poset.
EXAMPLES::
sage: H = DiGraph({0: [1, 2], 1: [3], 2: [3], 3: [], 5: [1, 6], 6: [2, 3]})
sage: H.level_sets()
[[0, 5], [1, 6], [2], [3]]
sage: H = DiGraph({0: [1, 2], 1: [3], 2: [3], 3: [1], 5: [1, 6], 6: [2, 3]})
sage: H.level_sets()
[[0, 5], [6], [2]]
This routine is mostly used for Hasse diagrams of posets::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0: [1, 2], 1: [3], 2: [3], 3: []})
sage: [len(x) for x in H.level_sets()]
[1, 2, 1]
::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0: [1, 2], 1: [3], 2: [4], 3: [4]})
sage: [len(x) for x in H.level_sets()]
[1, 2, 1, 1]
Complexity: `O(n+m)` in time and `O(n)` in memory (besides the storage
of the graph itself), where `n` and `m` are respectively the number of
vertices and edges (assuming that appending to a list is constant time,
which it is not quite).
"""
in_degrees = self.in_degree(labels=True)
level = [x for x in in_degrees if not in_degrees[x]]
Levels = []
while level:
Levels.append(level)
new_level = []
for x in level:
for y in self.neighbor_out_iterator(x):
in_degrees[y] -= 1
if not in_degrees[y]:
new_level.append(y)
level = new_level
return Levels
def is_aperiodic(self):
r"""
Return whether the current ``DiGraph`` is aperiodic.
A directed graph is aperiodic if there is no integer `k > 1` that
divides the length of every cycle in the graph. See the
:wikipedia:`Aperiodic_graph` for more information.
EXAMPLES:
The following graph has period ``2``, so it is not aperiodic::
sage: g = DiGraph({0: [1], 1: [0]})
sage: g.is_aperiodic()
False
The following graph has a cycle of length 2 and a cycle of length 3,
so it is aperiodic::
sage: g = DiGraph({0: [1, 4], 1: [2], 2: [0], 4: [0]})
sage: g.is_aperiodic()
True
.. SEEALSO::
:meth:`period`
"""
return self.period() == 1
def period(self):
r"""
Return the period of the current ``DiGraph``.
The period of a directed graph is the largest integer that divides the
length of every cycle in the graph. See the :wikipedia:`Aperiodic_graph`
for more information.
EXAMPLES:
The following graph has period ``2``::
sage: g = DiGraph({0: [1], 1: [0]})
sage: g.period()
2
The following graph has a cycle of length 2 and a cycle of length 3,
so it has period ``1``::
sage: g = DiGraph({0: [1, 4], 1: [2], 2: [0], 4: [0]})
sage: g.period()
1
Here is an example of computing the period of a digraph which is not
strongly connected. By definition, it is the :func:`gcd` of the periods
of its strongly connected components::
sage: g = DiGraph({-1: [-2], -2: [-3], -3: [-1],
....: 1: [2], 2: [1]})
sage: g.period()
1
sage: sorted([s.period() for s
....: in g.strongly_connected_components_subgraphs()])
[2, 3]
ALGORITHM:
See the networkX implementation of ``is_aperiodic``, that is based on
breadth first search.
.. SEEALSO::
:meth:`is_aperiodic`
"""
from sage.arith.all import gcd
g = 0
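        # Within each strongly connected component, run a BFS from one vertex and
        # take the gcd of (current level - recorded level of the endpoint) over
        # all non-tree edges; the period is the gcd of these values across
        # components.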
for component in self.strongly_connected_components():
levels = {s: None for s in component}
vertices_in_scc = levels # considers level as a set
s = component[0]
levels[s] = 0
this_level = [s]
l = 1
while this_level:
next_level = []
for u in this_level:
# we have levels[u] == l - 1
for v in self.neighbor_out_iterator(u):
# ignore edges leaving the component
if v not in vertices_in_scc:
continue
level_v = levels[v]
if level_v is not None: # Non-Tree Edge
g = gcd(g, l - level_v)
if g == 1:
return 1
else: # Tree Edge
next_level.append(v)
levels[v] = l
this_level = next_level
l += 1
return g
def flow_polytope(self, edges=None, ends=None, backend=None):
r"""
Return the flow polytope of a digraph.
The flow polytope of a directed graph is the polytope consisting of all
nonnegative flows on the graph with a given set `S` of sources and a
given set `T` of sinks.
A *flow* on a directed graph `G` with a given set `S` of sources and a
given set `T` of sinks means an assignment of a nonnegative real to each
edge of `G` such that the flow is conserved in each vertex outside of
`S` and `T`, and there is a unit of flow entering each vertex in `S` and
a unit of flow leaving each vertex in `T`. These flows clearly form a
polytope in the space of all assignments of reals to the edges of `G`.
The polytope is empty unless the sets `S` and `T` are equinumerous.
By default, `S` is taken to be the set of all sources (i.e., vertices of
indegree `0`) of `G`, and `T` is taken to be the set of all sinks (i.e.,
vertices of outdegree `0`) of `G`. If a different choice of `S` and `T`
is desired, it can be specified using the optional ``ends`` parameter.
The polytope is returned as a polytope in `\RR^m`, where `m` is the
number of edges of the digraph ``self``. The `k`-th coordinate of a
point in the polytope is the real assigned to the `k`-th edge of
``self``. The order of the edges is the one returned by
``self.edges()``. If a different order is desired, it can be specified
using the optional ``edges`` parameter.
The faces and volume of these polytopes are of interest. Examples of
these polytopes are the Chan-Robbins-Yuen polytope and the
Pitman-Stanley polytope [PS2002]_.
INPUT:
- ``edges`` -- list (default: ``None``); a list of edges of ``self``. If
not specified, the list of all edges of ``self`` is used with the
default ordering of ``self.edges()``. This determines which coordinate
of a point in the polytope will correspond to which edge of
``self``. It is also possible to specify a list which contains not all
edges of ``self``; this results in a polytope corresponding to the
flows which are `0` on all remaining edges. Notice that the edges
          entered here must be in precisely the same format as output by
``self.edges()``; so, if ``self.edges()`` outputs an edge in the form
``(1, 3, None)``, then ``(1, 3)`` will not do!
- ``ends`` -- (optional, default: ``(self.sources(), self.sinks())``) a
pair `(S, T)` of an iterable `S` and an iterable `T`.
- ``backend`` -- string or ``None`` (default); the backend to use;
see :meth:`sage.geometry.polyhedron.constructor.Polyhedron`
.. NOTE::
Flow polytopes can also be built through the ``polytopes.<tab>``
object::
sage: polytopes.flow_polytope(digraphs.Path(5))
A 0-dimensional polyhedron in QQ^4 defined as the convex hull of 1 vertex
EXAMPLES:
A commutative square::
sage: G = DiGraph({1: [2, 3], 2: [4], 3: [4]})
sage: fl = G.flow_polytope(); fl
A 1-dimensional polyhedron in QQ^4 defined as the convex hull
of 2 vertices
sage: fl.vertices()
(A vertex at (0, 1, 0, 1), A vertex at (1, 0, 1, 0))
Using a different order for the edges of the graph::
sage: fl = G.flow_polytope(edges=G.edges(key=lambda x: x[0] - x[1])); fl
A 1-dimensional polyhedron in QQ^4 defined as the convex hull of 2 vertices
sage: fl.vertices()
(A vertex at (0, 1, 1, 0), A vertex at (1, 0, 0, 1))
A tournament on 4 vertices::
sage: H = digraphs.TransitiveTournament(4)
sage: fl = H.flow_polytope(); fl
A 3-dimensional polyhedron in QQ^6 defined as the convex hull
of 4 vertices
sage: fl.vertices()
(A vertex at (0, 0, 1, 0, 0, 0),
A vertex at (0, 1, 0, 0, 0, 1),
A vertex at (1, 0, 0, 0, 1, 0),
A vertex at (1, 0, 0, 1, 0, 1))
Restricting to a subset of the edges::
sage: fl = H.flow_polytope(edges=[(0, 1, None), (1, 2, None),
....: (2, 3, None), (0, 3, None)])
sage: fl
A 1-dimensional polyhedron in QQ^4 defined as the convex hull
of 2 vertices
sage: fl.vertices()
(A vertex at (0, 0, 0, 1), A vertex at (1, 1, 1, 0))
Using a different choice of sources and sinks::
sage: fl = H.flow_polytope(ends=([1], [3])); fl
A 1-dimensional polyhedron in QQ^6 defined as the convex hull
of 2 vertices
sage: fl.vertices()
(A vertex at (0, 0, 0, 1, 0, 1), A vertex at (0, 0, 0, 0, 1, 0))
sage: fl = H.flow_polytope(ends=([0, 1], [3])); fl
The empty polyhedron in QQ^6
sage: fl = H.flow_polytope(ends=([3], [0])); fl
The empty polyhedron in QQ^6
sage: fl = H.flow_polytope(ends=([0, 1], [2, 3])); fl
A 3-dimensional polyhedron in QQ^6 defined as the convex hull
of 5 vertices
sage: fl.vertices()
(A vertex at (0, 0, 1, 1, 0, 0),
A vertex at (0, 1, 0, 0, 1, 0),
A vertex at (1, 0, 0, 2, 0, 1),
A vertex at (1, 0, 0, 1, 1, 0),
A vertex at (0, 1, 0, 1, 0, 1))
sage: fl = H.flow_polytope(edges=[(0, 1, None), (1, 2, None),
....: (2, 3, None), (0, 2, None),
....: (1, 3, None)],
....: ends=([0, 1], [2, 3])); fl
A 2-dimensional polyhedron in QQ^5 defined as the convex hull
of 4 vertices
sage: fl.vertices()
(A vertex at (0, 0, 0, 1, 1),
A vertex at (1, 2, 1, 0, 0),
A vertex at (1, 1, 0, 0, 1),
A vertex at (0, 1, 1, 1, 0))
A digraph with one source and two sinks::
sage: Y = DiGraph({1: [2], 2: [3, 4]})
sage: Y.flow_polytope()
The empty polyhedron in QQ^3
A digraph with one vertex and no edge::
sage: Z = DiGraph({1: []})
sage: Z.flow_polytope()
A 0-dimensional polyhedron in QQ^0 defined as the convex hull
of 1 vertex
A digraph with multiple edges (:trac:`28837`)::
sage: G = DiGraph([(0, 1), (0,1)], multiedges=True)
sage: G
Multi-digraph on 2 vertices
sage: P = G.flow_polytope()
sage: P
A 1-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices
sage: P.vertices()
(A vertex at (1, 0), A vertex at (0, 1))
sage: P.lines()
()
"""
from sage.geometry.polyhedron.constructor import Polyhedron
if edges is None:
edges = self.edges(sort=False)
m = len(edges)
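        # Inequalities are given in Polyhedron's [b, a_1, ..., a_m] format,
        # meaning b + a*x >= 0; the ones below encode x_i >= 0 for each edge.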
ineqs = [[0] * (i + 1) + [1] + [0] * (m - i - 1) for i in range(m)]
eqs = []
for u in self:
ins = set(self.incoming_edge_iterator(u))
outs = set(self.outgoing_edge_iterator(u))
eq = [Integer(j in ins) - Integer(j in outs) for j in edges]
const = 0
if ends is None:
if not ins: # sources (indegree 0)
const += 1
if not outs: # sinks (outdegree 0)
const -= 1
else:
if u in ends[0]: # chosen sources
const += 1
if u in ends[1]: # chosen sinks
const -= 1
eq = [const] + eq
eqs.append(eq)
return Polyhedron(ieqs=ineqs, eqns=eqs, backend=backend)
def is_tournament(self):
r"""
Check whether the digraph is a tournament.
A tournament is a digraph in which each pair of distinct vertices is
connected by a single arc.
EXAMPLES::
sage: g = digraphs.RandomTournament(6)
sage: g.is_tournament()
True
sage: u,v = next(g.edge_iterator(labels=False))
sage: g.add_edge(v, u)
sage: g.is_tournament()
False
sage: g.add_edges([(u, v), (v, u)])
sage: g.is_tournament()
False
.. SEEALSO::
- :wikipedia:`Tournament_(graph_theory)`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.RandomTournament`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.TransitiveTournament`
"""
self._scream_if_not_simple()
if self.size() != self.order() * (self.order() - 1) // 2:
return False
import itertools
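        # Given the edge count checked above, it suffices to verify that for
        # every pair of vertices exactly one of the two possible arcs exists,
        # i.e. that has_edge(u, v) and has_edge(v, u) never agree.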
return not any(self.has_edge(u, v) == self.has_edge(v, u)
for u,v in itertools.combinations(self, 2))
def _girth_bfs(self, odd=False, certificate=False):
r"""
Return the girth of the digraph using breadth-first search.
Loops are ignored, so the returned value is at least 2.
INPUT:
- ``odd`` -- boolean (default: ``False``); whether to compute the odd
girth
- ``certificate`` -- boolean (default: ``False``); whether to return
``(g, c)``, where ``g`` is the (odd) girth and ``c`` is a list
of vertices of a directed cycle of length ``g`` in the graph,
thus providing a certificate that the (odd) girth is at most ``g``,
or ``None`` if ``g`` is infinite
EXAMPLES:
A digraph with girth 4 and odd girth 5::
sage: G = DiGraph([(0, 1), (1, 2), (1, 3), (2, 3), (3, 4), (4, 0)])
sage: G._girth_bfs(certificate=True) # random
(4, [1, 3, 4, 0])
sage: G._girth_bfs(odd=True)
5
.. SEEALSO::
* :meth:`~sage.graphs.GenericGraph.girth` -- return the girth of the
graph
* :meth:`~sage.graphs.GenericGraph.odd_girth` -- return the odd
girth of the graph
"""
n = self.num_verts()
best = n + 1
seen = set()
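        # For every start vertex w, grow a forward BFS ball (outList) and a
        # backward BFS ball (inList) simultaneously.  If the new forward level
        # meets the previous backward ball, w lies on a directed cycle of length
        # 2*depth - 1; if the new backward level meets the new forward level,
        # on one of length 2*depth.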
for w in self:
seen.add(w)
inSpan, outSpan = {w: None}, {w: None}
depth = 1
outList, inList = set([w]), set([w])
while 2 * depth <= best:
nextOutList, nextInList = set(), set()
for v in outList:
for u in self.neighbor_out_iterator(v):
if u in seen:
continue
if u not in outSpan:
outSpan[u] = v
nextOutList.add(u)
if u in inList:
best = depth * 2 - 1
ends = (v, u)
bestSpans = (outSpan, inSpan)
break
if best == 2 * depth - 1:
break
if best == 2 * depth - 1:
break
for v in inList:
for u in self.neighbor_in_iterator(v):
if u in seen:
continue
if u not in inSpan:
inSpan[u] = v
nextInList.add(u)
if not odd and u in nextOutList:
best = depth * 2
ends = (u, v)
bestSpans = (outSpan, inSpan)
break
if best == 2 * depth:
break
if best <= 2:
break
outList = nextOutList
inList = nextInList
depth += 1
if best == n + 1:
from sage.rings.infinity import Infinity
return (Infinity, None) if certificate else Infinity
if certificate:
cycles = {}
for x, span in zip(ends, bestSpans):
cycles[x] = []
y = x
while span[y] is not None:
cycles[x].append(y)
y = span[y]
cycles[x].append(y)
u, v = ends
return (best, list(reversed(cycles[u])) + cycles[v])
return best
def out_branchings(self, source, spanning=True):
r"""
Return an iterator over the out branchings rooted at given vertex in
``self``.
An out-branching is a directed tree rooted at ``source`` whose arcs are
directed from source to leaves. An out-branching is spanning if it
contains all vertices of the digraph.
        If no spanning out branching rooted at ``source`` exists, this method
        either raises a ``ValueError`` or returns a non-spanning out branching
        rooted at ``source``, depending on the value of ``spanning``.
INPUT:
- ``source`` -- vertex used as the source for all out branchings.
        - ``spanning`` -- boolean (default: ``True``); if ``False``, return a
          maximum out branching from ``source``. Otherwise, return a spanning
          out branching if one exists.
OUTPUT:
An iterator over the out branchings rooted in the given source.
.. SEEALSO::
- :meth:`~sage.graphs.digraph.DiGraph.in_branchings`
-- iterator over in-branchings rooted at given vertex.
- :meth:`~sage.graphs.graph.Graph.spanning_trees`
-- returns all spanning trees.
- :meth:`~sage.graphs.generic_graph.GenericGraph.spanning_trees_count`
-- counts the number of spanning trees.
ALGORITHM:
Recursively computes all out branchings.
At each step:
0. clean the graph (see below)
1. pick an edge e out of source
2. find all out branchings that do not contain e by first
removing it
3. find all out branchings that do contain e by first
merging the end vertices of e
        Cleaning the graph means removing loops and replacing multiple edges by
        a single edge with an appropriate label, since these lead to the same
        computation steps.
EXAMPLES:
A bidirectional 4-cycle::
sage: G = DiGraph({1:[2,3], 2:[1,4], 3:[1,4], 4:[2,3]}, format='dict_of_lists')
sage: list(G.out_branchings(1))
[Digraph on 4 vertices,
Digraph on 4 vertices,
Digraph on 4 vertices,
Digraph on 4 vertices]
With the Petersen graph turned into a symmetric directed graph::
sage: G = graphs.PetersenGraph().to_directed()
sage: len(list(G.out_branchings(0)))
2000
With a non connected ``DiGraph`` and ``spanning = True``::
sage: G = graphs.PetersenGraph().to_directed() + graphs.PetersenGraph().to_directed()
sage: G.out_branchings(0, spanning=True)
Traceback (most recent call last):
...
ValueError: no spanning out branching from vertex (0) exist
With a non connected ``DiGraph`` and ``spanning = False``::
sage: g=DiGraph([(0,1), (0,1), (1,2), (3,4)],multiedges=True)
sage: list(g.out_branchings(0, spanning=False))
[Digraph on 3 vertices, Digraph on 3 vertices]
With multiedges::
sage: G = DiGraph({0:[1,1,1], 1:[2,2]}, format='dict_of_lists', multiedges=True)
sage: len(list(G.out_branchings(0)))
6
With a DiGraph already being a spanning out branching::
sage: G = DiGraph({0:[1,2], 1:[3,4], 2:[5], 3:[], 4:[], 5:[]}, format='dict_of_lists')
sage: next(G.out_branchings(0)) == G
True
TESTS:
The empty ``DiGraph``::
sage: G = DiGraph()
sage: G.out_branchings(0)
Traceback (most recent call last):
...
ValueError: vertex (0) is not a vertex of the digraph
sage: edges = [(0,0,'x'), (0,0,'y')]
sage: G = DiGraph(edges, multiedges=True, loops=True, weighted=True)
sage: list(G.out_branchings(0))
[Digraph on 1 vertex]
sage: edges = [(0,1,'x'), (0,1,'y'), (1,2,'z'), (2,0,'w')]
sage: G = DiGraph(edges, multiedges=True, loops=True, weighted=True)
sage: len(list(G.out_branchings(0)))
2
"""
def _rec_out_branchings(depth):
r"""
The recursive function used to enumerate out branchings.
This function makes use of the following to keep track of partial
out branchings:
list_edges -- list of edges in self.
list_merged_edges -- list of edges that are currently merged
graph -- a copy of self where edges have an appropriate label
"""
if not depth:
                # We have enough merged edges to form an out_branching
# We iterate over the lists of labels in list_merged_edges and
# yield the corresponding out_branchings
for indexes in product(*list_merged_edges):
yield DiGraph([list_edges[index] for index in indexes],
format='list_of_edges', pos=self.get_pos())
# 1) Clean the graph
# delete loops on source if any
D.delete_edges(D.incoming_edge_iterator(source))
# merge multi-edges if any by concatenating their labels
if D.has_multiple_edges():
merged_multiple_edges = {}
for u, v, l in D.multiple_edges():
D.delete_edge(u, v, l)
if (u, v) not in merged_multiple_edges:
merged_multiple_edges[(u, v)] = l
else:
merged_multiple_edges[(u, v)] += l
D.add_edges([(u, v, l) for (u, v),l in merged_multiple_edges.items()])
# 2) Pick an edge e outgoing from the source
try:
s, x, l = next(D.outgoing_edge_iterator(source))
            except StopIteration:
return
# 3) Find all out_branchings that do not contain e
# by first removing it
D.delete_edge(s, x, l)
if len(list(D.depth_first_search(source))) == depth + 1:
for out_branch in _rec_out_branchings(depth):
yield out_branch
D.add_edge(s, x, l)
# 4) Find all out_branchings that do contain e by merging
# the end vertices of e
            # store the edges needed to later unmerge the end vertices of e
saved_edges = D.outgoing_edges(source)
saved_edges.remove((s, x, l))
saved_edges += D.outgoing_edges(x)
saved_edges += D.incoming_edges(x)
D.merge_vertices((source, x))
list_merged_edges.add(l)
for out_branch in _rec_out_branchings(depth - 1):
yield out_branch
list_merged_edges.remove(l)
# unmerge the end vertices of e
D.delete_vertex(source)
D.add_edges(saved_edges)
def _singleton_out_branching():
r"""
Returns a DiGraph containing only ``source`` and no edges.
"""
D = DiGraph()
D.add_vertex(source)
yield D
if not self.has_vertex(source):
raise ValueError("vertex ({0}) is not a vertex of the digraph".format(source))
# check if self.order == 1
if self.order() == 1:
return _singleton_out_branching()
        # check if the source can reach every other vertex
if spanning:
depth = self.order() - 1
if len(list(self.depth_first_search(source))) < self.order():
raise ValueError("no spanning out branching from vertex ({0}) exist".format(source))
else:
depth = len(list(self.depth_first_search(source))) - 1
# if vertex is isolated
if not depth:
return _singleton_out_branching()
# We build a copy of self in which each edge has a distinct label.
# On the way, we remove loops and edges incoming to source.
D = DiGraph(multiedges=True, loops=True)
list_edges = list(self.edges(sort=False))
for i, (u, v, _) in enumerate(list_edges):
if u != v and v != source:
D.add_edge(u, v, (i,))
list_merged_edges = set()
return _rec_out_branchings(depth)
def in_branchings(self, source, spanning=True):
r"""
Return an iterator over the in branchings rooted at given vertex in
``self``.
An in-branching is a directed tree rooted at ``source`` whose arcs are
directed to source from leaves. An in-branching is spanning if it
contains all vertices of the digraph.
        If no spanning in branching rooted at ``source`` exists, this method
        either raises a ``ValueError`` or returns a non-spanning in branching
        rooted at ``source``, depending on the value of ``spanning``.
INPUT:
- ``source`` -- vertex used as the source for all in branchings.
        - ``spanning`` -- boolean (default: ``True``); if ``False``, return a
          maximum in branching to ``source``. Otherwise, return a spanning in
          branching if one exists.
OUTPUT:
An iterator over the in branchings rooted in the given source.
.. SEEALSO::
- :meth:`~sage.graphs.digraph.DiGraph.out_branchings`
-- iterator over out-branchings rooted at given vertex.
- :meth:`~sage.graphs.graph.Graph.spanning_trees`
-- returns all spanning trees.
- :meth:`~sage.graphs.generic_graph.GenericGraph.spanning_trees_count`
-- counts the number of spanning trees.
ALGORITHM:
Recursively computes all in branchings.
At each step:
0. clean the graph (see below)
1. pick an edge e incoming to source
2. find all in branchings that do not contain e by first
removing it
3. find all in branchings that do contain e by first
merging the end vertices of e
        Cleaning the graph means removing loops and replacing multiple edges by
        a single edge with an appropriate label, since these lead to the same
        computation steps.
EXAMPLES:
A bidirectional 4-cycle::
sage: G = DiGraph({1:[2,3], 2:[1,4], 3:[1,4], 4:[2,3]}, format='dict_of_lists')
sage: list(G.in_branchings(1))
[Digraph on 4 vertices,
Digraph on 4 vertices,
Digraph on 4 vertices,
Digraph on 4 vertices]
With the Petersen graph turned into a symmetric directed graph::
sage: G = graphs.PetersenGraph().to_directed()
sage: len(list(G.in_branchings(0)))
2000
With a non connected ``DiGraph`` and ``spanning = True``::
sage: G = graphs.PetersenGraph().to_directed() + graphs.PetersenGraph().to_directed()
sage: G.in_branchings(0)
Traceback (most recent call last):
...
ValueError: no spanning in branching to vertex (0) exist
With a non connected ``DiGraph`` and ``spanning = False``::
sage: g=DiGraph([(1,0), (1,0), (2,1), (3,4)],multiedges=True)
sage: list(g.in_branchings(0,spanning=False))
[Digraph on 3 vertices, Digraph on 3 vertices]
With multiedges::
sage: G = DiGraph({0:[1,1,1], 1:[2,2]}, format='dict_of_lists', multiedges=True)
sage: len(list(G.in_branchings(2)))
6
With a DiGraph already being a spanning in branching::
sage: G = DiGraph({0:[], 1:[0], 2:[0], 3:[1], 4:[1], 5:[2]}, format='dict_of_lists')
sage: next(G.in_branchings(0)) == G
True
TESTS:
The empty ``DiGraph``::
sage: G = DiGraph()
sage: G.in_branchings(0)
Traceback (most recent call last):
...
ValueError: vertex (0) is not a vertex of the digraph
sage: edges = [(0,0,'x'), (0,0,'y')]
sage: G = DiGraph(edges, multiedges=True, loops=True, weighted=True)
sage: list(G.in_branchings(0))
[Digraph on 1 vertex]
sage: edges = [(0,1,'x'), (0,1,'y'), (1,2,'z'), (2,0,'w')]
sage: G = DiGraph(edges, multiedges=True, loops=True, weighted=True)
sage: len(list(G.in_branchings(0)))
1
"""
def _rec_in_branchings(depth):
r"""
The recursive function used to enumerate in branchings.
This function makes use of the following to keep track of partial in
branchings:
list_edges -- list of edges in self.
list_merged_edges -- list of edges that are currently merged
graph -- a copy of self where edges have an appropriate label
"""
if not depth:
                # We have enough merged edges to form an in_branching
# We iterate over the lists of labels in list_merged_edges and
# yield the corresponding in_branchings
for indexes in product(*list_merged_edges):
yield DiGraph([list_edges[index] for index in indexes],
format='list_of_edges', pos=self.get_pos())
# 1) Clean the graph
# delete loops on source if any
D.delete_edges(D.outgoing_edge_iterator(source))
# merge multi-edges if any by concatenating their labels
if D.has_multiple_edges():
merged_multiple_edges = {}
for u, v, l in D.multiple_edges():
D.delete_edge(u, v, l)
if (u, v) not in merged_multiple_edges:
merged_multiple_edges[(u, v)] = l
else:
merged_multiple_edges[(u, v)] += l
D.add_edges([(u, v, l) for (u, v),l in merged_multiple_edges.items()])
# 2) Pick an edge e incoming to the source
try:
x, s, l = next(D.incoming_edge_iterator(source))
            except StopIteration:
return
# 3) Find all in_branchings that do not contain e
# by first removing it
D.delete_edge(x, s, l)
if len(list(D.depth_first_search(source, neighbors=D.neighbor_in_iterator))) == depth + 1:
for in_branch in _rec_in_branchings(depth):
yield in_branch
D.add_edge(x, s, l)
# 4) Find all in_branchings that do contain e by merging
# the end vertices of e
            # store the edges needed to later unmerge the end vertices of e
saved_edges = D.incoming_edges(source)
saved_edges.remove((x, s, l))
saved_edges += D.outgoing_edges(x)
saved_edges += D.incoming_edges(x)
D.merge_vertices((source, x))
list_merged_edges.add(l)
for in_branch in _rec_in_branchings(depth - 1):
yield in_branch
list_merged_edges.remove(l)
# unmerge the end vertices of e
D.delete_vertex(source)
D.add_edges(saved_edges)
def _singleton_in_branching():
r"""
Returns a DiGraph containing only ``source`` and no edges.
"""
D = DiGraph()
D.add_vertex(source)
yield D
if not self.has_vertex(source):
raise ValueError("vertex ({0}) is not a vertex of the digraph".format(source))
# check if self.order == 1
if self.order() == 1:
return _singleton_in_branching()
        # check if the source can reach every other vertex
if spanning:
depth = self.order() - 1
if len(list(self.depth_first_search(source, neighbors=self.neighbor_in_iterator))) < self.order():
raise ValueError("no spanning in branching to vertex ({0}) exist".format(source))
else:
depth = len(list(self.depth_first_search(source, neighbors=self.neighbor_in_iterator))) - 1
# if vertex is isolated
if not depth:
return _singleton_in_branching()
# We build a copy of self in which each edge has a distinct label.
        # On the way, we remove loops and edges going out of source.
D = DiGraph(multiedges=True, loops=True)
list_edges = list(self.edges(sort=False))
for i, (u, v, _) in enumerate(list_edges):
if u != v and u != source:
D.add_edge(u, v, (i,))
list_merged_edges = set()
return _rec_in_branchings(depth)
# Aliases to functions defined in other modules
from sage.graphs.comparability import is_transitive
from sage.graphs.base.static_sparse_graph import tarjan_strongly_connected_components as strongly_connected_components
from sage.graphs.connectivity import is_strongly_connected
from sage.graphs.connectivity import strongly_connected_components_digraph
from sage.graphs.connectivity import strongly_connected_components_subgraphs
from sage.graphs.connectivity import strongly_connected_component_containing_vertex
from sage.graphs.connectivity import strong_articulation_points
from sage.graphs.path_enumeration import _all_paths_iterator
from sage.graphs.path_enumeration import all_paths_iterator
from sage.graphs.path_enumeration import all_simple_paths
|
the-stack_106_14396
|
import inspect
from typing import Callable, List
import cherrypy
from ingredients_http.request_methods import RequestMethods
from ingredients_http.router import Router
class SandwichRouter(Router):
def paginate(self, db_cls, response_cls, limit, marker, **kwargs):
resp_models = []
for obj in db_cls.list(**kwargs):
resp_models.append(response_cls.from_database(obj))
return resp_models, False
def on_register(self, uri: str, action: Callable, methods: List[RequestMethods]):
self.mount.api_spec.add_path(path=uri, router=self, func=action)
class SandwichSystemRouter(SandwichRouter):
def __init__(self, uri_base=None):
if uri_base is None:
uri_base = 'system'
else:
uri_base = 'system/' + uri_base
super().__init__(uri_base=uri_base)
class SandwichProjectRouter(SandwichRouter):
def __init__(self, uri_base=None):
if uri_base is None:
uri_base = 'projects/{project_name}'
else:
uri_base = 'projects/{project_name}/' + uri_base
super().__init__(uri_base=uri_base)
def setup_routes(self, dispatcher: cherrypy.dispatch.RoutesDispatcher, uri_prefix: str):
for member in [getattr(self, attr) for attr in dir(self)]:
if inspect.ismethod(member) and hasattr(member, '_route'):
# Enable project scope checking
self.__class__.__dict__[member.__name__]._cp_config['tools.project_scope.on'] = True
self.__class__.__dict__[member.__name__]._cp_config['tools.project_scope.delete_param'] = True
super().setup_routes(dispatcher, uri_prefix)
|
the-stack_106_14401
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
def create_quotes():
table = client.create_table(
TableName='Quotes.EOD',
KeySchema=[
{
'AttributeName': 'Symbol',
'KeyType': 'HASH' # Partition key
},
{
'AttributeName': 'Date',
'KeyType': 'RANGE' # Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'Symbol',
'AttributeType': 'S'
},
{
'AttributeName': 'Date',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
w = client.get_waiter('table_exists')
w.wait(TableName='Quotes.EOD')
print("table Quotes.EOD created")
print("Table status:", table)
def create_securities():
table = client.create_table(
TableName='Securities',
KeySchema=[
{
'AttributeName': 'Symbol',
'KeyType': 'HASH' # Partition key
},
{
'AttributeName': 'Broker',
'KeyType': 'RANGE' # Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'Symbol',
'AttributeType': 'S'
},
{
'AttributeName': 'Broker',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
w = client.get_waiter('table_exists')
w.wait(TableName='Securities')
print("table Securities created")
print("Table status:", table)
client = boto3.client('dynamodb', region_name='us-east-1')
try:
if 'Quotes.EOD' in client.list_tables()['TableNames']:
client.delete_table(TableName='Quotes.EOD')
waiter = client.get_waiter('table_not_exists')
waiter.wait(TableName='Quotes.EOD')
print("table Quotes.EOD deleted")
if 'Securities' in client.list_tables()['TableNames']:
client.delete_table(TableName='Securities')
waiter = client.get_waiter('table_not_exists')
waiter.wait(TableName='Securities')
print("table Securities deleted")
except Exception as e:
print(e)
create_securities()
create_quotes()
|
the-stack_106_14402
|
def get_flop_per_sample(args):
l = args.num_layers
s = args.seq_length
h = args.hidden_size
k = args.top_k
return 2 * 3 * l * (
3 * s * h ** 2 # qkv
+ s ** 2 * h # attention scores
+ s ** 2 * h # apply attention
+ s * h ** 2 # dense out
+ s * 4 * h ** 2 * 2)
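# Hedged usage sketch: ``args`` only needs the attributes read above; the
# values below are illustrative and not taken from any particular setup.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(num_layers=24, seq_length=1024, hidden_size=1024, top_k=1)
    print("FLOPs per sample:", get_flop_per_sample(args))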
|
the-stack_106_14405
|
import serial
import time
class Arduino:
def __init__(self):
self.obj = serial.Serial("/dev/ttyUSB0", 9600, timeout=1)
print("Arduino Connected")
def left(self):
self.obj.write(b'1')
def right(self):
self.obj.write(b'2')
if __name__ == "__main__":
obj = Arduino()
obj.left()
time.sleep(5)
obj.right()
|
the-stack_106_14406
|
def test_identity_scaler():
import numpy as np
from pysad.transform.preprocessing import IdentityScaler
X = np.random.rand(100, 25)
scaler = IdentityScaler()
scaled_X = scaler.fit_transform(X)
assert np.all(np.isclose(scaled_X, X))
scaler = scaler.fit(X)
scaled_X = scaler.transform(X)
assert np.all(np.isclose(scaled_X, X))
|
the-stack_106_14411
|
import uuid
from yggdrasil.drivers.ConnectionDriver import ConnectionDriver
class ServerResponseDriver(ConnectionDriver):
r"""Class for handling server side RPC type communication.
Args:
response_address (str): The address of the channel used to send
responses to the client response driver.
comm (str, optional): The comm class that should be used to
communicate with the server resposne driver. Defaults to
tools.get_default_comm().
msg_id (str, optional): ID associate with the request message this
driver was created to respond to. Defaults to new unique ID.
**kwargs: Additional keyword arguments are passed to parent class.
Attributes:
comm (str): The comm class that should be used to communicate
with the server driver. Defaults to tools.get_default_comm().
msg_id (str): ID associate with the request message this driver was
created to respond to.
"""
_connection_type = None
def __init__(self, response_address, comm=None, msg_id=None,
request_name=None, **kwargs):
if msg_id is None:
msg_id = str(uuid.uuid4())
response_name = 'ServerResponse.%s' % msg_id
if request_name is not None:
response_name = request_name + '.' + response_name
# Input communicator from client model
icomm_kws = kwargs.get('icomm_kws', {})
icomm_kws['comm'] = None
icomm_kws['name'] = 'server_model_response.' + msg_id
icomm_kws['is_response_server'] = True
kwargs['icomm_kws'] = icomm_kws
# Output communicator to client response driver
ocomm_kws = kwargs.get('ocomm_kws', {})
ocomm_kws['comm'] = comm
ocomm_kws['name'] = response_name
if response_address is not None:
ocomm_kws['address'] = response_address
kwargs['ocomm_kws'] = ocomm_kws
# Overall keywords
kwargs['single_use'] = True
super(ServerResponseDriver, self).__init__(response_name, **kwargs)
self.comm = comm
self.msg_id = msg_id
@property
def model_response_name(self):
r"""str: The name of the channel used by the server model to send
responses."""
return self.icomm.name
@property
def model_response_address(self):
r"""str: The address of the channel used by the server model to send
responses."""
return self.icomm.address
@property
def response_address(self):
r"""str: The address of the channel used to send responses to the client
response driver."""
return self.ocomm.address
|
the-stack_106_14412
|
#this code is inspired by https://github.com/ajamjoom/Image-Captions/blob/master/processData.py
from pycocotools.coco import COCO
from collections import Counter
import nltk
import pickle
from pathconf import PathConfig
PAD_TOKEN = '<pad>' # Padding
START_TOKEN = '<start>' # Start of sentence
END_TOKEN = '<end>' # End of sentence
UNK_TOKEN = '<unk>' # Out of vocabulary (unknown)
class Vocabulary(object):
"""Represents vocabulary."""
def __init__(self):
self.w2i = {}
self.i2w = {}
self.idx = 0
def add_word(self, word):
        if word not in self.w2i:
self.w2i[word] = self.idx
self.i2w[self.idx] = word
self.idx += 1
def __call__(self, word):
        if word not in self.w2i:
return self.w2i[UNK_TOKEN]
return self.w2i[word]
def __len__(self):
return len(self.w2i)
def build_vocab(threshold=6):
    # Compute word frequencies from captions.
coco = COCO(PathConfig.train_anno_file)
counter = Counter()
ids = coco.anns.keys()
for id in ids:
caption = str(coco.anns[id]['caption'])
tokens = nltk.tokenize.word_tokenize(caption.lower())
counter.update(tokens)
    # Omit non-frequent words determined by the threshold.
words = [word for word, cnt in counter.items() if cnt >= threshold]
# Create vocabulary.
vocab = Vocabulary()
vocab.add_word(PAD_TOKEN)
for word in words:
vocab.add_word(word)
vocab.add_word(START_TOKEN)
vocab.add_word(END_TOKEN)
vocab.add_word(UNK_TOKEN)
return vocab
def save_vocab(vocab):
with open(PathConfig.vocab_file, 'wb') as f:
pickle.dump(vocab, f)
def load_vocab():
with open(PathConfig.vocab_file, 'rb') as f:
vocab = pickle.load(f)
return vocab
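# Minimal usage sketch, assuming PathConfig.train_anno_file and
# PathConfig.vocab_file point at a valid COCO annotation file and a writable
# pickle path (illustrative only, not part of the original module):
if __name__ == "__main__":
    vocab = build_vocab(threshold=6)
    save_vocab(vocab)
    print("vocabulary size:", len(load_vocab()))
    print("index of <unk>:", vocab(UNK_TOKEN))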
|
the-stack_106_14414
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# superclass
from .File import File
# class declaration
class NamedPipe(File):
"""
Representation of named pipes, a unix interprocess communication mechanism
"""
# constant
marker = 'p'
# interface
def identify(self, explorer, **kwds):
"""
Tell {explorer} that it is visiting a FIFO
"""
# dispatch
return explorer.onNamedPipe(info=self, **kwds)
# end of file
|
the-stack_106_14416
|
from collections import defaultdict
def greatest_distance(arr):
indexes = defaultdict(list)
for i, a in enumerate(arr):
indexes[a].append(i)
try:
        return max(b[-1] - b[0] for b in indexes.values() if len(b) > 1)
except ValueError:
return 0
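# Minimal usage sketch (inputs chosen for illustration only):
if __name__ == "__main__":
    # 1 occurs at indices 0 and 4, so the greatest distance is 4
    print(greatest_distance([1, 2, 3, 2, 1]))  # -> 4
    print(greatest_distance([1, 2, 3]))        # -> 0 (no value repeats)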
|
the-stack_106_14421
|
import gzip
import os
import subprocess
try:
import tracemalloc
except ImportError:
tracemalloc = None
import pytest
from ddtrace.profiling.collector import stack
from ddtrace.profiling.exporter import pprof_pb2
def test_call_script():
subp = subprocess.Popen(
["pyddprofile", os.path.join(os.path.dirname(__file__), "simple_program.py")], stdout=subprocess.PIPE
)
stdout, stderr = subp.communicate()
assert subp.wait() == 42
hello, interval, stacks = stdout.decode().strip().split("\n")
assert hello == "hello world"
assert float(interval) >= 0.01
assert int(stacks) >= 1
@pytest.mark.skipif(not os.getenv("DD_PROFILE_TEST_GEVENT", False), reason="Not testing gevent")
def test_call_script_gevent():
subp = subprocess.Popen(
["python", os.path.join(os.path.dirname(__file__), "simple_program_gevent.py")], stdout=subprocess.PIPE
)
assert subp.wait() == 0
def check_pprof_file(filename):
with gzip.open(filename, "rb") as f:
content = f.read()
p = pprof_pb2.Profile()
p.ParseFromString(content)
if tracemalloc:
if stack.FEATURES["stack-exceptions"]:
assert len(p.sample_type) == 11
else:
assert len(p.sample_type) == 10
else:
assert len(p.sample_type) == 8
assert p.string_table[p.sample_type[0].type] == "cpu-samples"
def test_call_script_pprof_output(tmp_path, monkeypatch):
"""This checks if the pprof output and atexit register work correctly.
The script does not run for one minute, so if the `stop_on_exit` flag is broken, this test will fail.
"""
filename = str(tmp_path / "pprof")
monkeypatch.setenv("DD_PROFILING_OUTPUT_PPROF", filename)
monkeypatch.setenv("DD_PROFILING_CAPTURE_PCT", "1")
subp = subprocess.Popen(["pyddprofile", os.path.join(os.path.dirname(__file__), "simple_program.py")])
assert subp.wait() == 42
check_pprof_file(filename + "." + str(subp.pid) + ".1")
return filename, subp.pid
def test_call_script_pprof_output_interval(tmp_path, monkeypatch):
monkeypatch.setenv("DD_PROFILING_UPLOAD_INTERVAL", "0.1")
filename, pid = test_call_script_pprof_output(tmp_path, monkeypatch)
for i in (2, 3):
check_pprof_file(filename + "." + str(pid) + (".%d" % i))
def test_fork(tmp_path, monkeypatch):
filename = str(tmp_path / "pprof")
monkeypatch.setenv("DD_PROFILING_OUTPUT_PPROF", filename)
monkeypatch.setenv("DD_PROFILING_CAPTURE_PCT", "100")
subp = subprocess.Popen(
["python", os.path.join(os.path.dirname(__file__), "simple_program_fork.py")], stdout=subprocess.PIPE
)
assert subp.wait() == 0
stdout, stderr = subp.communicate()
child_pid = stdout.decode().strip()
check_pprof_file(filename + "." + str(subp.pid) + ".1")
check_pprof_file(filename + "." + str(child_pid) + ".1")
|
the-stack_106_14423
|
from ..src.WebChecker import WebChecker
class TestWebChecker:
def test_check_link_success(self):
web = WebChecker()
link = "https://google.com"
result = web.checkLink(link)
assert len(result) == 4
assert result[1] == link
assert result[2] >= 100 and result[2] <= 599
assert result[3] > 0
|
the-stack_106_14424
|
import pandas as pd
from scipy.cluster.hierarchy import linkage, leaves_list
import os
import plotly.graph_objects as go
from .config import Var
from ..util.debug import dprint
REPORT_HEIGHT = 800 # px
MAX_TOTAL_DATA = 4000
MAX_DYN_LEN = 400
#MAX_DYN_SIZE = MAX_DYN_LEN ** 2
'''
Max matrix dim lengths, (sometimes assuming squarishness as upper bound):
* Clustering: >3000
* To use kaleido: ~3000. 3000^2 is largest total size supported by kaleido's chromium JSON stdin.
* To render in narrative: as little as possible. 600^2?
'''
# TODO test things like empty df, large ..
# TODO full function in hover? too much data ...
# TODO log coloring for large differential ... or keep all uniform scale?
# TODO dynamically determine max tsv dims
####################################################################################################
####################################################################################################
def do_heatmap(tsv_fp, html_fp, axis_labels): # TODO log coloring for func x sample?
'''
tsv_fp: data to heatmap. it is a TSV GZ in PICRUSt2's Var.out_dir
html_fp: where to write plotly html
'''
df = pd.read_csv(tsv_fp, sep='\t', index_col=0) # default infer compression from file name extension
tsv_flnm = os.path.basename(tsv_fp)
###
### subset
original_shape = df.shape
subset = df.shape[0] > MAX_DYN_LEN or df.shape[1] > MAX_DYN_LEN
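    # When the matrix exceeds MAX_DYN_LEN in either dimension, keep only the
    # rows/columns with the largest totals before clustering and plotting.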
    if subset:
row_ordering = df.sum(axis=1).values.argsort()[::-1]
col_ordering = df.sum(axis=0).values.argsort()[::-1]
df = df.iloc[row_ordering, col_ordering]
df = df.iloc[:MAX_DYN_LEN, :MAX_DYN_LEN]
###
###
row_ordering = leaves_list(linkage(df))
col_ordering = leaves_list(linkage(df.T))
df = df.iloc[row_ordering, col_ordering]
###
###
fig = go.Figure(go.Heatmap(
z=df.values,
y=df.index.tolist(),
x=df.columns.tolist(),
colorbar=dict(
title=dict(
text='Abundance',
side='right',
),
),
))
fig.update_layout(
title=dict(
text=(
'Shape%s=%s' % (
('<sub>unsubset</sub>' if subset else ''),
original_shape,
)
),
font=dict(
size=14, # make label size since it's not cartoonish title so much as more info
),
x=0.5,
),
xaxis_title=axis_labels[1],
yaxis_title=axis_labels[0],
xaxis_tickangle=45,
margin_t=40,
)
###
###
fig.write_html(
html_fp,
)
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
class HTMLReportWriter:
'''
Needs to know Var.report_dir
'''
####################################################################################################
####################################################################################################
def __init__(self, cmd_l):
'''
'''
self.replacement_d = {}
#
self.cmd_l = cmd_l
if not os.path.exists(Var.report_dir):
os.mkdir(Var.report_dir)
####################################################################################################
####################################################################################################
def _compile_cmd(self):
txt = ''
for cmd in self.cmd_l:
txt += (
'<p class="fixwhitespace">\n'
'<code>' + cmd + '</code>\n'
'</p>\n'
)
self.replacement_d['CMD_TAG'] = txt
####################################################################################################
####################################################################################################
def _compile_figures(self):
button_l = []
content_l = []
for per in ['amplicon', 'metagenome']:
for func in Var.func_l:
if not Var.params.getd(func):
continue
fig_id = per + '_' + func
func_name = Var.func_2_cfg[func]['name']
fig_title = per.title() + ' ' + func_name
ind = 0 if per == 'amplicon' else 1
tsv_fp = os.path.join(Var.out_dir, Var.func_2_cfg[func]['relfp'][ind])
html_fp = os.path.join(Var.report_dir, fig_id + '.html')
axis_labels = (
('Amplicon', func_name) if per == 'amplicon' else
(func_name, 'Sample')
)
do_heatmap(tsv_fp, html_fp, axis_labels)
button_l.append(
'''<button class="tablinks %s" onclick="openTab(event, '%s')">%s</button>'''
% (
'active' if fig_id == 'metagenome_metacyc' else '',
fig_id,
fig_title,
)
)
content_l.append(
'<div id="%s" class="tabcontent" %s>\n' % (
fig_id,
('style="display:inline-flex;"' if fig_id == 'metagenome_metacyc' else ''),
) +
'<iframe src="%s" scrolling="no" seamless="seamless"></iframe>\n' % os.path.basename(html_fp) +
'</div>\n'
)
self.replacement_d['HEATMAP_BUTTON_TAG'] = '\n'.join(button_l)
self.replacement_d['HEATMAP_CONTENT_TAG'] = '\n'.join(content_l)
####################################################################################################
####################################################################################################
def write(self):
self._compile_cmd()
self._compile_figures() # TODO stress test heatmaps
REPORT_HTML_TEMPLATE_FLPTH = '/kb/module/lib/kb_PICRUSt2/template/report.html'
html_fp = os.path.join(Var.report_dir, 'report.html')
with open(REPORT_HTML_TEMPLATE_FLPTH, 'r') as src_fh:
with open(html_fp, 'w') as dst_fh:
for line in src_fh:
s = line.strip()
if s in self.replacement_d:
dst_fh.write(self.replacement_d[s].strip() + '\n')
else:
dst_fh.write(line)
return html_fp
|
the-stack_106_14427
|
import requests
from api import RDFStoreException, resolveTemplate
def rdf4j_push(rdfstore, model, obj, gr, mode ):
#import pdb; pdb.set_trace()
from rdf_io.models import ServiceBinding
headers = {'Content-Type': 'application/x-turtle;charset=UTF-8'}
resttgt = resolveTemplate("".join( ( rdfstore['server'],rdfstore['target'])), model, obj )
for h in rdfstore.get('headers') or [] :
headers[h] = resolveTemplate( rdfstore['headers'][h], model, obj )
if mode == ServiceBinding.PERSIST_REPLACE :
result = requests.put( resttgt, headers=headers , data=gr.serialize(format="turtle"))
elif mode == ServiceBinding.PERSIST_UPDATE :
result = requests.post( resttgt, headers=headers , data=gr.serialize(format="turtle"))
elif mode == ServiceBinding.PERSIST_PURGE :
result = requests.delete( resttgt, headers=headers )
else:
raise Exception ("RDF4J store does not yet support mode %s" % (mode,))
# logger.info ( "Updating resource {} {}".format(resttgt,result.status_code) )
if result.status_code > 400 :
# print "Posting new resource"
# result = requests.post( resttgt, headers=headers , data=gr.serialize(format="turtle"))
# logger.error ( "Failed to publish resource {} {}".format(resttgt,result.status_code) )
raise RDFStoreException ("Failed to publish resource {} {}".format(resttgt,result.status_code ) )
return result
def rdf4j_get(rdfstore, model,obj):
""" Gets a response from an RDF4J datastore access method. Returns HTTP request
"""
headers = {'Content-Type': 'application/x-turtle;charset=UTF-8'}
resttgt = resolveTemplate("".join( ( rdfstore['server'],rdfstore['target'])), model, obj )
result = requests.get( resttgt, headers=headers )
return result
def rdf4j_delete(rdfstore, model,obj):
""" Gets a response from an RDF4J datastore access method. Returns HTTP request
"""
headers = {'Content-Type': 'application/x-turtle;charset=UTF-8'}
resttgt = resolveTemplate("".join( ( rdfstore['server'],rdfstore['target'])), model, obj )
result = requests.delete( resttgt, headers=headers )
return result
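# Illustrative shape of the ``rdfstore`` mapping that the helpers above expect.
# The keys mirror the accesses in this module; the values are hypothetical.
EXAMPLE_RDFSTORE = {
    'server': 'http://localhost:8080/rdf4j-server',
    'target': '/repositories/myrepo/statements',
    'headers': {},
}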
|
the-stack_106_14428
|
import pytest
from galaxyls.tests.unit.utils import TestUtils
from pygls.lsp.types.basic_structures import Position
@pytest.mark.parametrize(
"source_with_mark, expected_position",
[
("^<root attr=></root>", Position(line=0, character=0)),
("<roo^t attr=></root>", Position(line=0, character=4)),
("<root>^\n</root>", Position(line=0, character=6)),
("<root>\n^</root>", Position(line=1, character=0)),
("<root>\n</root>^", Position(line=1, character=7)),
],
)
def test_extract_mark_from_source(
source_with_mark: str,
expected_position: Position,
) -> None:
mark = "^"
position, source = TestUtils.extract_mark_from_source(mark, source_with_mark)
assert mark not in source
assert position == expected_position
|
the-stack_106_14431
|
from matplotlib import pyplot as plt
import numpy as np
import pumapy.utilities.workspace as ws
def plot_slices(ws_nparray, slice_direction='z', crange=None, cmap='gray', index=1):
""" Plot slices of domain along a specified direction (z default)
:param ws_nparray: domain
:type ws_nparray: Workspace or ndarray
:param slice_direction: 'x', 'y', 'z'
:type slice_direction: string
:param crange: color range, i.e. specify min and max grayscale
:type crange: tuple(int, int)
:param cmap: color map for the plot, 'gray' (default), 'jet' or refer to matplotlib for other colormaps
:type cmap: string
:param index: specifying the slice index in which the plot will be opened
    :type index: int
:return: slicer object
:rtype: PlotSlicer
>>> import pumapy as puma
>>> ws = puma.import_3Dtiff(puma.path_to_example_file("100_fiberform.tif"), 1.3e-6)
>>> puma.plot_slices(ws)
"""
img, _ = PlotSlicer.error_checks(ws_nparray, None, slice_direction)
if img is None:
return
img, axis_labels, slices, rows, cols = PlotSlicer.rotate_domain_mpl(img, slice_direction)
return PlotSlicer(img, slice_direction, crange, cmap, index, axis_labels, slices, rows, cols)
def compare_slices(ws_nparray1, ws_nparray2, slice_direction='z', crange1=None, cmap1='gray',
crange2=None, cmap2='gray', index=1):
""" Plot slices of domain along a specified direction (z default)
:param ws_nparray1: domain
:type ws_nparray1: Workspace or ndarray
:param ws_nparray2: domain
:type ws_nparray2: Workspace or ndarray
:param slice_direction: 'x', 'y', 'z'
:type slice_direction: string
:param crange1: color range for plot 1, specify min and max grayscale
:type crange1: tuple(int, int)
:param cmap1: color map for plot 1, 'gray' (default), 'jet' or refer to matplotlib for other colormaps
:type cmap1: string
:param crange2: color range for plot 2, specify min and max grayscale
:type crange2: tuple(int, int)
:param cmap2: color map for plot 2, 'gray' (default), 'jet' or refer to matplotlib for other colormaps
:type cmap2: string
:param index: specifying the slice index in which the plot will be opened
:type index: int
:return: slicer object
:rtype: CompareSlicer
>>> import pumapy as puma
>>> ws = puma.import_3Dtiff(puma.path_to_example_file("100_fiberform.tif"), 1.3e-6)
>>> ws2 = ws.copy()
>>> ws2.binarize_range((100, 255))
>>> puma.compare_slices(ws, ws2)
"""
img1, img2 = CompareSlicer.error_checks(ws_nparray1, ws_nparray2, slice_direction)
if img1 is None:
return
img1, axis_labels, slices1, rows1, cols1 = CompareSlicer.rotate_domain_mpl(img1, slice_direction)
img2, _, slices2, rows2, cols2 = CompareSlicer.rotate_domain_mpl(img2, slice_direction)
return CompareSlicer(img1, img2, slice_direction, crange1, cmap1, crange2, cmap2, index,
axis_labels, slices1, slices2, rows1, cols1, rows2, cols2)
class IndexTracker:
def __init__(self, img, img2, slice_direction, index, axis_labels, slices, slices_titles):
self.slice_direction = slice_direction
self.ind = index - 1
self.axis_labels = axis_labels
self.slices = slices
self.slices_titles = slices_titles
self.axis_labels = axis_labels
self.img = img
self.img2 = img2
def onkey(self, event):
if event.key == "right":
if self.ind < self.slices - 10:
self.ind += 10
elif event.key == "left":
if self.ind >= 10:
self.ind -= 10
elif event.key == "up":
if self.ind < self.slices - 100:
self.ind += 100
elif event.key == "down":
if self.ind >= 100:
self.ind -= 100
self.update()
def onscroll(self, event):
if event.button == 'down':
if self.ind != self.slices - 1:
self.ind += 1
else:
if self.ind != 0:
self.ind -= 1
self.update()
def update(self):
pass # virtual
@staticmethod
def format_coord(x, y):
return 'x=%1.f, y=%1.f' % (x, y)
@staticmethod
def rotate_domain_mpl(img, slice_direction):
if slice_direction == 'x':
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 0, 1)
slices, rows, cols = img.shape
axis_labels = ('y', 'z')
elif slice_direction == 'y':
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 1, 0)
rows, slices, cols = img.shape
axis_labels = ('z', 'x')
else:
rows, cols, slices = img.shape
axis_labels = ('x', 'y')
img = np.rot90(img, axes=(1, 0))
img = np.flip(img, axis=1)
return img, axis_labels, slices, rows, cols
@staticmethod
def _ws_check(img):
if isinstance(img, ws.Workspace):
img = img.matrix.copy()
elif isinstance(img, np.ndarray):
if img.ndim > 3 or img.ndim < 2:
raise Exception("Numpy array has to have 3 dimensions")
elif img.ndim == 2:
img = img[:, :, np.newaxis]
img = img.copy()
else:
raise Exception("Data to be sliced has to be either a pumapy.Workspace or np.ndarray")
return img
@staticmethod
def error_checks(img, img2, slice_direction):
slice_direction = slice_direction.lower()
if not (slice_direction == 'x' or slice_direction == 'y' or slice_direction == 'z'):
raise Exception("Slice direction can only be along 'x', 'y' or 'z'")
img = PlotSlicer._ws_check(img)
if img is None:
return None, None
if img2 is not None:
img2 = PlotSlicer._ws_check(img2)
if img2 is None:
return None, None
if not (slice_direction == 'x' or slice_direction == 'y' or slice_direction == 'z'):
raise Exception("Slice direction can only be along 'x', 'y' or 'z'")
return img, img2
class PlotSlicer(IndexTracker):
def __init__(self, img, slice_direction, color_range, color_map, index, axis_labels, slices, rows, cols):
super().__init__(img, None, slice_direction, index, axis_labels, slices, None)
self.color_range = color_range
self.color_map = color_map
self.rows = rows
self.cols = cols
if self.color_range is None:
self.color_range = self.img.min(), self.img.max()
self.fig, self.ax = plt.subplots(1, 1)
self.im = self.ax.imshow(self.img[:, :, self.ind], cmap=self.color_map,
vmin=self.color_range[0], vmax=self.color_range[1])
self.cid_scroll = self.fig.canvas.mpl_connect('scroll_event', self.onscroll)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.onkey)
self.ax.set_xlabel(self.axis_labels[0])
self.ax.set_ylabel(self.axis_labels[1])
plt.colorbar(mappable=self.im, ax=self.ax, fraction=0.046, pad=0.04,
ticks=np.linspace(self.color_range[0], self.color_range[1], 15).astype(int))
self.ax.grid(linestyle=':')
self.update()
plt.show(block=True)
def update(self):
self.im.set_data(self.img[:, :, self.ind])
self.ax.set_title('Slice: {}/{} along {}'.format(self.ind + 1, self.slices, self.slice_direction))
self.ax.format_coord = PlotSlicer.format_coord
self.fig.canvas.draw()
class CompareSlicer(IndexTracker):
def __init__(self, img1, img2, slice_direction, color_range1, color_map1, color_range2, color_map2, index,
axis_labels, slices1, slices2, rows1, cols1, rows2, cols2):
super().__init__(img1, img2, slice_direction, index, axis_labels, max(slices1, slices2), [slices1, slices2])
self.imgs = img1, img2
self.color_ranges = [color_range1, color_range2]
self.color_maps = color_map1, color_map2
self.rows = rows1, rows2
self.cols = cols1, cols2
self.fig, self.ax = plt.subplots(1, 2)
self.fig.tight_layout(pad=3.0)
self.ims = [None, None]
self.cid_scroll = self.fig.canvas.mpl_connect('scroll_event', self.onscroll)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.onkey)
for i in range(2):
if self.color_ranges[i] is None:
self.color_ranges[i] = (self.imgs[i].min(), self.imgs[i].max())
self.ims[i] = self.ax[i].imshow(self.imgs[i][:, :, self.ind], cmap=self.color_maps[i],
vmin=self.color_ranges[i][0], vmax=self.color_ranges[i][1])
self.ax[i].set_xlabel(self.axis_labels[0])
self.ax[i].set_ylabel(self.axis_labels[1])
plt.colorbar(self.ims[i], ax=self.ax[i], fraction=0.046, pad=0.04,
ticks=np.linspace(self.color_ranges[i][0], self.color_ranges[i][1], 15).astype(int))
self.ax[i].grid(linestyle=':')
self.update()
plt.show(block=True)
def update(self):
for i in range(2):
self.ims[i].set_data(self.imgs[i][:, :, self.ind])
self.ax[i].set_title('Slice: {}/{} along {}'.format(self.ind + 1, self.slices_titles[i], self.slice_direction))
self.ax[i].format_coord = CompareSlicer.format_coord
self.fig.canvas.draw()
|
the-stack_106_14434
|
# based on https://cobe.io/blog/posts/kubernetes-watch-python/
import ast
import json
import base64
import select
import socket
import io
import re
try:
from http_parser.parser import HttpParser # pylint: disable=no-name-in-module
except ImportError:
from http_parser.pyparser import HttpParser
from backports import ssl
from st2reactor.sensor.base import Sensor
class SensorBase(Sensor):
def __init__(self, sensor_service, extension, trigger_ref, config=None):
super( # pylint: disable=bad-super-call
SensorBase,
self).__init__(
sensor_service=sensor_service,
config=config)
self._log = self._sensor_service.get_logger(self.__class__.__name__)
self.TRIGGER_REF = trigger_ref
self.extension = extension
self.client = None
self.authhead = None
self.authmethod = None
self.setup()
def setup(self):
if 'user' in self.config and self.config['user'] is not None:
if 'password' in self.config and self.config['password'] is not None:
auth = base64.b64encode(self.config['user'] + ":" + self.config['password'])
self.authhead = "authorization: Basic %s" % auth
self.authmethod = "basic"
if 'client_cert_path' in self.config and self.config['client_cert_path'] is not None:
if 'client_cert_key_path' in self.config:
if self.config['client_cert_key_path'] is not None:
self.authmethod = "cert"
try:
extension = self.extension
api_url = self.config['kubernetes_api_url'] + extension
if self.authmethod is None:
raise KeyError('No authentication mechanisms defined')
except KeyError:
self._log.exception(
'Configuration file does not contain required fields.')
raise
self._log.debug(
'Connecting to Kubernetes endpoint %s via api_client.' %
api_url)
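        # Split the configured API URL into scheme, host and optional port.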
m = re.search(r'(http|https)://([-\.a-zA-Z0-9]+):?(\d*)/?$',
self.config['kubernetes_api_url'])
method = m.group(1)
self.host = m.group(2)
port = m.group(3)
if port:
self.port = int(port)
else:
if method == "https":
self.port = 443
if method == "http":
self.port = 80
def run(self):
self._log.info('Watch %s for new data.' % self.extension)
while True:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.authmethod == "basic":
self.client = ssl.wrap_socket(self.sock)
elif self.authmethod == "cert":
self.client = ssl.wrap_socket(self.sock,
keyfile=self.config['client_cert_key_path'],
certfile=self.config['client_cert_path'])
else:
raise KeyError('No authentication mechanisms defined')
self._log.debug('Connecting to %s %i' % (self.host, self.port))
# self.client.settimeout(10)
self.client.connect((self.host, self.port))
except socket.error as exc:
self._log.exception('unable to connect to %s: %s' % (self.host, exc))
raise
except KeyError:
raise KeyError('No authentication mechanisms defined')
if self.authhead is not None:
self.client.send("GET %s HTTP/1.1\r\nHost: %s\r\n%s\r\n\r\n" %
(self.extension,
self.host,
self.authhead))
else:
self.client.send("GET %s HTTP/1.1\r\nHost: %s\r\n\r\n" %
(self.extension,
self.host))
readers = [self.client]
writers = out_of_band = []
pending = b''
parser = HttpParser()
self._log.debug("+")
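            # Read from the socket until the HTTP response headers have been fully parsed.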
while not parser.is_headers_complete():
self._log.debug(".")
try:
chunk = self.client.recv(io.DEFAULT_BUFFER_SIZE)
except socket.error as exc:
err = exc.args[0]
self._log.debug('a recv err (%s): %s' % (err, exc))
break
if not chunk:
self._log.exception('a No response from %s' % self.extension)
break
self._log.debug('a chunk %s' % chunk)
nreceived = len(chunk)
nparsed = parser.execute(chunk, nreceived)
if nparsed != nreceived:
self._log.exception('a nparsed %i != nreceived %i' % (nparsed, nreceived))
break
self._log.debug('parser headers complete %s' % parser.get_headers())
while True:
self._log.debug("-")
try:
readable, _, _ = select.select(readers, writers, out_of_band)
except select.error as exc:
self._log.debug("b select error: %s" % exc)
if not readable:
self._log.debug('b not readable')
break
try:
chunk = self.client.recv(io.DEFAULT_BUFFER_SIZE)
except socket.error as exc:
err = exc.args[0]
self._log.debug('b recv err (%s): %s' % (err, exc))
break
if not chunk:
self._log.debug('b not chunk')
self.client.close() # pylint: disable=no-member
break
nreceived = len(chunk)
self._log.debug('b chunk %s' % chunk)
self._log.debug("repr: %s" % repr(chunk))
if re.match(r'0\r\n\r\n', chunk, re.M):
self._log.debug('b end end end')
break
nparsed = parser.execute(chunk, nreceived)
if nparsed != nreceived:
self._log.exception('b nparsed %i != nreceived %i' % (nparsed, nreceived))
break
data = pending + parser.recv_body()
msg = "DATA: %s" % data
self._log.debug(msg)
lines = data.split(b'\n')
pending = lines.pop(-1)
for line in lines:
trigger_payload = self._get_trigger_payload_from_line(line)
if trigger_payload == 0:
pass
else:
self._log.info('Triggering Dispatch Now')
self._sensor_service.dispatch(
trigger=self.TRIGGER_REF, payload=trigger_payload)
self._log.debug('main loop done')
self.client.close() # pylint: disable=no-member
def _get_trigger_payload_from_line(self, line):
        k8s_object = self._fix_utf8_encoding_and_eval(line)
self._log.info(
'Incoming k8s object (from API response): %s',
k8s_object)
payload = self._k8s_object_to_st2_trigger(k8s_object)
return payload
    def _fix_utf8_encoding_and_eval(self, line):
        # need to perform a json dump due to a utf8 error prior to performing a
        # json.load
# kubernetes returns unquoted true/false values, need to be converted to python booleans
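        # e.g. the raw line '{"ready": true, "spec": null}' ends up as the dict {'ready': True, 'spec': None}.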
line = line.replace('true', 'True')
line = line.replace('false', 'False')
line = line.replace('null', 'None')
        dumped = json.dumps(line)
        n = json.loads(dumped)
        line = ast.literal_eval(n)
return line
def _k8s_object_to_st2_trigger(self, k8s_object):
# Define some variables
try:
resource_type = k8s_object['type']
object_kind = k8s_object['object']['kind']
name = k8s_object['object']['metadata']['name']
if 'spec' in k8s_object['object']:
spec = k8s_object['object']['spec']
else:
spec = 'None'
if 'namespace' in k8s_object['object']['metadata']:
namespace = k8s_object['object']['metadata']['namespace']
else:
namespace = 'None'
uid = k8s_object['object']['metadata']['uid']
if 'labels' in k8s_object['object']['metadata']:
labels_data = k8s_object['object']['metadata']['labels']
else:
labels_data = 'None'
except KeyError:
msg = 'One of "type", "kind", "name" or "uid" or "labels" ' + \
'do not exist in the object. Incoming object=%s' % k8s_object
self._log.exception(msg)
return 0
else:
if name in ['default']:
self._log.debug('ignoring name: %s.' % name)
return 0
else:
payload = self._build_a_trigger(
resource_type=resource_type,
name=name,
labels=labels_data,
namespace=namespace,
spec=spec,
object_kind=object_kind,
uid=uid)
self._log.info('Trigger payload: %s.' % payload)
return payload
def _build_a_trigger(
self,
resource_type,
name,
labels,
namespace,
spec,
object_kind,
uid):
payload = {
'resource': resource_type,
'name': name,
'namespace': namespace,
'spec': spec,
'labels': labels,
'object_kind': object_kind,
'uid': uid
}
return payload
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
|
the-stack_106_14436
|
import pandas as pd
import numpy as np
import mplfinance as mpf
import matplotlib.pyplot as plt
print(mpf.available_styles())
daily = pd.read_csv(r'examples_data\SP500_NOV2019_Hist.csv', index_col=0, parse_dates=True)
print(daily.head())
daily.index.name = "Date"
# Simple marker rules:
# 1. If the open is more than 5 above the close, add a red marker
# 2. If the close is more than 5 above the open, add a yellow marker
red_list = []
yellow_list = []
for _, row in daily.iterrows():
if row["Open"] - row["Close"] > 5:
red_list.append(row["Close"])
else:
red_list.append(np.NaN)
if row["Close"] - row["Open"] > 5:
yellow_list.append(row["Open"])
else:
yellow_list.append(np.NaN)
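# NaN entries are skipped when plotting, so markers only appear on days that match a rule.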
add_plot = [
    # scatter markers
mpf.make_addplot(red_list, type="scatter", markersize=200, marker='^', color='r'),
mpf.make_addplot(yellow_list, type="scatter", markersize=200, marker='^', color='y'),
    # dashdot line style
mpf.make_addplot(daily[["High", "Low"]], linestyle='dashdot'),
    # dotted line style, plotted against a secondary y-axis
mpf.make_addplot(daily["Open"] - daily["Close"], linestyle="dotted", secondary_y=True),
    # Add a sub-panel via the panel parameter
mpf.make_addplot(daily["Volume"], panel=1, color='g', secondary_y='auto'),
]
# panel_ratios sets the height ratio of the main panel to the sub-panels (with two sub-panels: panel_ratios=(1, 1, 0.5))
# mpf.plot(daily, type='candle', panel_ratios=(1, 0.5), volume=True, addplot=add_plot, style='default')
mpf.plot(daily, type='candle', addplot=add_plot, volume=True,
figscale=1.5, style="blueskies",
         title=u'SP500 Price Chart', figratio=(3, 2), ylabel='price', ylabel_lower='volume',
# savefig='my_image.png'
)
"""
plot绘图的部分参数
:type设置图像类型'ohlc'/'candle'/'line/renko'
:mav 绘制平局线
:show_nontrading= True 显示非交易日(k线之间有间隔),False 不显示交易日,k线之间没有间隔
:title:设置标题
:ylabel=设置主图Y轴标题
:ylabel_lower 设置成交量一栏Y坐标标题
:figratio:设置图形纵横比
:figscale 设置图像的缩小或放大,1.5就是放大50%,最大不会超过电脑屏幕大小
:style 设置整个图表样式,可以使用前面设置的样式my_style,只能在plot函数中使用指定整个图表样式,不能在make_addplot中使用。
savefig:导出图片,填写文件名及后缀,如果使用了,就不会显示图像了
"""
plt.show()
# https://qdhhkj.blog.csdn.net/article/details/105783640
|
the-stack_106_14437
|
# -*- coding: utf-8 -*-
import asyncio
import base64
import json
import ssl
import time
import urllib.parse
from unittest import mock
from unittest.mock import MagicMock
from asyncy.AppConfig import AppConfig, Expose, KEY_EXPOSE
from asyncy.Exceptions import K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.ContainerConfig import ContainerConfig
from asyncy.entities.Volume import Volume
from asyncy.utils.HttpUtils import HttpUtils
import pytest
from pytest import fixture, mark
from tornado.httpclient import AsyncHTTPClient
@fixture
def line():
return MagicMock()
def test_find_all_ports():
services = {
'alpine': {
'http': {
'port': 8080
}
},
'alpha': {
'expose': {
'console': {
'http': {
'port': 1882
}
}
},
'http': {
'port': 9092,
'subscribe': {
'port': 9090
},
'unsubscribe': {
'port': 9091
}
}
},
'nested': {
'a': {
'b': {
'c': {
'd': {
'e': {
'http': {
'subscribe': {
'port': 1234
},
'unsubscribe': {
'port': 1235
}
}
}
}
}
}
}
}
}
assert Kubernetes.find_all_ports(services['alpine']) == {8080}
assert Kubernetes.find_all_ports(services['alpha']) == {1882, 9090, 9091,
9092}
assert Kubernetes.find_all_ports(services['nested']) == {1234, 1235}
def test_raise_if_not_2xx(story, line):
res = MagicMock()
res.code = 401
with pytest.raises(K8sError):
Kubernetes.raise_if_not_2xx(res)
res.code = 200
assert Kubernetes.raise_if_not_2xx(res) is None
@mark.asyncio
async def test_create_namespace_if_required_existing(patch, app,
async_mock):
res = MagicMock()
res.code = 200
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(return_value=res))
app.app_id = 'my_app'
await Kubernetes.create_namespace(app)
Kubernetes.make_k8s_call.mock.assert_called_once()
Kubernetes.make_k8s_call.mock.assert_called_with(
app.config, app.logger, '/api/v1/namespaces/my_app')
@mark.asyncio
@mark.parametrize('create_result', [200, 500])
async def test_create_namespace_if_required(patch, app,
line, async_mock, create_result):
res_check = MagicMock()
res_check.code = 400
res_create = MagicMock()
res_create.code = create_result
app.app_id = 'my_app'
patch.object(Kubernetes, 'make_k8s_call',
new=async_mock(side_effect=[res_check, res_create]))
if create_result != 200:
with pytest.raises(K8sError):
await Kubernetes.create_namespace(app)
return
else:
await Kubernetes.create_namespace(app)
expected_payload = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': 'my_app'
}
}
assert Kubernetes.make_k8s_call.mock.mock_calls == [
mock.call(app.config, app.logger, '/api/v1/namespaces/my_app'),
mock.call(app.config, app.logger, '/api/v1/namespaces',
payload=expected_payload)
]
@mark.asyncio
async def test_clean_namespace(patch, story, async_mock):
patch.object(Kubernetes, '_list_resource_names',
new=async_mock(side_effect=[['service_1', 'service_2'],
['depl_1', 'depl_2'],
['pod_1', 'pod_2'],
['ing_1', 'ing_2'],
['secret_1', 'secret_2']]))
patch.object(Kubernetes, '_delete_resource', new=async_mock())
await Kubernetes.clean_namespace(story.app)
assert Kubernetes._delete_resource.mock.mock_calls == [
mock.call(story.app, 'services', 'service_1'),
mock.call(story.app, 'services', 'service_2'),
mock.call(story.app, 'deployments', 'depl_1'),
mock.call(story.app, 'deployments', 'depl_2'),
mock.call(story.app, 'pods', 'pod_1'),
mock.call(story.app, 'pods', 'pod_2'),
mock.call(story.app, 'ingresses', 'ing_1'),
mock.call(story.app, 'ingresses', 'ing_2'),
mock.call(story.app, 'secrets', 'secret_1'),
mock.call(story.app, 'secrets', 'secret_2')
]
def test_get_hostname(story):
story.app.app_id = 'my_app'
container_name = 'alpine'
ret = Kubernetes.get_hostname(story.app, container_name)
assert ret == 'alpine.my_app.svc.cluster.local'
def _create_response(code: int, body: dict = None):
res = MagicMock()
res.code = code
if body:
res.body = json.dumps(body)
return res
@mark.parametrize('first_res', [200, 409, 404])
@mark.parametrize('resource', ['deployments', 'services', 'secrets',
'persistentvolumeclaims', 'unknown', 'pods'])
@mark.asyncio
async def test_delete_resource(patch, story, async_mock, first_res, resource):
story.app.app_id = 'my_app'
api_responses = [
_create_response(first_res),
_create_response(200),
_create_response(200),
_create_response(404),
]
patch.object(Kubernetes, 'make_k8s_call',
new=async_mock(side_effect=api_responses))
patch.object(asyncio, 'sleep', new=async_mock())
if resource == 'unknown':
with pytest.raises(Exception):
await Kubernetes._delete_resource(story.app, resource, 'foo')
return
else:
await Kubernetes._delete_resource(story.app, resource, 'foo')
if first_res == 404:
assert Kubernetes.make_k8s_call.mock.call_count == 1
return
prefix = Kubernetes._get_api_path_prefix(resource)
assert Kubernetes.make_k8s_call.mock.mock_calls == [
mock.call(story.app.config, story.app.logger,
f'{prefix}/my_app/{resource}/foo'
f'?gracePeriodSeconds=0',
method='delete'),
mock.call(story.app.config, story.app.logger,
f'{prefix}/my_app/{resource}/foo'),
mock.call(story.app.config, story.app.logger,
f'{prefix}/my_app/{resource}/foo'),
mock.call(story.app.config, story.app.logger,
f'{prefix}/my_app/{resource}/foo'),
]
@mark.parametrize('method', ['patch', 'post'])
@mark.asyncio
async def test_make_k8s_call(patch, story, async_mock, method):
patch.object(HttpUtils, 'fetch_with_retry', new=async_mock())
context = MagicMock()
patch.object(Kubernetes, 'new_ssl_context', return_value=context)
context.load_verify_locations = MagicMock()
patch.init(AsyncHTTPClient)
client = AsyncHTTPClient()
story.app.config.CLUSTER_CERT = 'this_is\\nmy_cert' # Notice the \\n.
story.app.config.CLUSTER_AUTH_TOKEN = 'my_token'
story.app.config.CLUSTER_HOST = 'k8s.local'
path = '/hello_world'
payload = {
'foo': 'bar'
}
expected_kwargs = {
'ssl_options': context,
'headers': {
'Authorization': 'bearer my_token',
'Content-Type': 'application/json; charset=utf-8'
},
'method': method.upper(),
'body': json.dumps(payload)
}
if method == 'patch':
expected_kwargs['headers']['Content-Type'] = \
'application/merge-patch+json; charset=utf-8'
assert await Kubernetes.make_k8s_call(story.app.config, story.app.logger,
path, payload, method=method) \
== HttpUtils.fetch_with_retry.mock.return_value
HttpUtils.fetch_with_retry.mock.assert_called_with(
3, story.app.logger, 'https://k8s.local/hello_world', client,
expected_kwargs)
# Notice the \n. \\n MUST be converted to \n in Kubernetes#make_k8s_call.
context.load_verify_locations.assert_called_with(cadata='this_is\nmy_cert')
@mark.asyncio
async def test_remove_volume(patch, story, async_mock):
name = 'foo'
patch.object(Kubernetes, '_delete_resource', new=async_mock())
await Kubernetes.remove_volume(story.app, name)
Kubernetes._delete_resource.mock.assert_called_with(
story.app, 'persistentvolumeclaims', name)
@mark.parametrize('resource', ['persistentvolumeclaims', 'deployments',
'services', 'foo'])
@mark.parametrize('res_code', [404, 200, 500])
@mark.asyncio
async def test_does_resource_exist(patch, story, resource,
async_mock, res_code):
resp = MagicMock()
resp.code = res_code
patch.object(Kubernetes, 'make_k8s_call',
new=async_mock(return_value=resp))
if res_code == 500 or resource == 'foo':
with pytest.raises(Exception):
await Kubernetes._does_resource_exist(story.app, resource, 'name')
return
ret = await Kubernetes._does_resource_exist(story.app, resource, 'name')
if res_code == 200:
assert ret is True
else:
assert ret is False
expected_path = Kubernetes._get_api_path_prefix(resource) + \
f'/{story.app.app_id}/{resource}/name'
Kubernetes.make_k8s_call.mock.assert_called_with(story.app.config,
story.app.logger,
expected_path)
@mark.asyncio
async def test_list_resource_names(story, patch, async_mock):
mock_res = MagicMock()
mock_res.body = json.dumps({
'items': [
{'metadata': {'name': 'hello'}},
{'metadata': {'name': 'world'}},
]
})
patch.object(Kubernetes, 'make_k8s_call',
new=async_mock(return_value=mock_res))
patch.object(Kubernetes, '_get_api_path_prefix', return_value='prefix')
ret = await Kubernetes._list_resource_names(story.app, 'services')
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger,
f'prefix/{story.app.app_id}/services?includeUninitialized=true')
assert ret == ['hello', 'world']
def test_new_ssl_context():
assert isinstance(Kubernetes.new_ssl_context(), ssl.SSLContext)
@mark.parametrize('res_code', [200, 400])
@mark.asyncio
async def test_create_pod(patch, async_mock, story, line, res_code):
res = MagicMock()
res.code = res_code
patch.object(Kubernetes, 'create_deployment', new=async_mock())
patch.object(Kubernetes, 'create_service', new=async_mock())
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(return_value=res))
image = 'alpine/alpine:latest'
start_command = ['/bin/sleep', '1d']
container_name = 'asyncy--alpine-1'
env = {'token': 'foo'}
story.app.app_id = 'my_app'
await Kubernetes.create_pod(
story.app, line[LineConstants.service], image,
container_name, start_command, None, env, [], [])
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger,
'/apis/apps/v1/namespaces/my_app/deployments/asyncy--alpine-1')
if res_code == 200:
assert Kubernetes.create_deployment.mock.called is False
assert Kubernetes.create_service.mock.called is False
else:
Kubernetes.create_deployment.mock.assert_called_with(
story.app, line[LineConstants.service],
image, container_name, start_command, None, env, [], [])
Kubernetes.create_service.mock.assert_called_with(
story.app, line[LineConstants.service], container_name)
@mark.parametrize('persist', [True, False])
@mark.parametrize('resource_exists', [True, False])
@mark.asyncio
async def test_create_volume(story, patch, async_mock,
persist, resource_exists):
name = 'foo'
patch.object(Kubernetes, '_does_resource_exist',
new=async_mock(return_value=resource_exists))
patch.object(Kubernetes, '_update_volume_label',
new=async_mock())
patch.object(time, 'time', return_value=123)
res = MagicMock()
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(return_value=res))
patch.object(Kubernetes, 'raise_if_not_2xx')
expected_path = f'/api/v1/namespaces/{story.app.app_id}' \
f'/persistentvolumeclaims'
expected_payload = {
'apiVersion': 'v1',
'kind': 'PersistentVolumeClaim',
'metadata': {
'name': name,
'namespace': story.app.app_id,
'labels': {
'last_referenced_on': '123',
'omg_persist': f'{persist}'
}
},
'spec': {
'accessModes': ['ReadWriteOnce'],
'resources': {
'requests': {
'storage': '100Mi'
}
}
}
}
await Kubernetes.create_volume(story.app, name, persist)
if resource_exists:
Kubernetes._update_volume_label.mock.assert_called_with(
story.app, name)
Kubernetes.make_k8s_call.mock.assert_not_called()
else:
Kubernetes._update_volume_label.mock.assert_not_called()
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger,
expected_path, expected_payload)
Kubernetes.raise_if_not_2xx.assert_called_with(res)
@mark.asyncio
async def test_create_imagepullsecret(story, patch, async_mock):
res = MagicMock()
res.code = 200
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(return_value=res))
container_config = ContainerConfig(name='first', data={
'auths': {
'https://index.docker.io/v1/': {
'auth': 'username_password_base64'
}
}
})
b64_container_config = base64.b64encode(
json.dumps(container_config.data).encode()
).decode()
expected_path = f'/api/v1/namespaces/{story.app.app_id}/secrets'
expected_payload = {
'apiVersion': 'v1',
'kind': 'Secret',
'type': 'kubernetes.io/dockerconfigjson',
'metadata': {
'name': container_config.name,
'namespace': story.app.app_id
},
'data': {
'.dockerconfigjson': b64_container_config
}
}
await Kubernetes.create_imagepullsecret(story.app, container_config)
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger, expected_path, expected_payload)
@mark.asyncio
async def test_update_volume_label(story, patch, async_mock):
res = MagicMock()
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(return_value=res))
patch.object(Kubernetes, 'raise_if_not_2xx')
patch.object(time, 'time', return_value=123)
payload = {
'metadata': {
'labels': {
'last_referenced_on': '123'
}
}
}
await Kubernetes._update_volume_label(story.app, 'db')
path = f'/api/v1/namespaces/{story.app.app_id}/persistentvolumeclaims/db'
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger, path, payload, method='patch')
Kubernetes.raise_if_not_2xx.assert_called_with(res)
@mark.parametrize('resource_exists', [True, False])
@mark.parametrize('k8s_api_returned_2xx', [True, False])
@mark.asyncio
async def test_create_ingress(patch, app, async_mock, resource_exists,
k8s_api_returned_2xx):
if resource_exists and not k8s_api_returned_2xx:
# Invalid combination, since if the ing resource exists already,
# no additional call to the k8s API is made.
return
app.app_id = 'my_app_id'
app.config.INGRESS_GLOBAL_STATIC_IP_NAME = 'ip-static-name-global'
ingress_name = 'my_ingress_name'
hostname = 'my_ingress_hostname'
container_name = 'my_container_name'
expose = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
http_conf = {
'path': '/my_app',
'port': 6000
}
app.services = {
expose.service: {
ServiceConstants.config: {
KEY_EXPOSE: {
expose.service_expose_name: {
'http': http_conf
}
}
}
}
}
app.config.APP_DOMAIN = 'foo.com'
patch.object(Kubernetes, '_does_resource_exist',
new=async_mock(return_value=resource_exists))
expected_payload = {
'apiVersion': 'extensions/v1beta1',
'kind': 'Ingress',
'metadata': {
'name': ingress_name,
'annotations': {
'kubernetes.io/ingress.class': 'nginx',
'kubernetes.io/ingress.global-static-ip-name':
app.config.INGRESS_GLOBAL_STATIC_IP_NAME,
'ingress.kubernetes.io/rewrite-target': expose.http_path,
'nginx.ingress.kubernetes.io/proxy-body-size': '1m',
'nginx.ingress.kubernetes.io/proxy-read-timeout': '120'
}
},
'spec': {
'tls': [
{
'hosts': [f'{hostname}.'
f'{app.config.APP_DOMAIN}']
}
],
'rules': [
{
'host': f'{hostname}.{app.config.APP_DOMAIN}',
'http': {
'paths': [
{
'path': http_conf['path'],
'backend': {
'serviceName': container_name,
'servicePort': http_conf['port']
}
}
]
}
}
]
}
}
patch.object(Kubernetes, 'make_k8s_call',
new=async_mock(return_value=314))
patch.object(Kubernetes, 'is_2xx',
return_value=k8s_api_returned_2xx)
if k8s_api_returned_2xx:
await Kubernetes.create_ingress(ingress_name, app, expose,
container_name,
hostname)
else:
with pytest.raises(K8sError):
await Kubernetes.create_ingress(ingress_name, app, expose,
container_name,
hostname)
return
if resource_exists:
Kubernetes.make_k8s_call.mock.assert_not_called()
else:
prefix = Kubernetes._get_api_path_prefix('ingresses')
prefix = f'{prefix}/{app.app_id}/ingresses'
Kubernetes.make_k8s_call.mock.assert_called_with(
app.config, app.logger, prefix, payload=expected_payload)
Kubernetes.is_2xx.assert_called_with(314)
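# End-to-end check of the deployment payload, covering volumes, image pull
# secrets, the liveness probe and the readiness polling loop.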
@mark.asyncio
@mark.parametrize('image_pull_policy', ['Always', 'IfNotPresent'])
async def test_create_deployment(patch, async_mock, story, image_pull_policy):
container_name = 'asyncy--alpine-1'
story.app.app_id = 'my_app'
patch.object(story.app, 'image_pull_policy',
return_value=image_pull_policy)
image = 'alpine:latest'
env = {'token': 'asyncy-19920', 'username': 'asyncy'}
start_command = ['/bin/bash', 'sleep', '10000']
shutdown_command = ['wall', 'Shutdown']
volumes = [Volume(persist=False, name='tmp', mount_path='/tmp'),
Volume(persist=True, name='db', mount_path='/db')]
container_configs = [
ContainerConfig(name='first', data={
'auths': {
'https://index.docker.io/v1/': {
'auth': 'username_password_base64'
}
}
}),
ContainerConfig(name='second', data={
'auths': {
'https://index.docker.io/v1/': {
'auth': 'new_username_password_base64'
}
}
})
]
liveness_probe = {
'httpGet': {
'path': '/healthz',
'port': 8000
},
'initialDelaySeconds': 10,
'timeoutSeconds': 30,
'periodSeconds': 30,
'successThreshold': 1,
'failureThreshold': 5
}
patch.object(Kubernetes, 'remove_volume', new=async_mock())
patch.object(Kubernetes, 'create_volume', new=async_mock())
patch.object(Kubernetes, 'create_imagepullsecret', new=async_mock())
patch.object(Kubernetes, 'get_liveness_probe', return_value=liveness_probe)
b16_service_name = base64.b16encode('alpine'.encode()).decode()
expected_payload = {
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': container_name,
'namespace': story.app.app_id
},
'spec': {
'replicas': 1,
'strategy': {
'type': 'RollingUpdate'
},
'selector': {
'matchLabels': {
'app': container_name
}
},
'template': {
'metadata': {
'labels': {
'app': container_name,
'logstash-enabled': 'true',
'b16-service-name': b16_service_name
}
},
'spec': {
'containers': [
{
'name': container_name,
'image': image,
'resources': {
'limits': {
'memory': '200Mi'
# 'cpu': '500m'
}
},
'command': start_command,
'imagePullPolicy': image_pull_policy,
'env': [{'name': 'token', 'value': 'asyncy-19920'},
{'name': 'username', 'value': 'asyncy'}],
'lifecycle': {
'preStop': {
'exec': {
'command': shutdown_command
}
}
},
'volumeMounts': [
{
'mountPath': volumes[0].mount_path,
'name': volumes[0].name
},
{
'mountPath': volumes[1].mount_path,
'name': volumes[1].name
}
],
'livenessProbe': liveness_probe
}
],
'volumes': [
{
'name': volumes[0].name,
'persistentVolumeClaim': {
'claimName': volumes[0].name
}
},
{
'name': volumes[1].name,
'persistentVolumeClaim': {
'claimName': volumes[1].name
}
}
],
'imagePullSecrets': [
{
'name': container_configs[0].name
},
{
'name': container_configs[1].name
}
]
}
}
}
}
patch.object(asyncio, 'sleep', new=async_mock())
patch.object(Kubernetes, 'check_for_image_errors', new=async_mock())
expected_create_path = f'/apis/apps/v1/namespaces/' \
f'{story.app.app_id}/deployments'
expected_verify_path = f'/apis/apps/v1/namespaces/{story.app.app_id}' \
f'/deployments/{container_name}'
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(side_effect=[
_create_response(404),
_create_response(201),
_create_response(200, {'status': {'readyReplicas': 0}}),
_create_response(200, {'status': {'readyReplicas': 0}}),
_create_response(200, {'status': {'readyReplicas': 1}})
]))
await Kubernetes.create_deployment(story.app, 'alpine', image,
container_name,
start_command, shutdown_command, env,
volumes, container_configs)
Kubernetes.remove_volume.mock.assert_called_once()
Kubernetes.remove_volume.mock.assert_called_with(
story.app, volumes[0].name)
assert Kubernetes.create_volume.mock.mock_calls == [
mock.call(story.app, volumes[0].name, volumes[0].persist),
mock.call(story.app, volumes[1].name, volumes[1].persist)
]
assert Kubernetes.create_imagepullsecret.mock.mock_calls == [
mock.call(story.app, container_configs[0]),
mock.call(story.app, container_configs[1]),
]
assert Kubernetes.make_k8s_call.mock.mock_calls == [
mock.call(story.app.config, story.app.logger,
expected_create_path, expected_payload),
mock.call(story.app.config, story.app.logger,
expected_create_path, expected_payload),
mock.call(story.app.config, story.app.logger, expected_verify_path),
mock.call(story.app.config, story.app.logger, expected_verify_path),
mock.call(story.app.config, story.app.logger, expected_verify_path)
]
@mark.parametrize('unavailable', [True, False])
@mark.asyncio
async def test_wait_for_port(patch, magic, async_mock, unavailable):
fut = magic()
patch.object(asyncio, 'open_connection', return_value=fut)
def exc(a, timeout=0):
raise ConnectionRefusedError()
if unavailable:
patch.object(asyncio, 'wait_for', new=async_mock(side_effect=exc))
else:
patch.object(asyncio, 'wait_for', new=async_mock())
patch.object(asyncio, 'sleep', new=async_mock())
ret = await Kubernetes.wait_for_port('asyncy.com', 80)
asyncio.wait_for.mock.assert_called_with(fut, timeout=2)
if unavailable:
assert ret is False
else:
assert ret is True
@mark.asyncio
async def test_create_service(patch, story, async_mock):
container_name = 'asyncy--alpine-1'
line = {
LineConstants.service: 'alpine'
}
patch.object(Kubernetes, 'find_all_ports', return_value={10, 20, 30})
patch.object(Kubernetes, 'raise_if_not_2xx')
patch.object(Kubernetes, 'get_hostname', return_value=container_name)
patch.object(Kubernetes, 'make_k8s_call', new=async_mock())
patch.object(Kubernetes, 'wait_for_port',
new=async_mock(return_value=True))
patch.object(asyncio, 'sleep', new=async_mock())
story.app.app_id = 'my_app'
expected_payload = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': container_name,
'namespace': story.app.app_id,
'labels': {
'app': container_name
}
},
'spec': {
'ports': [
{'port': 10, 'protocol': 'TCP', 'targetPort': 10},
{'port': 20, 'protocol': 'TCP', 'targetPort': 20},
{'port': 30, 'protocol': 'TCP', 'targetPort': 30}
],
'selector': {
'app': container_name
}
}
}
expected_path = f'/api/v1/namespaces/{story.app.app_id}/services'
await Kubernetes.create_service(story.app, line[LineConstants.service],
container_name)
Kubernetes.make_k8s_call.mock.assert_called_with(
story.app.config, story.app.logger, expected_path, expected_payload)
Kubernetes.raise_if_not_2xx.assert_called_with(
Kubernetes.make_k8s_call.mock.return_value)
assert Kubernetes.wait_for_port.mock.mock_calls == [
mock.call(container_name, 10),
mock.call(container_name, 20),
mock.call(container_name, 30)
]
@mark.asyncio
async def test_check_for_image_errors(patch, app, async_mock):
container_name = 'my_container'
app.app_id = 'my_app'
patch.object(Kubernetes, 'make_k8s_call', new=async_mock(side_effect=[
_create_response(200, {
'items': [{
'status': {
'containerStatuses': [{
'image': 'test',
'state': {
'waiting': {
'reason': 'ContainerCreating'
}
}
}]
}
}]
}),
_create_response(200, {
'items': [{
'status': {
'containerStatuses': [{
'image': 'test',
'state': {
'waiting': {
'reason': 'ImagePullBackOff'
}
}
}]
}
}]
}),
]))
await Kubernetes.check_for_image_errors(app, container_name)
with pytest.raises(K8sError) as exc:
await Kubernetes.check_for_image_errors(app, container_name)
assert exc.value.message == 'ImagePullBackOff - Failed to pull image test'
prefix = Kubernetes._get_api_path_prefix('pods')
qs = urllib.parse.urlencode({
'labelSelector': f'app={container_name}'
})
Kubernetes.make_k8s_call.mock.assert_called()
Kubernetes.make_k8s_call.mock.assert_called_with(app.config, app.logger,
f'{prefix}/{app.app_id}'
f'/pods?{qs}')
@mark.parametrize('service', [{
'name': 'first',
'configuration': {},
'liveness_probe': None
}, {
'name': 'second',
'configuration': {
'health': {
'http': {
'method': 'get',
'path': '/healthz',
'port': 8000
}
}
},
'liveness_probe': {
'httpGet': {
'path': '/healthz',
'port': 8000
},
'initialDelaySeconds': 10,
'timeoutSeconds': 30,
'periodSeconds': 30,
'successThreshold': 1,
'failureThreshold': 5
}
}])
def test_get_liveness_probe(app, service):
app.services = {
service['name']: {
'configuration': service['configuration']
}
}
liveness_probe = Kubernetes.get_liveness_probe(app, service['name'])
assert liveness_probe == service['liveness_probe']
def test_is_2xx():
res = MagicMock()
res.code = 200
assert Kubernetes.is_2xx(res) is True
res.code = 210
assert Kubernetes.is_2xx(res) is True
res.code = 300
assert Kubernetes.is_2xx(res) is False
res.code = 400
assert Kubernetes.is_2xx(res) is False
|
the-stack_106_14438
|
try:
import pydice
except ImportError:
print("WARNING: package missing for dicebeard.\n")
print("Install custom pydice with:\n\n"
"pip install git+git://github.com/nasfarley88/pydice.git")
raise SystemExit
from copy import deepcopy
import io
import telepot
import telepot.aio
from telepot import glance
from telepot.namedtuple import (
InlineKeyboardMarkup,
InlineKeyboardButton,
ReplyKeyboardMarkup,
ReplyKeyboardRemove
)
from skybeard.beards import BeardChatHandler, ThatsNotMineException
from skybeard.bearddbtable import BeardDBTable, BeardInstanceDBTable
from skybeard.decorators import onerror, getargsorask, getargs
from skybeard.predicates import regex_predicate
from .skb_roll import roll, beardeddie
from .helper import TrainResult, AnswerTimer
from .utils import image_to_bytesio
import matplotlib.pyplot as plt
from multiprocessing import Pool
import asyncio
import logging
logger = logging.getLogger(__name__)
async def run_in_async_process(func, *args, **kwargs):
"""Run ordinary function truly async with processes.
This function turns blocking ordinary functions (as opposed to coroutine
functions) into awaitables that do not block the main thread.
"""
with Pool(processes=1) as pool:
result = pool.apply_async(func, args, kwargs)
while True:
if result.ready():
return result.get()
else:
await asyncio.sleep(0.01)
class DiceBeard(BeardChatHandler):
__commands__ = [
('roll', 'roll', 'Rolls dice. Parses args and rolls.'),
('rgurps', 'roll_gurps', 'Rolls 3d6, for GURPS!'),
('train', 'train', 'does some training'),
('trainmany', 'train_many', 'Trains dice roll <code>n</code> times.'),
        # TODO reinstate coins when implemented
# ('flip', 'flip_coin', 'Flips a number of coins and returns the result'),
('mode', 'choose_mode', ('Can change the output mode of the bot'
' between picture, icons and text')),
('history', 'show_results', 'prints contents of the database'),
('stats', 'show_stats', 'shows the users statistics'),
('wait', 'wait', 'Waits for 3 seconds without blocking.'),
('toggleautogurps', 'toggle_auto_gurps', 'Toggles automatic GURPS rolling.'),
]
__userhelp__ = ('Can roll dice or flip coins.\n\n'
'To roll dice use the /roll command followed by any '
'number of arguments of the form 3d6+5 (can be + or -) '
                    'separated by spaces. Currently, supported dice for '
'producing images are d4, d6, d8, d10, d12 and d20. To '
'flip coins simply type /flip followed by the number of '
                    'coins you would like to flip (e.g. /flip 10 will flip 10 '
'coins)').strip()
_timeout = 90
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.keyboard = InlineKeyboardMarkup(
inline_keyboard=[
[InlineKeyboardButton(
text='Picture', callback_data=self.serialize('image')),
InlineKeyboardButton(
text='Text', callback_data=self.serialize('text'))],
])
# Table for storing results of training
self.train_table = BeardDBTable(self, 'train')
self.settings_table = BeardInstanceDBTable(self, 'settings')
if self.auto_gurps_roll_enabled:
self.register_auto_gurps_command()
# Can be 'text' or 'image'
self.mode = 'image'
def register_auto_gurps_command(self):
self.register_command(
regex_predicate(r'^-?\d+$'),
self.auto_roll_gurps
)
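    # The auto-roll toggle is persisted in the per-instance settings table
    # (BeardInstanceDBTable), so it is read back from the database on each access.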
@property
def auto_gurps_roll_enabled(self):
with self.settings_table as table:
entries = [i for i in table.find(name='auto_gurps_roll_enabled')]
if len(entries) > 1:
# If there's more than one entry, there's been a problem. Drop
# the table and remake.
self.logger.warning("Too many entries found in settings table: {entries}".format(**locals()))
table.drop()
entries = []
# If there's no entry, make one.
        if len(entries) == 0:
self.auto_gurps_roll_enabled = False
with self.settings_table as table:
entry = table.find_one(name='auto_gurps_roll_enabled')
return entry['value']
# return None
@auto_gurps_roll_enabled.setter
def auto_gurps_roll_enabled(self, value):
assert isinstance(value, bool), "Toggle must be boolean."
with self.settings_table as table:
entry = table.find_one(name='auto_gurps_roll_enabled')
if entry is not None:
entry['value'] = value
table.update(entry, ['id'])
else:
table.insert(dict(name='auto_gurps_roll_enabled', value=value))
# Let's check the database
entries = [i for i in table.find(name='auto_gurps_roll_enabled')]
assert len(entries) == 1
assert table.find_one(name='auto_gurps_roll_enabled')['value'] == value
return value
async def toggle_auto_gurps(self, msg):
# This uses the database a lot. This might be a problem in the future.
# If it is, cache it locally.
if not self.auto_gurps_roll_enabled:
self.register_auto_gurps_command()
self.auto_gurps_roll_enabled = True
await self.sender.sendMessage("Auto GURPS rolling enabled!")
else:
for command in self._instance_commands:
# If command is regex predicate
if command.toJSON()['predicate'].startswith('re.compile'):
self._instance_commands.remove(command)
self.auto_gurps_roll_enabled = False
await self.sender.sendMessage("Auto GURPS rolling disabled.")
break
else:
assert False, "Shouldn't get here."
@onerror()
@getargs()
async def train_many(self, msg, no_of_times, no_of_dice=3):
total_score = 0
try:
no_of_times = int(no_of_times)
except ValueError:
await self.sender.sendMessage(
"I require an integer number of turns.")
for i in range(int(no_of_times)):
            # Change the message into something more palatable
msg_edited = deepcopy(msg)
msg_edited['text'] = "/train {}".format(no_of_dice)
result = await self.train(msg_edited)
if result.correct:
total_score += result.time
else:
total_score += 10.
assert isinstance(total_score, float)
await self.sender.sendMessage(
"Your total score is {:.3} for {} turns".format(
total_score, no_of_times))
async def _create_personal_listener_from_msg(self, msg):
my_listener = self.bot.create_listener()
my_listener.capture([{'from': {'id': msg['from']['id']}},
{'chat': {'id': msg['chat']['id']}}])
return my_listener
@onerror()
@getargs()
async def train(self, msg, no_of_dice=3):
'''Game for training adding up dice.'''
try:
no_of_dice = int(no_of_dice)
except ValueError:
            await self.sender.sendMessage("Sorry, '{}' is not a number.".format(no_of_dice))
return
if no_of_dice > 10:
await self.sender.sendMessage(
"Sorry, that's too many dice! Try a number under 10.")
return
r = roll('{}d6'.format(no_of_dice))
if self.mode == 'image':
await self._send_roll(r, with_total=False, scattered=True)
await self.sender.sendMessage(
"What's the total?",
reply_markup=ReplyKeyboardMarkup(keyboard=(
                    ('3', '4', '5', '6', '7'),
                    ('8', '9', '10', '11', '12'),
                    ('13', '14', '15', '16', '17', '18'),
), one_time_keyboard=True))
else:
await self._send_roll(r, with_total=False)
my_listener = await self._create_personal_listener_from_msg(msg)
with AnswerTimer() as timer:
msg = await my_listener.wait()
# Check if the answer is a number
try:
answer = int(msg['text'])
except ValueError:
await self.sender.sendMessage("That answer was not a number.")
return
except KeyError:
await self.sender.sendMessage(
"Please answer with text based numbers.")
return
result = TrainResult(r, answer, timer.total_time)
# Add the result to the database
u_id = msg['from']['id']
await self._add_result_to_table(result, u_id)
# Report back to the user about their answer
if result.correct:
report_back_str = 'Correct: {:.3}s'.format(timer.total_time)
else:
report_back_str = 'Wrong: {:.3}s'.format(timer.total_time)
await self.sender.sendMessage(
report_back_str,
reply_markup=ReplyKeyboardRemove(remove_keyboard=True))
return result
async def _send_roll(self, roll, *args, **kwargs):
"""Sends roll through telegram using preferred method."""
try:
if self.mode == "text":
await self.sender.sendMessage(roll.to_text(*args, **kwargs))
elif self.mode == "image":
out_img = await run_in_async_process(
roll.to_image, *args, **kwargs)
bytes_output = image_to_bytesio(out_img)
await self.sender.sendPhoto(bytes_output)
else:
raise NotImplementedError(
"That mode is not implemented: {}".format(self.mode))
except (NotImplementedError, beardeddie.ImageNotSupported):
await self.sender.sendMessage("Mode not supported with this "
"expression. Here's a text version: ")
await self.sender.sendMessage(roll.to_text())
@onerror()
@getargsorask([('roll_expr', 'What dice do you want to roll?')])
async def roll(self, msg, roll_expr):
self.logger.debug(roll_expr)
r = roll(roll_expr)
await self._send_roll(r)
@onerror()
@getargs()
async def roll_gurps(self, msg, roll_against=None):
return await self._roll_gurps(msg, roll_against)
@onerror()
async def auto_roll_gurps(self, msg):
roll_against = msg['text']
return await self._roll_gurps(msg, roll_against)
async def _roll_gurps(self, msg, roll_against=None):
r = roll('3d6')
logger.debug("Sending dice roll...")
await self._send_roll(r, scattered=True)
logger.debug("Sent dice roll.")
logger.debug("Sending pass/fail...")
if roll_against is not None:
logger.debug("roll_against is not None!")
roll_against = int(roll_against)
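            # Apply GURPS critical-success/failure thresholds first, then the plain success/fail comparison.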
if roll_against < 15 and r.total <= 4 or\
roll_against == 15 and r.total <= 5 or\
roll_against >= 16 and r.total <= 6:
await self.sender.sendMessage("✅✅ Critical success!")
elif (roll_against >= 16 and r.total == 18 or
roll_against <= 15 and r.total >= 17 or
r.total > roll_against+10):
await self.sender.sendMessage("❌❌ Critical fail!")
elif r.total <= roll_against:
await self.sender.sendMessage("✅ Success!")
else:
await self.sender.sendMessage("❌ Fail!")
logger.debug("Pass/fail should have been sent.")
@onerror()
@getargsorask([('input_args', 'How many coins do you want to flip?')])
async def flip_coin(self, msg, input_args):
raise NotImplementedError
@onerror()
async def choose_mode(self, msg):
await self.sender.sendMessage('Please choose:',
reply_markup=self.keyboard)
async def on_callback_query(self, msg):
query_id, from_id, query_data = glance(msg, flavor='callback_query')
try:
data = self.deserialize(query_data)
except ThatsNotMineException:
return
await self.bot.editMessageText(
telepot.origin_identifier(msg),
text="Mode changed to: {}".format(data),
reply_markup=self.keyboard)
self.mode = data
async def _add_result_to_table(self, result, u_id):
'''Adds the result to the database'''
dice = ','.join([str(die.faces.stop-1) for die in result.roll.dice])
roll = ','.join([str(die.result) for die in result.roll.dice])
time = result.time
total = result.roll.total
guess = result.guess
correct = result.correct
with self.train_table as table:
table.insert(dict(
uid=u_id,
dice=dice,
roll=roll,
total=total,
guess=guess,
correct=correct,
time=time))
async def show_results(self, msg):
'''Print items in the database'''
u_id = msg['from']['id']
with self.train_table as table:
matches = table.find(uid=u_id)
items = [match for match in matches]
await self.sender.sendMessage(
'\n'.join(['[{}], {}, {}'.format(item['roll'], item['guess'], item['time']) for item in items[:10]]))
async def show_stats(self, msg):
'''Show all the statistics'''
# Graph of roll total vs time
u_id = msg['from']['id']
with self.train_table as table:
matches = table.find(uid=u_id)
# Extracting only rolls of 3d6
items = [match for match in matches if match['dice'] == '6,6,6']
        # Add up the time it took the user to count each possible total
totals = [[0, 0] for i in range(1, 19)]
for item in items:
n = item['total']-1
totals[n][0] += 1
totals[n][1] += item['time']
x = [i for i in range(1, 19)]
y = [(x[1]/x[0] if x[0] != 0 else 0) for x in totals]
plt.bar(x, y)
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
await self.sender.sendPhoto(buf)
        # Comparison of different users
with self.train_table as table:
matches = table.find(dice='6,6,6')
items = [match for match in matches]
users = {}
for item in items:
u_id = item['uid']
if u_id not in users:
users[u_id] = [0, 0]
users[u_id][0] += 1
users[u_id][1] += item['time']
totals = [users[u_id][1]/users[u_id][0] for u_id in users]
for u_id in users:
rtn = 'User: {}, Average time: {:.3}s'.format(u_id, users[u_id][1]/users[u_id][0])
await self.sender.sendMessage(rtn)
|
the-stack_106_14440
|
#
# PySNMP MIB module Cajun-ROOT (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Cajun-ROOT
# Produced by pysmi-0.3.4 at Mon Apr 29 17:08:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Bits, Counter32, NotificationType, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, Unsigned32, iso, Counter64, enterprises, IpAddress, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Bits", "Counter32", "NotificationType", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "Unsigned32", "iso", "Counter64", "enterprises", "IpAddress", "Integer32", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
lucent = MibIdentifier((1, 3, 6, 1, 4, 1, 1751))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1))
mibs = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2))
cajunRtrProduct = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 43))
cajunRtr = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43))
if mibBuilder.loadTexts: cajunRtr.setLastUpdated('9904220000Z')
if mibBuilder.loadTexts: cajunRtr.setOrganization("Lucent's Concord Technology Center (CTC) ")
cjnSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1))
cjnProtocol = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2))
cjnMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3))
cjnCli = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1, 1))
cjnDload = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1, 2))
cjnIpv4 = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 1))
cjnIpv6 = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 2))
cjnIpx = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 3))
cjnAtalk = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 4))
cjnIpv4Serv = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 5))
cjnIpv6Serv = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 6))
cjnIpxServ = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 7))
cjnAtalkServ = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 8))
cjnOspf = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 9))
cjnRip = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10))
cjnIgmp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 11))
cjnRtm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 12))
cjnDvmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 13))
cjnPimSm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 14))
cjnPimDm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 15))
cjnRsvp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 16))
cjnSnmp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 17))
cjnBgp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 18))
cjnLrrp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 19))
cjnIpxRip = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20))
cjnIpxSap = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 21))
cjnIpIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 1))
cjnIpxIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2))
cjnAtalkIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 3))
cjnResourceMgr = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 4))
cjnIpAListMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 5))
cjnIpForwardCtlMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 6))
cjnIpFwdMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 7))
mibBuilder.exportSymbols("Cajun-ROOT", cjnIpForwardCtlMgt=cjnIpForwardCtlMgt, cjnSnmp=cjnSnmp, cjnIpv6=cjnIpv6, cjnAtalkServ=cjnAtalkServ, mibs=mibs, cjnIpIfMgmt=cjnIpIfMgmt, cjnDvmrp=cjnDvmrp, PYSNMP_MODULE_ID=cajunRtr, products=products, cjnRsvp=cjnRsvp, cjnIpv6Serv=cjnIpv6Serv, cjnResourceMgr=cjnResourceMgr, cjnIgmp=cjnIgmp, cjnOspf=cjnOspf, cjnBgp=cjnBgp, cjnIpxIfMgmt=cjnIpxIfMgmt, cjnAtalkIfMgmt=cjnAtalkIfMgmt, cjnMgmt=cjnMgmt, cjnRtm=cjnRtm, cajunRtr=cajunRtr, cjnPimSm=cjnPimSm, cjnIpFwdMgmt=cjnIpFwdMgmt, cjnLrrp=cjnLrrp, cjnIpxRip=cjnIpxRip, cjnAtalk=cjnAtalk, cjnIpAListMgmt=cjnIpAListMgmt, cajunRtrProduct=cajunRtrProduct, cjnCli=cjnCli, cjnIpv4Serv=cjnIpv4Serv, cjnPimDm=cjnPimDm, cjnIpxServ=cjnIpxServ, cjnRip=cjnRip, cjnDload=cjnDload, cjnIpx=cjnIpx, cjnProtocol=cjnProtocol, lucent=lucent, cjnIpv4=cjnIpv4, cjnSystem=cjnSystem, cjnIpxSap=cjnIpxSap)
|
the-stack_106_14441
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from copy import deepcopy
from collections import Counter
from rlpytorch import Model, ActorCritic
from actor_critic_changed import ActorCriticChanged
from forward_predict import ForwardPredict
from trunk import MiniRTSNet
class Model_ActorCritic(Model):
def __init__(self, args):
super(Model_ActorCritic, self).__init__(args)
self._init(args)
def _init(self, args):
params = args.params
assert isinstance(params["num_action"], int), "num_action has to be a number. action = " + str(params["num_action"])
self.params = params
self.net = MiniRTSNet(args)
last_num_channel = self.net.num_channels[-1]
if self.params.get("model_no_spatial", False):
self.num_unit = params["num_unit_type"]
linear_in_dim = last_num_channel
else:
linear_in_dim = last_num_channel * 25
self.linear_policy = nn.Linear(linear_in_dim, params["num_action"])
self.linear_value = nn.Linear(linear_in_dim, 1)
self.relu = nn.LeakyReLU(0.1)
self.Wt = nn.Linear(linear_in_dim + params["num_action"], linear_in_dim)
self.Wt2 = nn.Linear(linear_in_dim, linear_in_dim)
self.Wt3 = nn.Linear(linear_in_dim, linear_in_dim)
self.softmax = nn.Softmax()
def get_define_args():
return MiniRTSNet.get_define_args()
def forward(self, x):
if self.params.get("model_no_spatial", False):
            # Replace the convolutional network with a simple spatial reduction.
# Input: batchsize, channel, height, width
xreduced = x["s"].sum(2).sum(3).squeeze()
xreduced[:, self.num_unit:] /= 20 * 20
output = self._var(xreduced)
else:
output = self.net(self._var(x["s"]))
return self.decision(output)
def decision(self, h):
h = self._var(h)
policy = self.softmax(self.linear_policy(h))
value = self.linear_value(h)
return dict(h=h, V=value, pi=policy, action_type=0)
def decision_fix_weight(self, h):
# Copy linear policy and linear value
if not hasattr(self, "fixed"):
self.fixed = dict()
self.fixed["linear_policy"] = deepcopy(self.linear_policy)
self.fixed["linear_value"] = deepcopy(self.linear_value)
policy = self.softmax(self.fixed["linear_policy"](h))
value = self.fixed["linear_value"](h)
return dict(h=h, V=value, pi=policy, action_type=0)
def transition(self, h, a):
''' A transition model that could predict the future given the current state and its action '''
h = self._var(h)
na = self.params["num_action"]
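        # One-hot encode the action with the same dtype/device as the hidden state.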
a_onehot = h.data.clone().resize_(a.size(0), na).zero_()
a_onehot.scatter_(1, a.view(-1, 1), 1)
input = torch.cat((h, self._var(a_onehot)), 1)
h2 = self.relu(self.Wt(input))
h3 = self.relu(self.Wt2(h2))
h4 = self.relu(self.Wt3(h3))
return dict(hf=h4)
def reset_forward(self):
self.Wt.reset_parameters()
self.Wt2.reset_parameters()
self.Wt3.reset_parameters()
# Format: key, [model, method]
# if method is None, fall back to default mapping from key to method
Models = {
"actor_critic": [Model_ActorCritic, ActorCritic],
"actor_critic_changed": [Model_ActorCritic, ActorCriticChanged],
"forward_predict": [Model_ActorCritic, ForwardPredict]
}
|
the-stack_106_14442
|
PRODUCT_CATEGORIES = (
(1, "Barang"),
(2, "Pulsa"),
)
GENDER_CHOICES = (
(1, 'Laki-laki'),
(2, 'Perempuan'),
(3, 'Rahasia'),
)
INVESTOR_TYPE_CHOICES = (
(1, 'Aktif'),
(2, 'Tidak Aktif'),
)
DISCOUNT_TYPES = (
(1, 'Global'),
(2, 'Periodik'),
(3, 'Khusus Member'),
)
SALE_STATUSES = (
(1, 'Diproses'),
(2, 'Dibatalkan'),
(3, 'Ditahan'),
(4, 'Selesai'),
)
INVENTORY_METHOD = (
(1, 'FIFO'),
(2, 'LIFO'),
)
|
the-stack_106_14443
|
from table import clean_cell
def parse(data, no_budge_sheets=False):
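    # Walk the spreadsheet rows, building one employee record per matrícula and
    # stopping at the grand-total row ('TOTAL GERAL').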
employees = {}
for i, row in data.iterrows():
matricula = row['MATRÍCULA']
if type(matricula) != str:
matricula = str(matricula)
if matricula == 'TOTAL GERAL':
break
nome = row['NOME']
cargo_efetivo = row['CARGO']
lotacao = row['LOTAÇÃO']
remuneracao_cargo_efetivo = clean_cell(
row['REMUNERAÇÃO_DO_CARGO_EFETIVO'])
outras_verbas_remuneratorias = clean_cell(
row['OUTRAS_VERBAS_REMUNERATÓRIAS_LEGAIS_OU_JUDICIAIS'])
        # Position of trust or commissioned post (Função de Confiança ou Cargo em Comissão)
confianca_comissao = clean_cell(
row['FUNÇÃO_DE_CONFIANÇA_OU_CARGO_EM_COMISSÃO'])
        # Christmas bonus (Gratificação Natalina)
grat_natalina = abs(clean_cell(row['GRATIFICAÇÃO_NATALINA']))
ferias = clean_cell(row['FÉRIAS(1/3_CONSTITUCIONAL)'])
        # Permanence bonus (Abono de Permanência)
permanencia = clean_cell(row['ABONO_PERMANÊNCIA'])
        # Temporary remunerations
outras_remuneracoes_temporarias = clean_cell(
row['OUTRAS_REMUNERAÇÕES_TEMPORÁRIAS'])
        # Indemnity payments
total_indenizacao = clean_cell(row['VERBAS_INDENIZATÓRIAS'])
        # Social security contribution (Contribuição Previdenciária)
previdencia = abs(clean_cell(row['CONTRIBUIÇÃO_PREVIDENCIÁRIA']))
        # Income tax (Imposto de Renda)
imp_renda = abs(clean_cell(row['IMPOSTO_DE_RENDA']))
        # Amount withheld due to the constitutional salary cap (Retenção por Teto Constitucional)
teto_constitucional = abs(clean_cell(row['RETENÇÃO_TETO']))
total_desconto = previdencia + teto_constitucional + imp_renda
total_gratificacoes = (
grat_natalina
+ permanencia
+ confianca_comissao
+ ferias
)
total_bruto = remuneracao_cargo_efetivo + \
outras_verbas_remuneratorias + outras_remuneracoes_temporarias \
+ total_indenizacao + total_gratificacoes
campos = {
'matricula': matricula,
'nome': nome,
            'cargo_efetivo': cargo_efetivo,
'lotacao': lotacao,
'remuneracao_cargo_efetivo': remuneracao_cargo_efetivo,
'outras_verbas_remuneratorias': outras_verbas_remuneratorias,
'confianca_comissao': confianca_comissao,
'grat_natalina': grat_natalina,
'ferias': ferias,
'permanencia': permanencia,
'outras_remuneracoes_temporarias': outras_remuneracoes_temporarias,
'total_indenizacao': total_indenizacao,
'previdencia': previdencia,
'imp_renda': imp_renda,
'teto_constitucional': teto_constitucional,
'total_desconto': total_desconto,
'total_gratificacoes': total_gratificacoes,
'total_bruto': total_bruto
}
employees[matricula] = table(campos)
if no_budge_sheets:
employees[matricula]['income']['other'].update(
{
"eventual_benefits": outras_remuneracoes_temporarias,
}
)
employees[matricula]['income']['other'].update(
{
'total': employees[matricula]['income']['other']['total'] + outras_remuneracoes_temporarias
}
)
return employees
def table(campos):
employees = {
"reg": campos['matricula'],
"name": campos['nome'],
"role": campos['cargo_efetivo'],
"type": "membro",
"workplace": campos['lotacao'],
"active": True,
"income": {
"total": round(campos['total_bruto'], 2),
            # Base pay = remuneração do cargo efetivo + outras verbas remuneratórias (legal or judicial)
"wage": round(
campos['remuneracao_cargo_efetivo'] +
campos['outras_verbas_remuneratorias'], 2
),
"perks": {
"total": round(campos['total_indenizacao'],2),
},
"other": { # Gratificações
"total": campos['total_gratificacoes'],
"trust_position": campos['confianca_comissao'],
"others_total": round(
campos['grat_natalina'] +
campos['ferias'] + campos['permanencia'], 2
),
"others": {
"Férias 1/3 constitucionais": campos['ferias'],
"Gratificação Natalina": campos['grat_natalina'],
"Abono de Permanência": campos['permanencia'],
},
},
},
"discounts": {
            # Discounts object. abs() guarantees the numbers are positive
            # (the spreadsheet stores discounts as negative values).
"total": round(campos['total_desconto'], 2),
"prev_contribution": campos['previdencia'],
# Retenção por teto constitucional
"ceil_retention": campos['teto_constitucional'],
"income_tax": campos['imp_renda'],
},
}
return employees
|
the-stack_106_14444
|
from envs.deep_cure_env import DeepCure, random_base_infect_rate, random_lifetime, ForeignCountry
from plotting import plot
import gym
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
def relu(x):
x_and_zeros = np.array([x, np.zeros(x.shape)])
return np.max(x_and_zeros, axis=0)
def sigmoid(x):
return 1/(1+np.exp(-x))
class NeuralNetworkPolicy:
def __init__(self, env, h_size=16, one_layer=False): # h_size = number of neurons on the hidden layer
if one_layer:
self.activation_functions = (sigmoid,)
weights = (np.zeros([env.observation_space.shape[0] + 1, env.action_space.shape[0]]),)
else:
self.activation_functions = (relu, sigmoid)
# Make a neural network with 1 hidden layer of `h_size` units
weights = (np.zeros([env.observation_space.shape[0] + 1, h_size]),
np.zeros([h_size + 1, env.action_space.shape[0]]))
self.shape_list = weights_shape(weights)
self.num_params = len(flatten_weights(weights))
def __call__(self, state, theta):
weights = unflatten_weights(theta, self.shape_list)
return feed_forward(inputs=state,
weights=weights,
activation_functions=self.activation_functions)
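# Each layer stores its bias in the first row of its weight matrix:
# y = x @ W[1:] + W[0], followed by that layer's activation function.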
def feed_forward(inputs, weights, activation_functions, verbose=False):
x = inputs.copy()
for layer_weights, layer_activation_fn in zip(weights, activation_functions):
y = np.dot(x, layer_weights[1:])
y += layer_weights[0]
layer_output = layer_activation_fn(y)
x = layer_output
return layer_output
def weights_shape(weights):
return [weights_array.shape for weights_array in weights]
def flatten_weights(weights):
"""Convert weight parameters to a 1 dimension array (more convenient for optimization algorithms)"""
nested_list = [weights_2d_array.flatten().tolist() for weights_2d_array in weights]
flat_list = list(itertools.chain(*nested_list))
return flat_list
def unflatten_weights(flat_list, shape_list):
"""The reverse function of `flatten_weights`"""
length_list = [shape[0] * shape[1] for shape in shape_list]
nested_list = []
start_index = 0
for length, shape in zip(length_list, shape_list):
nested_list.append(np.array(flat_list[start_index:start_index+length]).reshape(shape))
start_index += length
return nested_list
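# Illustrative round trip for the helpers above (shapes are made up):
#   weights = (np.zeros([3, 4]), np.zeros([5, 2]))
#   flat = flatten_weights(weights)                         # 3*4 + 5*2 = 22 floats
#   back = unflatten_weights(flat, weights_shape(weights))  # shapes (3, 4) and (5, 2) again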
class ObjectiveFunction:
def __init__(self, env, policy, num_episodes=1, max_time_steps=float('inf'), minimization_solver=True):
self.ndim = policy.num_params # Number of dimensions of the parameter (weights) space
self.env = env
self.policy = policy
self.num_episodes = num_episodes
self.max_time_steps = max_time_steps
self.minimization_solver = minimization_solver
self.num_evals = 0
def eval(self, policy_params, num_episodes=None):
"""Evaluate a policy"""
self.num_evals += 1
if num_episodes is None:
num_episodes = self.num_episodes
average_total_rewards = 0
for i_episode in range(num_episodes):
total_rewards = 0.
state = self.env.reset()
done = False
while not done:
action = self.policy(state, policy_params)
action = action >= 0.5
state, reward, done, info = self.env.step(action)
total_rewards += reward
average_total_rewards += float(total_rewards) / num_episodes
if self.minimization_solver:
average_total_rewards *= -1.
return average_total_rewards # Optimizers do minimization by default...
def __call__(self, policy_params, num_episodes=None):
return self.eval(policy_params, num_episodes)
def saes(objective_function,
x_array,
sigma_array,
max_iterations=500,
tau=None,
hist_dict=None):
"""
x_array : shape (n,)
sigma_array: shape (n,)
"""
if tau is None:
# Self-adaptation learning rate
tau = 1./(2.* len(x_array))
fx = objective_function(x_array)
for i in range(max_iterations):
sigma_array_ = sigma_array * np.exp(tau*np.random.normal(0,1,size=sigma_array.shape))
x_array_ = x_array + sigma_array_ * np.random.normal(0,1,size=x_array.shape)
fx_ = objective_function(x_array_)
if fx_ < fx:
fx = fx_
x_array = x_array_
sigma_array = sigma_array_
if hist_dict is not None:
hist_dict[i] = [fx] + x_array.tolist() + sigma_array.tolist()
return x_array
if __name__ == "__main__":
SEED = 42
np.random.seed(SEED)
env = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], save_history=True, seed=SEED)
env.reset()
nn_policy = NeuralNetworkPolicy(env, one_layer=True)
objective_function = ObjectiveFunction(env=env, policy=nn_policy, num_episodes=25)
hist_dict = {}
initial_solution_array = np.random.random(nn_policy.num_params)
initial_sigma_array = np.ones(nn_policy.num_params) * 1.
theta = saes(objective_function=objective_function,
x_array=initial_solution_array,
sigma_array=initial_sigma_array,
max_iterations=1000,
hist_dict=hist_dict)
np.save('saes-theta.npy', theta)
print(theta)
np.random.seed(SEED)
nn_policy2 = NeuralNetworkPolicy(env, h_size=10, one_layer=False)
objective_function2 = ObjectiveFunction(env=env, policy=nn_policy2, num_episodes=25)
hist_dict2 = {}
initial_solution_array = np.random.random(nn_policy2.num_params)
initial_sigma_array = np.ones(nn_policy2.num_params) * 1.
theta2 = saes(objective_function=objective_function2,
x_array=initial_solution_array,
sigma_array=initial_sigma_array,
max_iterations=1000,
hist_dict=hist_dict2)
np.save('saes-theta2.npy', theta2)
print(theta2)
rewards = pd.DataFrame.from_dict(hist_dict, orient='index').iloc[:,0].to_numpy()
rewards2 = pd.DataFrame.from_dict(hist_dict2, orient='index').iloc[:,0].to_numpy()
plt.figure()
plt.plot(range(len(rewards)), rewards, label='1 layer')
plt.plot(range(len(rewards2)), rewards2, label='2 layer')
plt.xlabel("Training Steps")
plt.ylabel("Reward")
plt.legend()
plt.show()
|
the-stack_106_14446
|
import coins
import cans
import user_interface
class SodaMachine:
def __init__(self):
self.register = []
self.fill_register()
self.inventory = []
self.fill_inventory()
def fill_register(self):
"""Method will fill SodaMachine's register with certain amounts of each coin when called."""
for index in range(10):
self.register.append(coins.Quarter())
for index in range(10):
self.register.append(coins.Dime())
for index in range(10):
self.register.append(coins.Nickel())
for index in range(10):
self.register.append(coins.Penny())
def fill_inventory(self):
"""Method will fill SodaMachine's cans list with certain amounts of each can when called."""
for index in range(4):
self.inventory.append(cans.Cola())
for index in range(4):
self.inventory.append(cans.OrangeSoda())
for index in range(4):
self.inventory.append(cans.RootBeer())
return self.inventory
def begin_transaction(self, customer):
"""Method is complete. Initiates purchase if user decides to proceed. No errors."""
will_proceed = user_interface.display_welcome()
if will_proceed == True:
self.run_transaction(customer)
def run_transaction(self, customer):
selected_soda_name = user_interface.soda_selection(self.inventory)
selected_soda = self.get_inventory_soda(selected_soda_name)
customer_payment = customer.gather_coins_from_wallet(selected_soda)
self.calculate_transaction(customer_payment, selected_soda, customer)
user_interface.output_text("Transaction complete")
def calculate_transaction(self, customer_payment, selected_soda, customer):
total_payment_value = self.calculate_coin_value(customer_payment)
if total_payment_value > selected_soda.price:
change_value = self.determine_change_value(total_payment_value, selected_soda.price)
customer_change = self.gather_change_from_register(change_value)
            if customer_change is None:
                # Register cannot make exact change: refund the full payment and restock the can
                user_interface.output_text("Machine cannot make change, returning payment")
                customer.add_coins_to_wallet(customer_payment)
                self.return_inventory(selected_soda)
            else:
                user_interface.output_text(f'Dispensing ${change_value} back to customer')
                self.deposit_coins_into_register(customer_payment)
                customer.add_coins_to_wallet(customer_change)
                customer.add_can_to_backpack(selected_soda)
                user_interface.end_message(selected_soda, change_value)
elif total_payment_value == selected_soda.price:
self.deposit_coins_into_register(customer_payment)
customer.add_can_to_backpack(selected_soda)
user_interface.end_message(selected_soda, 0)
else:
user_interface.output_text("You do not have enough money to purchase this item, returning payment")
customer.add_coins_to_wallet(customer_payment)
self.return_inventory(selected_soda)
def gather_change_from_register(self, change_value):
change_list = []
while change_value > 0:
if change_value >= 0.25 and self.register_has_coin("Quarter"):
change_list.append(self.get_coin_from_register("Quarter"))
change_value -= 0.25
elif change_value >= 0.10 and self.register_has_coin("Dime"):
change_list.append(self.get_coin_from_register("Dime"))
change_value -= 0.10
elif change_value >= 0.05 and self.register_has_coin("Nickel"):
change_list.append(self.get_coin_from_register("Nickel"))
change_value -= 0.05
elif change_value >= 0.01 and self.register_has_coin("Penny"):
change_list.append(self.get_coin_from_register("Penny"))
change_value -= 0.01
elif change_value == 0:
break
else:
user_interface.output_text("Error: Machine does not have enough change to complete transaction")
self.deposit_coins_into_register(change_list)
change_list = None
break
change_value = round(change_value, 2)
return change_list
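    # Illustrative greedy walk (hypothetical register state): for change_value = 0.40 the
    # loop above would hand back a Quarter (0.15 left), then a Dime (0.05 left), then a
    # Nickel, provided each denomination is still available in the register.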
def get_coin_from_register(self, coin_name):
"""Removes and returns a coin from register"""
for coin in self.register:
if coin.name == coin_name:
self.register.remove(coin)
return coin
return None
def register_has_coin(self, coin_name):
"""Searches register for a type of coin, returns True if coin is found"""
for coin in self.register:
if coin.name == coin_name:
return True
return False
def determine_change_value(self, total_payment, selected_soda_price):
"""Determines amount of change needed by finding difference of payment amount and can price"""
return round(total_payment - selected_soda_price, 2)
    def calculate_coin_value(self, coin_list):
        """Takes in a list of coins, returns the monetary value of the list."""
        total_value = 0
        for coin in coin_list:
            total_value += coin.value
        return round(total_value, 2)
def get_inventory_soda(self, selected_soda_name):
"""Returns the first instance of a can whose name matches the selected_soda_name parameter"""
for can in self.inventory:
if can.name == selected_soda_name:
self.inventory.remove(can)
return can
return None
def return_inventory(self, chosen_soda):
"""Re-adds a remove can back to inventory upon unsuccessful purchase attempt"""
self.inventory.append(chosen_soda)
def deposit_coins_into_register(self, coin_list):
"""Takes in list of coins as argument, adds each coin from list to the register"""
        for coin in coin_list:
            self.register.append(coin)
|
the-stack_106_14447
|
#!/usr/bin/python3
import time
import PyDragonfly
from PyDragonfly import copy_from_msg, copy_to_msg, MT_EXIT, MT_KILL
import message_defs as mdefs
import sys
MID_REQUEST = 11
# Note: Request must be started second
if __name__ == "__main__":
mod = PyDragonfly.Dragonfly_Module(MID_REQUEST, 0)
mod.ConnectToMMM()
mod.Subscribe(mdefs.MT_TEST_DATA)
mod.Subscribe(MT_EXIT)
print("Request running...\n")
    while True:
mod.SendSignal(mdefs.MT_REQUEST_TEST_DATA)
print("Sent request for data")
msg = PyDragonfly.CMessage()
mod.ReadMessage(msg)
print("Received message", msg.GetHeader().msg_type)
if msg.GetHeader().msg_type == mdefs.MT_TEST_DATA:
msg_data = mdefs.MDF_TEST_DATA()
copy_from_msg(msg_data, msg)
print("Data = [a: %d, b: %d, x: %f]" % (msg_data.a, msg_data.b, msg_data.x))
time.sleep(1)
mod.DisconnectFromMMM()
|
the-stack_106_14448
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img=cv2.imread('JK.jpg',0)
edges=cv2.Canny(img,100,200)
plt.subplot(121),plt.imshow(img,cmap='gray')
plt.title('JehanKandy'),plt.xticks([]),plt.yticks([])
plt.subplot(122),plt.imshow(edges, cmap= 'gray')
plt.title('OutPut'),plt.xticks([]),plt.yticks([])
plt.show()
|
the-stack_106_14450
|
'''
Copyright 2020 [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def bubble_sort(array):
""" Read more about bubble sort here https://www.geeksforgeeks.org/bubble-sort/
>>> bubble_sort([3,2,1])
[1, 2, 3]
"""
length = len(array) # length of array
for i in range(length - 1):
for j in range(length - i - 1):
if array[j] > array[j + 1]:
array[j], array[j + 1] = array[j + 1], array[j] # swap two numbers
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
array = [100, 67, 3, 56, 23, 86, 13, 64, 890]
sorted_array = bubble_sort(array)
print(sorted_array)
|
the-stack_106_14453
|
from .base import (
AFFECTED_TASK_OCCURRENCES_CHOICES,
ALL_OCCURRENCES,
ALL_PROPERTIES,
ALWAYS_OVERWRITE,
AUTO_RESOLVE,
CONFLICT_RESOLUTION_CHOICES,
DEFAULT,
DELETE_TYPE_CHOICES,
HARD_DELETE,
ID_ONLY,
MESSAGE_DISPOSITION_CHOICES,
MOVE_TO_DELETED_ITEMS,
NEVER_OVERWRITE,
SAVE_ONLY,
SEND_AND_SAVE_COPY,
SEND_MEETING_CANCELLATIONS_CHOICES,
SEND_MEETING_INVITATIONS_AND_CANCELLATIONS_CHOICES,
SEND_MEETING_INVITATIONS_CHOICES,
SEND_ONLY,
SEND_ONLY_TO_ALL,
SEND_ONLY_TO_CHANGED,
SEND_TO_ALL_AND_SAVE_COPY,
SEND_TO_CHANGED_AND_SAVE_COPY,
SEND_TO_NONE,
SHAPE_CHOICES,
SOFT_DELETE,
SPECIFIED_OCCURRENCE_ONLY,
BulkCreateResult,
RegisterMixIn,
)
from .calendar_item import (
CONFERENCE_TYPES,
AcceptItem,
CalendarItem,
CancelCalendarItem,
DeclineItem,
MeetingCancellation,
MeetingMessage,
MeetingRequest,
MeetingResponse,
TentativelyAcceptItem,
)
from .contact import Contact, DistributionList, Persona
from .item import BaseItem, Item
from .message import ForwardItem, Message, ReplyAllToItem, ReplyToItem
from .post import PostItem, PostReplyItem
from .task import Task
# Traversal enums
SHALLOW = "Shallow"
SOFT_DELETED = "SoftDeleted"
ASSOCIATED = "Associated"
ITEM_TRAVERSAL_CHOICES = (SHALLOW, SOFT_DELETED, ASSOCIATED)
# Contacts search (ResolveNames) scope enums
ACTIVE_DIRECTORY = "ActiveDirectory"
ACTIVE_DIRECTORY_CONTACTS = "ActiveDirectoryContacts"
CONTACTS = "Contacts"
CONTACTS_ACTIVE_DIRECTORY = "ContactsActiveDirectory"
SEARCH_SCOPE_CHOICES = (ACTIVE_DIRECTORY, ACTIVE_DIRECTORY_CONTACTS, CONTACTS, CONTACTS_ACTIVE_DIRECTORY)
ITEM_CLASSES = (
CalendarItem,
Contact,
DistributionList,
Item,
Message,
MeetingMessage,
MeetingRequest,
MeetingResponse,
MeetingCancellation,
PostItem,
Task,
)
__all__ = [
"RegisterMixIn",
"MESSAGE_DISPOSITION_CHOICES",
"SAVE_ONLY",
"SEND_ONLY",
"SEND_AND_SAVE_COPY",
"CalendarItem",
"AcceptItem",
"TentativelyAcceptItem",
"DeclineItem",
"CancelCalendarItem",
"MeetingRequest",
"MeetingResponse",
"MeetingCancellation",
"CONFERENCE_TYPES",
"Contact",
"Persona",
"DistributionList",
"SEND_MEETING_INVITATIONS_CHOICES",
"SEND_TO_NONE",
"SEND_ONLY_TO_ALL",
"SEND_TO_ALL_AND_SAVE_COPY",
"SEND_MEETING_INVITATIONS_AND_CANCELLATIONS_CHOICES",
"SEND_ONLY_TO_CHANGED",
"SEND_TO_CHANGED_AND_SAVE_COPY",
"SEND_MEETING_CANCELLATIONS_CHOICES",
"AFFECTED_TASK_OCCURRENCES_CHOICES",
"ALL_OCCURRENCES",
"SPECIFIED_OCCURRENCE_ONLY",
"CONFLICT_RESOLUTION_CHOICES",
"NEVER_OVERWRITE",
"AUTO_RESOLVE",
"ALWAYS_OVERWRITE",
"DELETE_TYPE_CHOICES",
"HARD_DELETE",
"SOFT_DELETE",
"MOVE_TO_DELETED_ITEMS",
"BaseItem",
"Item",
"BulkCreateResult",
"Message",
"ReplyToItem",
"ReplyAllToItem",
"ForwardItem",
"PostItem",
"PostReplyItem",
"Task",
"ITEM_TRAVERSAL_CHOICES",
"SHALLOW",
"SOFT_DELETED",
"ASSOCIATED",
"SHAPE_CHOICES",
"ID_ONLY",
"DEFAULT",
"ALL_PROPERTIES",
"SEARCH_SCOPE_CHOICES",
"ACTIVE_DIRECTORY",
"ACTIVE_DIRECTORY_CONTACTS",
"CONTACTS",
"CONTACTS_ACTIVE_DIRECTORY",
"ITEM_CLASSES",
]
|
the-stack_106_14457
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_feature_api_base1430
except ImportError:
bt_feature_api_base1430 = sys.modules[
"onshape_client.oas.models.bt_feature_api_base1430"
]
try:
from onshape_client.oas.models import bt_feature_definition_call1406_all_of
except ImportError:
bt_feature_definition_call1406_all_of = sys.modules[
"onshape_client.oas.models.bt_feature_definition_call1406_all_of"
]
try:
from onshape_client.oas.models import btm_feature134
except ImportError:
btm_feature134 = sys.modules["onshape_client.oas.models.btm_feature134"]
class BTFeatureDefinitionCall1406(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"feature": (btm_feature134.BTMFeature134,), # noqa: E501
"library_version": (int,), # noqa: E501
"microversion_skew": (bool,), # noqa: E501
"reject_microversion_skew": (bool,), # noqa: E501
"serialization_version": (str,), # noqa: E501
"source_microversion": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"feature": "feature", # noqa: E501
"library_version": "libraryVersion", # noqa: E501
"microversion_skew": "microversionSkew", # noqa: E501
"reject_microversion_skew": "rejectMicroversionSkew", # noqa: E501
"serialization_version": "serializationVersion", # noqa: E501
"source_microversion": "sourceMicroversion", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_feature_definition_call1406.BTFeatureDefinitionCall1406 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
feature (btm_feature134.BTMFeature134): [optional] # noqa: E501
library_version (int): [optional] # noqa: E501
microversion_skew (bool): [optional] # noqa: E501
reject_microversion_skew (bool): [optional] # noqa: E501
serialization_version (str): [optional] # noqa: E501
source_microversion (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_feature_api_base1430.BTFeatureApiBase1430,
bt_feature_definition_call1406_all_of.BTFeatureDefinitionCall1406AllOf,
],
"oneOf": [],
}
|
the-stack_106_14459
|
# coding: utf-8
import pprint
import re
import six
class RemoveProjectRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str'
}
attribute_map = {
'project_id': 'project_id'
}
def __init__(self, project_id=None):
"""RemoveProjectRequest - a model defined in huaweicloud sdk"""
self._project_id = None
self.discriminator = None
self.project_id = project_id
@property
def project_id(self):
"""Gets the project_id of this RemoveProjectRequest.
        Project ID
:return: The project_id of this RemoveProjectRequest.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this RemoveProjectRequest.
        Project ID
:param project_id: The project_id of this RemoveProjectRequest.
:type: str
"""
self._project_id = project_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemoveProjectRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_14461
|
"""
"""
from sqlalchemy.exc import OperationalError
from server.utils import *
from server.models.payment_method_property import PaymentMethodProperty
from server.data import DataBase
from server.data.helper import ConnectionHelper
from server.db_factory import db
from server.exceptions import *
class PaymentMethodPropertyData(DataBase):
"""
Payment method property data class for accssing database
"""
def __init__(self):
pass
def get_list(self, method_code):
_rows = None
try:
_rows = db.session.query(PaymentMethodProperty).\
filter(PaymentMethodProperty.method_code == method_code).all()
except OperationalError as ex:
raise InternalServerError(ex)
rows = [row.to_dict() for row in _rows]
return rows
def get(self, method_code, property_type):
try:
row = db.session.query(PaymentMethodProperty).\
filter(PaymentMethodProperty.method_code == method_code).\
filter(PaymentMethodProperty.property_type == property_type).\
one_or_none()
if not row:
raise ResourceNotFoundException
except OperationalError as ex:
raise InternalServerError(ex)
return row.to_dict()
def set(self, method_code, property_type, property_value=None):
property = PaymentMethodProperty(
method_code,
property_type,
property_value)
try:
db.session.add(property)
db.session.commit()
except OperationalError as ex:
raise InternalServerError(ex)
except:
db.session.rollback()
return False
return True
def delete(self, method_code, property_type):
try:
row = db.session.query(PaymentMethodProperty).\
filter(PaymentMethodProperty.method_code == method_code).\
filter(PaymentMethodProperty.property_type == property_type).\
delete()
db.session.commit()
except OperationalError as ex:
raise InternalServerError(ex)
except:
db.session.rollback()
return False
return True
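# Illustrative usage (sketch only; assumes an initialized Flask-SQLAlchemy session and
# made-up method/property names not taken from the original project):
#   data = PaymentMethodPropertyData()
#   data.set('CARD', 'currency', 'USD')      # insert a property row
#   data.get('CARD', 'currency')             # returns the stored row as a dict
#   data.delete('CARD', 'currency')          # removes it again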
|
the-stack_106_14462
|
from rest_framework.response import Response
from misago.threads.serializers import PostLikeSerializer
def likes_list_endpoint(request, post):
queryset = post.postlike_set.select_related('liker').values(
'id', 'liker_id', 'liker_name', 'liker_slug', 'liked_on', 'liker__avatars')
likes = []
for like in queryset.iterator():
likes.append(PostLikeSerializer(like).data)
return Response(likes)
|
the-stack_106_14466
|
#!/usr/bin/env python3
from pbzlib import write_pbz, open_pbz
from tests import messages_pb2
def main():
objs = [messages_pb2.Header(version=1)]
for i in range(10):
objs.append(messages_pb2.Object(id=i))
# Method 1: Write messages incrementally
with write_pbz("output.pbz", "tests/messages.descr") as w:
for obj in objs:
w.write(obj)
# Method 2: Write all messages at once
write_pbz("output.pbz", "tests/messages.descr", *objs)
for msg in open_pbz("output.pbz"):
print(msg)
if __name__ == "__main__":
main()
|
the-stack_106_14470
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 10:53:53 2018
@author: tjbanks
"""
from cx_Freeze import setup, Executable
import matplotlib
from matplotlib.backends import backend_qt5agg
import os
os.environ['TCL_LIBRARY'] = r'C:\Users\Tyler\Anaconda3\tcl\tcl8.6'
os.environ['TK_LIBRARY'] = r'C:\Users\Tyler\Anaconda3\tcl\tk8.6'
base = "Win32GUI"
#path_platforms = ( "C:\\Users\\Tyler\\Anaconda3\\pkgs\\qt-5.9.5-vc14he4a7d60_0\\Library\\plugins\\platforms\\qwindows.dll", "platforms\qwindows.dll" )
executables = [Executable("sim_builder.py", base=base)]
packages = ["idna","time","re","subprocess","threading","tempfile","shutil","os","random","numpy","pandas","paramiko","getpass","zipfile","tkinter","tarfile","matplotlib.backends.backend_qt5agg"]
includes = ["atexit","PyQt5.QtCore","PyQt5.QtGui", "PyQt5.QtWidgets", "numpy", "numpy.core._methods"]
#includefiles = [path_platforms]
options = {
'build_exe': {
'includes': includes,
# 'include_files': includefiles,
'packages':packages,
"include_files":[(matplotlib.get_data_path(), "mpl-data")]
},
}
setup(
name = "Sim Builder",
options = options,
version = "1",
description = 'Sim Builder - University of Missouri - Nair Lab (Tyler Banks)',
executables = executables
)
|
the-stack_106_14471
|
import asyncio
from datetime import datetime
import time
from firebot import ALIVE_NAME, CMD_HELP
from firebot.utils import admin_cmd, sudo_cmd, edit_or_reply
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Skynoid"
@borg.on(admin_cmd(pattern="ping$"))
@borg.on(sudo_cmd(pattern="ping$", allow_sudo=True))
async def _(event):
text = f"""
┏━━━┓━━━━━━━━━┓
┃┏━┓┃━━━━━━━━━┃
┃┗━┛┃━━┓━┓━━━┓┃
┃┏━━┛┏┓┃┏┓┓┏┓┃┛
┃┃━━━┗┛┃┃┃┃┗┛┃┓
┗┛━━━━━┛┛┗┛━┓┃┛
━━━━━━━━━━━━┛┃━
━━━━━━━━━━━━━┛━
__FIRE-X__ is **ON!**ツ
•My Master→ {DEFAULTUSER}
↓||•Ms•||↓
"""
st = time.time()
await event.edit(text)
et = time.time()
text += f"\n`{round((et - st), 3)} ms`"
await event.edit(text)
CMD_HELP.update(
{
"ping": "**Ping**\
\n\n**Syntax : **`.ping`\
\n**Usage :** Get speed of your bot."
}
)
|
the-stack_106_14472
|
import os
for filename in ['train.txt', 'valid.txt']:
    with open(filename, 'r') as f:
for line in f:
words = line.split(' ')
index = words[1].split('.')[0]
if (int(words[2]) > int(words[4])) or (int(words[3]) > int(words[5])):
print(line,words[2],words[4],words[3],words[5])
print(words[2] > words[4],words[3] > words[5])
|
the-stack_106_14473
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import hub
LOG = logging.getLogger('ryu.lib.ofctl_v1_3')
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
if action_type == 'OUTPUT':
out_port = int(dic.get('port', ofp.OFPP_ANY))
max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
result = parser.OFPActionOutput(out_port, max_len)
elif action_type == 'COPY_TTL_OUT':
result = parser.OFPActionCopyTtlOut()
elif action_type == 'COPY_TTL_IN':
result = parser.OFPActionCopyTtlIn()
elif action_type == 'SET_MPLS_TTL':
mpls_ttl = int(dic.get('mpls_ttl'))
result = parser.OFPActionSetMplsTtl(mpls_ttl)
elif action_type == 'DEC_MPLS_TTL':
result = parser.OFPActionDecMplsTtl()
elif action_type == 'PUSH_VLAN':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushVlan(ethertype)
elif action_type == 'POP_VLAN':
result = parser.OFPActionPopVlan()
elif action_type == 'PUSH_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushMpls(ethertype)
elif action_type == 'POP_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPopMpls(ethertype)
elif action_type == 'SET_QUEUE':
queue_id = int(dic.get('queue_id'))
result = parser.OFPActionSetQueue(queue_id)
elif action_type == 'GROUP':
group_id = int(dic.get('group_id'))
result = parser.OFPActionGroup(group_id)
elif action_type == 'SET_NW_TTL':
nw_ttl = int(dic.get('nw_ttl'))
result = parser.OFPActionSetNwTtl(nw_ttl)
elif action_type == 'DEC_NW_TTL':
result = parser.OFPActionDecNwTtl()
elif action_type == 'SET_FIELD':
field = dic.get('field')
value = dic.get('value')
result = parser.OFPActionSetField(**{field: value})
elif action_type == 'PUSH_PBB':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushPbb(ethertype)
elif action_type == 'POP_PBB':
result = parser.OFPActionPopPbb()
else:
result = None
return result
def to_actions(dp, acts):
inst = []
actions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for a in acts:
action = to_action(dp, a)
if action is not None:
actions.append(action)
else:
action_type = a.get('type')
if action_type == 'GOTO_TABLE':
table_id = int(a.get('table_id'))
inst.append(parser.OFPInstructionGotoTable(table_id))
elif action_type == 'WRITE_METADATA':
metadata = str_to_int(a.get('metadata'))
metadata_mask = (str_to_int(a['metadata_mask'])
if 'metadata_mask' in a
else parser.UINT64_MAX)
inst.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
elif action_type == 'METER':
meter_id = int(a.get('meter_id'))
inst.append(parser.OFPInstructionMeter(meter_id))
else:
LOG.error('Unknown action type: %s', action_type)
inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
return inst
def action_to_str(act):
action_type = act.cls_action_type
if action_type == ofproto_v1_3.OFPAT_OUTPUT:
buf = 'OUTPUT:' + str(act.port)
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_OUT:
buf = 'COPY_TTL_OUT'
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_IN:
buf = 'COPY_TTL_IN'
elif action_type == ofproto_v1_3.OFPAT_SET_MPLS_TTL:
buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_MPLS_TTL:
buf = 'DEC_MPLS_TTL'
elif action_type == ofproto_v1_3.OFPAT_PUSH_VLAN:
buf = 'PUSH_VLAN:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_VLAN:
buf = 'POP_VLAN'
elif action_type == ofproto_v1_3.OFPAT_PUSH_MPLS:
buf = 'PUSH_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_MPLS:
buf = 'POP_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_SET_QUEUE:
buf = 'SET_QUEUE:' + str(act.queue_id)
elif action_type == ofproto_v1_3.OFPAT_GROUP:
buf = 'GROUP:' + str(act.group_id)
elif action_type == ofproto_v1_3.OFPAT_SET_NW_TTL:
buf = 'SET_NW_TTL:' + str(act.nw_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_NW_TTL:
buf = 'DEC_NW_TTL'
elif action_type == ofproto_v1_3.OFPAT_SET_FIELD:
buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
elif action_type == ofproto_v1_3.OFPAT_PUSH_PBB:
buf = 'PUSH_PBB:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_PBB:
buf = 'POP_PBB'
else:
buf = 'UNKNOWN'
return buf
def actions_to_str(instructions):
actions = []
for instruction in instructions:
if isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionActions):
for a in instruction.actions:
actions.append(action_to_str(a))
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionGotoTable):
buf = 'GOTO_TABLE:' + str(instruction.table_id)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionWriteMetadata):
buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
instruction.metadata_mask)
if instruction.metadata_mask
else 'WRITE_METADATA:0x%x' % instruction.metadata)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionMeter):
buf = 'METER:' + str(instruction.meter_id)
actions.append(buf)
else:
continue
return actions
def to_match(dp, attrs):
convert = {'in_port': int,
'in_phy_port': int,
'metadata': to_match_masked_int,
'dl_dst': to_match_eth,
'dl_src': to_match_eth,
'eth_dst': to_match_eth,
'eth_src': to_match_eth,
'dl_type': int,
'eth_type': int,
'dl_vlan': to_match_vid,
'vlan_vid': to_match_vid,
'vlan_pcp': int,
'ip_dscp': int,
'ip_ecn': int,
'nw_proto': int,
'ip_proto': int,
'nw_src': to_match_ip,
'nw_dst': to_match_ip,
'ipv4_src': to_match_ip,
'ipv4_dst': to_match_ip,
'tp_src': int,
'tp_dst': int,
'tcp_src': int,
'tcp_dst': int,
'udp_src': int,
'udp_dst': int,
'sctp_src': int,
'sctp_dst': int,
'icmpv4_type': int,
'icmpv4_code': int,
'arp_op': int,
'arp_spa': to_match_ip,
'arp_tpa': to_match_ip,
'arp_sha': to_match_eth,
'arp_tha': to_match_eth,
'ipv6_src': to_match_ip,
'ipv6_dst': to_match_ip,
'ipv6_flabel': int,
'icmpv6_type': int,
'icmpv6_code': int,
'ipv6_nd_target': to_match_ip,
'ipv6_nd_sll': to_match_eth,
'ipv6_nd_tll': to_match_eth,
'mpls_label': int,
'mpls_tc': int,
'mpls_bos': int,
'pbb_isid': int,
'tunnel_id': int,
'ipv6_exthdr': to_match_masked_int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'nw_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['nw_src']
del attrs['nw_src']
if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['nw_dst']
del attrs['nw_dst']
kwargs = {}
for key, value in attrs.items():
if key in convert:
value = convert[key](value)
if key in keys:
# For old field name
key = keys[key]
if key == 'tp_src' or key == 'tp_dst':
# TCP/UDP port
conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
'tp_dst': 'tcp_dst'},
inet.IPPROTO_UDP: {'tp_src': 'udp_src',
'tp_dst': 'udp_dst'}}
ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
key = conv[ip_proto][key]
kwargs[key] = value
else:
# others
kwargs[key] = value
return dp.ofproto_parser.OFPMatch(**kwargs)
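# Illustrative attrs dict for to_match (field values are made up):
#   to_match(dp, {'in_port': 1, 'dl_type': 0x0800, 'nw_src': '10.0.0.0/24',
#                 'nw_proto': 6, 'tp_src': 80})
# maps the legacy field names to in_port, eth_type, ipv4_src (with mask), ip_proto and tcp_src.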
def to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value
def to_match_ip(value):
if '/' in value:
(ip_addr, ip_mask) = value.split('/')
if ip_mask.isdigit():
ip = netaddr.ip.IPNetwork(value)
ip_addr = str(ip.ip)
ip_mask = str(ip.netmask)
return ip_addr, ip_mask
else:
return value
def to_match_vid(value):
# NOTE: If "vlan_id/dl_vlan" field is described as decimal int value
# (and decimal string value), it is treated as values of
# VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically
# applied. OTOH, If it is described as hexadecimal string,
# treated as values of oxm_value (including OFPVID_PRESENT
# bit), and OFPVID_PRESENT bit is NOT automatically applied.
if isinstance(value, int):
# described as decimal int value
return value | ofproto_v1_3.OFPVID_PRESENT
else:
if '/' in value:
val = value.split('/')
return int(val[0], 0), int(val[1], 0)
else:
if value.isdigit():
# described as decimal string value
return int(value, 10) | ofproto_v1_3.OFPVID_PRESENT
else:
return int(value, 0)
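# Illustrative inputs for to_match_vid (made-up values): 10 and "10" both become
# 10 | OFPVID_PRESENT (0x100a); "0x100a" is used as a raw oxm value unchanged, and
# "0x1000/0x1000" becomes the (value, mask) pair (0x1000, 0x1000).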
def to_match_masked_int(value):
if isinstance(value, str) and '/' in value:
value = value.split('/')
return str_to_int(value[0]), str_to_int(value[1])
else:
return str_to_int(value)
def match_to_str(ofmatch):
keys = {'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
'vlan_vid': 'dl_vlan',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'ip_proto': 'nw_proto',
'tcp_src': 'tp_src',
'tcp_dst': 'tp_dst',
'udp_src': 'tp_src',
'udp_dst': 'tp_dst'
}
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
if key in keys:
key = keys[key]
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
elif key == 'metadata' or key == 'ipv6_exthdr':
value = match_masked_int_to_str(value, mask)
else:
if mask is not None:
value = value + '/' + mask
else:
value = value
match.setdefault(key, value)
return match
def match_masked_int_to_str(value, mask):
return '%d/%d' % (value, mask) if mask else '%d' % value
def match_vid_to_str(value, mask):
if mask is not None:
value = '0x%04x/0x%04x' % (value, mask)
else:
if value & ofproto_v1_3.OFPVID_PRESENT:
value = str(value & ~ofproto_v1_3.OFPVID_PRESENT)
else:
value = '0x%04x' % value
return value
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = {}
for msg in msgs:
stats = msg.body
s = {'mfr_desc': stats.mfr_desc,
'hw_desc': stats.hw_desc,
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
def get_queue_stats(dp, waiters):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'duration_nsec': stat.duration_nsec,
'duration_sec': stat.duration_sec,
'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
def get_flow_stats(dp, waiters, flow={}):
table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': stats.table_id,
'length': stats.length,
'flags': stats.flags}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_aggregate_flow_stats(dp, waiters, flow={}):
table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
stats = msg.body
s = {'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'flow_count': stats.flow_count}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_port_stats(dp, waiters):
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_ANY)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': stats.port_no,
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec}
ports.append(s)
ports = {str(dp.id): ports}
return ports
def get_meter_stats(dp, waiters):
stats = dp.ofproto_parser.OFPMeterStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
meters = []
for msg in msgs:
for stats in msg.body:
bands = []
for band in stats.band_stats:
b = {'packet_band_count': band.packet_band_count,
'byte_band_count': band.byte_band_count}
bands.append(b)
s = {'meter_id': stats.meter_id,
'len': stats.len,
'flow_count': stats.flow_count,
'packet_in_count': stats.packet_in_count,
'byte_in_count': stats.byte_in_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'band_stats': bands}
meters.append(s)
meters = {str(dp.id): meters}
return meters
def get_meter_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPMBT_DROP: 'DROP',
ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
ofp.OFPMF_PKTPS: 'PKTPS',
ofp.OFPMF_BURST: 'BURST',
ofp.OFPMF_STATS: 'STATS'}
stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
for feature in msg.body:
band_types = []
for k, v in type_convert.items():
if (1 << k) & feature.band_types:
band_types.append(v)
capabilities = []
for k, v in capa_convert.items():
if k & feature.capabilities:
capabilities.append(v)
f = {'max_meter': feature.max_meter,
'band_types': band_types,
'capabilities': capabilities,
'max_bands': feature.max_bands,
'max_color': feature.max_color}
features.append(f)
features = {str(dp.id): features}
return features
def get_meter_config(dp, waiters):
flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
dp.ofproto.OFPMF_PKTPS: 'PKTPS',
dp.ofproto.OFPMF_BURST: 'BURST',
dp.ofproto.OFPMF_STATS: 'STATS'}
band_type = {dp.ofproto.OFPMBT_DROP: 'DROP',
dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK',
dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'}
stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
configs = []
for msg in msgs:
for config in msg.body:
bands = []
for band in config.bands:
b = {'type': band_type.get(band.type, ''),
'rate': band.rate,
'burst_size': band.burst_size}
if band.type == dp.ofproto.OFPMBT_DSCP_REMARK:
b['prec_level'] = band.prec_level
elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER:
b['experimenter'] = band.experimenter
bands.append(b)
c_flags = []
for k, v in flags.items():
if k & config.flags:
c_flags.append(v)
c = {'flags': c_flags,
'meter_id': config.meter_id,
'bands': bands}
configs.append(c)
configs = {str(dp.id): configs}
return configs
def get_group_stats(dp, waiters):
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, 0, dp.ofproto.OFPG_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
groups = []
for msg in msgs:
for stats in msg.body:
bucket_stats = []
for bucket_stat in stats.bucket_stats:
c = {'packet_count': bucket_stat.packet_count,
'byte_count': bucket_stat.byte_count}
bucket_stats.append(c)
g = {'length': stats.length,
'group_id': stats.group_id,
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'bucket_stats': bucket_stats}
groups.append(g)
groups = {str(dp.id): groups}
return groups
def get_group_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD',
ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
ofp.OFPAT_POP_PBB: 'POP_PBB'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
features = {str(dp.id): features}
return features
def get_group_desc(dp, waiters):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
dp.ofproto.OFPGT_FF: 'FF'}
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
for stats in msg.body:
buckets = []
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': stats.group_id,
'buckets': buckets}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
stats = msg.body
for stat in stats:
d = {'port_no': stat.port_no,
'hw_addr': stat.hw_addr,
'name': stat.name,
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def mod_flow_entry(dp, flow, cmd):
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
table_id = int(flow.get('table_id', 0))
idle_timeout = int(flow.get('idle_timeout', 0))
hard_timeout = int(flow.get('hard_timeout', 0))
priority = int(flow.get('priority', 0))
buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
flags = int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_actions(dp, flow.get('actions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
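# Illustrative flow dict for mod_flow_entry (all field values are made up):
#   flow = {'priority': 10,
#           'match': {'in_port': 1, 'dl_type': 0x0800},
#           'actions': [{'type': 'OUTPUT', 'port': 2}]}
#   mod_flow_entry(dp, flow, dp.ofproto.OFPFC_ADD)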
def mod_meter_entry(dp, flow, cmd):
flags_convert = {'KBPS': dp.ofproto.OFPMF_KBPS,
'PKTPS': dp.ofproto.OFPMF_PKTPS,
'BURST': dp.ofproto.OFPMF_BURST,
'STATS': dp.ofproto.OFPMF_STATS}
flow_flags = flow.get('flags')
if not isinstance(flow_flags, list):
flow_flags = [flow_flags]
flags = 0
for flag in flow_flags:
flags |= flags_convert.get(flag, 0)
if not flags:
LOG.error('Unknown flags: %s', flow.get('flags'))
meter_id = int(flow.get('meter_id', 0))
bands = []
for band in flow.get('bands', []):
band_type = band.get('type')
rate = int(band.get('rate', 0))
burst_size = int(band.get('burst_size', 0))
if band_type == 'DROP':
bands.append(
dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
elif band_type == 'DSCP_REMARK':
prec_level = int(band.get('prec_level', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandDscpRemark(
rate, burst_size, prec_level))
elif band_type == 'EXPERIMENTER':
experimenter = int(band.get('experimenter', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandExperimenter(
rate, burst_size, experimenter))
else:
LOG.error('Unknown band type: %s', band_type)
meter_mod = dp.ofproto_parser.OFPMeterMod(
dp, cmd, flags, meter_id, bands)
dp.send_msg(meter_mod)
def mod_group_entry(dp, group, cmd):
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
type_ = type_convert.get(group.get('type', 'ALL'))
if type_ is None:
LOG.error('Unknown type: %s', group.get('type'))
group_id = int(group.get('group_id', 0))
buckets = []
for bucket in group.get('buckets', []):
weight = int(bucket.get('weight', 0))
watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
actions.append(action)
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
port_no = int(port_config.get('port_no', 0))
hw_addr = port_config.get('hw_addr')
config = int(port_config.get('config', 0))
mask = int(port_config.get('mask', 0))
    advertise = int(port_config.get('advertise', 0))
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
if data_type != 'ascii' and data_type != 'base64':
LOG.error('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
|
the-stack_106_14474
|
# -*- coding: utf-8 -*-
from MyQR.mylibs import data, ECC, structure, matrix, draw
# ver: Version from 1 to 40
# ecl: Error Correction Level (L,M,Q,H)
# get a qrcode picture of 3*3 pixels per module
def get_qrcode(ver, ecl, str, save_place):
# Data Coding
ver, data_codewords = data.encode(ver, ecl, str)
# Error Correction Coding
ecc = ECC.encode(ver, ecl, data_codewords)
# Structure final bits
final_bits = structure.structure_final_bits(ver, ecl, data_codewords, ecc)
# Get the QR Matrix
qrmatrix = matrix.get_qrmatrix(ver, ecl, final_bits)
# Draw the picture and Save it, then return the real ver and the absolute name
return ver, draw.draw_qrcode(save_place, qrmatrix)
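# Illustrative call (the file name is made up):
#   ver, path = get_qrcode(1, 'H', 'hello world', 'qr.png')
# returns the version actually used and the absolute name of the saved picture.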
|
the-stack_106_14477
|
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import sys
import tempfile
import _multiprocessing
import time
from . import context
from . import process
from . import util
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
from _multiprocessing import SemLock, sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue, *, ctx):
if ctx is None:
ctx = context._default_context.get_context()
name = ctx.get_start_method()
unlink_now = sys.platform == 'win32' or name == 'fork'
for i in range(100):
try:
sl = self._semlock = _multiprocessing.SemLock(
kind, value, maxvalue, self._make_name(),
unlink_now)
except FileExistsError:
pass
else:
break
else:
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
util.register_after_fork(self, _after_fork)
if self._semlock.name is not None:
# We only get here if we are on Unix with forking
# disabled. When the object is garbage collected or the
# process shuts down we unlink the semaphore name
from .resource_tracker import register
register(self._semlock.name, "semaphore")
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
from .resource_tracker import unregister
sem_unlink(name)
unregister(name, "semaphore")
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
def __enter__(self):
return self._semlock.__enter__()
def __exit__(self, *args):
return self._semlock.__exit__(*args)
def __getstate__(self):
context.assert_spawning(self)
sl = self._semlock
if sys.platform == 'win32':
h = context.get_spawning_popen().duplicate_for_child(sl.handle)
else:
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
util.debug('recreated blocker with handle %r' % state[0])
self._make_methods()
@staticmethod
def _make_name():
return '%s-%s' % (process.current_process()._config['semprefix'],
next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s)>' % (self.__class__.__name__, value)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s, maxvalue=%s)>' % \
(self.__class__.__name__, value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None, *, ctx):
self._lock = lock or ctx.RLock()
self._sleeping_count = ctx.Semaphore(0)
self._woken_count = ctx.Semaphore(0)
self._wait_semaphore = ctx.Semaphore(0)
self._make_methods()
def __getstate__(self):
context.assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()
try:
# wait for notification or timeout
return self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in range(count):
self._lock.acquire()
def notify(self, n=1):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(
False), ('notify: Should not have been able to acquire '
+ '_wait_semaphore')
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res, ('notify: Bug in sleeping_count.acquire'
+ '- res should not be False')
sleepers = 0
while sleepers < n and self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def notify_all(self):
self.notify(n=sys.maxsize)
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = time.monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - time.monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
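# A minimal usage sketch (not part of the module): how a Condition obtained from
# a multiprocessing context is typically used for producer/consumer signalling.
# The names `queue_not_empty`, `items` and the surrounding processes are
# illustrative assumptions only.
#
#   import multiprocessing as mp
#
#   ctx = mp.get_context()
#   items = ctx.Manager().list()
#   queue_not_empty = ctx.Condition()
#
#   def consumer():
#       with queue_not_empty:
#           # wait_for re-checks the predicate each time the condition is notified
#           queue_not_empty.wait_for(lambda: len(items) > 0)
#           print(items.pop())
#
#   def producer():
#       with queue_not_empty:
#           items.append("work")
#           queue_not_empty.notify()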
#
# Event
#
class Event(object):
def __init__(self, *, ctx):
self._cond = ctx.Condition(ctx.Lock())
self._flag = ctx.Semaphore(0)
def is_set(self):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
return True
return False
def set(self):
with self._cond:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
def clear(self):
with self._cond:
self._flag.acquire(False)
def wait(self, timeout=None):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
if self._flag.acquire(False):
self._flag.release()
return True
return False
#
# Barrier
#
class Barrier(threading.Barrier):
def __init__(self, parties, action=None, timeout=None, *, ctx):
import struct
from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2)
cond = ctx.Condition()
self.__setstate__((parties, action, timeout, cond, wrapper))
self._state = 0
self._count = 0
def __setstate__(self, state):
(self._parties, self._action, self._timeout,
self._cond, self._wrapper) = state
self._array = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._parties, self._action, self._timeout,
self._cond, self._wrapper)
@property
def _state(self):
return self._array[0]
@_state.setter
def _state(self, value):
self._array[0] = value
@property
def _count(self):
return self._array[1]
@_count.setter
def _count(self, value):
self._array[1] = value
|
the-stack_106_14479
|
import aiohttp
import discord
import logging
import asyncio
import time
import re
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.i18n import Translator
from discord.ext.commands.converter import Converter
from discord.ext.commands.errors import BadArgument
from .flags import FLAGS
from .errors import GoogleTranslateAPIError
BASE_URL = "https://translation.googleapis.com"
_ = Translator("Translate", __file__)
log = logging.getLogger("red.Translate")
FLAG_REGEX = re.compile(r"|".join(rf"{re.escape(f)}" for f in FLAGS.keys()))
class FlagTranslation(Converter):
"""
This will convert flags and languages to the correct code to be used by the API
Guidance code on how to do this is adapted from:
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
"""
async def convert(self, ctx, argument):
result = []
if argument in FLAGS:
result = FLAGS[argument]["code"].upper()
else:
for lang in FLAGS:
if FLAGS[lang]["name"].lower() in argument.lower():
result = FLAGS[lang]["code"]
break
if FLAGS[lang]["country"].lower() in argument.lower():
result = FLAGS[lang]["code"]
break
if not FLAGS[lang]["code"]:
continue
if FLAGS[lang]["code"] in argument.lower() and len(argument) == 2:
result = FLAGS[lang]["code"]
break
if not result:
raise BadArgument('Language "{}" not found'.format(argument))
return result
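# Usage sketch (an assumption, not part of this cog): the converter is meant to
# be used as a command argument annotation, so discord.py calls convert() on the
# raw string. The command name and signature below are illustrative only.
#
#   @commands.command()
#   async def translate(self, ctx, to_language: FlagTranslation, *, text: str):
#       # `to_language` is now a language code such as "fr" or "EN"
#       ...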
class GoogleTranslateAPI:
config: Config
bot: Red
cache: dict
def __init__(self, *_args):
self.config: Config
self.bot: Red
self.cache: dict
async def cleanup_cache(self):
await self.bot.wait_until_ready()
while self is self.bot.get_cog("Translate"):
# cleanup the cache every 10 minutes
self.cache = {"translations": []}
await asyncio.sleep(600)
async def detect_language(self, text):
"""
Detect the language from given text
"""
params = {"q": text, "key": await self.config.api_key()}
url = BASE_URL + "/language/translate/v2/detect"
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as resp:
data = await resp.json()
if "error" in data:
log.error(data["error"]["message"])
raise GoogleTranslateAPIError(data["error"]["message"])
return data["data"]["detections"]
async def translation_embed(self, author, translation, requestor=None):
em = discord.Embed(colour=author.colour, description=translation[0])
em.set_author(name=author.display_name + _(" said:"), icon_url=author.avatar_url)
detail_string = _("{_from} to {_to} | Requested by ").format(
_from=translation[1].upper(), _to=translation[2].upper()
)
if requestor:
detail_string += str(requestor)
else:
detail_string += str(author)
em.set_footer(text=detail_string)
return em
async def translate_text(self, from_lang, target, text):
"""
request to translate the text
"""
formatting = "text"
params = {
"q": text,
"target": target,
"key": await self.config.api_key(),
"format": formatting,
"source": from_lang,
}
url = BASE_URL + "/language/translate/v2"
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as resp:
data = await resp.json()
except Exception:
return None
if "error" in data:
log.error(data["error"]["message"])
raise GoogleTranslateAPIError(data["error"]["message"])
if "data" in data:
translated_text = data["data"]["translations"][0]["translatedText"]
return translated_text
async def on_message(self, message):
"""
Translates the message when a country flag emoji appears in its content
"""
if not message.guild:
return
if message.author.bot:
return
channel = message.channel
guild = message.guild
author = message.author
if await self.config.api_key() is None:
return
# check_emoji = lambda emoji: emoji in FLAGS
if not await self.config.guild(guild).text():
return
if not await self.local_perms(guild, author):
return
if not await self.global_perms(author):
return
if not await self.check_ignored_channel(message):
return
flag = FLAG_REGEX.search(message.clean_content)
if not flag:
return
if message.id in self.cache:
if str(flag) in self.cache[message.id]["past_flags"]:
return
if not self.cache[message.id]["multiple"]:
return
if time.time() < self.cache[message.id]["wait"]:
await channel.send(_("You're translating too many messages!"), delete_after=10)
return
if message.embeds != []:
to_translate = message.embeds[0].description
else:
to_translate = message.clean_content
try:
detected_lang = await self.detect_language(to_translate)
except GoogleTranslateAPIError:
return
original_lang = detected_lang[0][0]["language"]
target = FLAGS[flag.group()]["code"]
if target == original_lang:
return
try:
translated_text = await self.translate_text(original_lang, target, to_translate)
except GoogleTranslateAPIError:
return
if not translated_text:
return
from_lang = detected_lang[0][0]["language"].upper()
to_lang = target.upper()
if from_lang == to_lang:
# don't post anything if the detected language is the same
return
translation = (translated_text, from_lang, to_lang)
if message.id not in self.cache:
cooldown = await self.config.cooldown()
else:
cooldown = self.cache[message.id]
cooldown["wait"] = time.time() + cooldown["timeout"]
cooldown["past_flags"].append(str(flag))
self.cache[message.id] = cooldown
if channel.permissions_for(guild.me).embed_links:
em = await self.translation_embed(author, translation)
translation = await channel.send(embed=em)
else:
msg = f"{author.display_name} " + _("said:") + "\n"
translation = await channel.send(msg + translated_text)
if not cooldown["multiple"]:
self.cache["translations"].append(translation.id)
async def on_raw_reaction_add(self, payload):
"""
Translates the message based off reactions
with country flags
"""
if payload.message_id in self.cache["translations"]:
return
channel = self.bot.get_channel(id=payload.channel_id)
try:
guild = channel.guild
message = await channel.get_message(id=payload.message_id)
user = guild.get_member(payload.user_id)
except Exception:
return
if user.bot:
return
if await self.config.api_key() is None:
return
# check_emoji = lambda emoji: emoji in FLAGS
if not await self.config.guild(guild).reaction():
return
if str(payload.emoji) not in FLAGS:
return
if not await self.local_perms(guild, user):
return
if not await self.global_perms(user):
return
if not await self.check_ignored_channel(message):
return
if message.id in self.cache:
if str(payload.emoji) in self.cache[message.id]["past_flags"]:
return
if not self.cache[message.id]["multiple"]:
return
if time.time() < self.cache[message.id]["wait"]:
await channel.send(_("You're translating too many messages!"), delete_after=10)
return
if message.embeds != []:
to_translate = message.embeds[0].description
else:
to_translate = message.clean_content
num_emojis = 0
for reaction in message.reactions:
if reaction.emoji == str(payload.emoji):
num_emojis = reaction.count
if num_emojis > 1:
return
target = FLAGS[str(payload.emoji)]["code"]
try:
detected_lang = await self.detect_language(to_translate)
except GoogleTranslateAPIError:
return
original_lang = detected_lang[0][0]["language"]
if target == original_lang:
return
try:
translated_text = await self.translate_text(original_lang, target, to_translate)
except Exception:
return
if not translated_text:
return
author = message.author
from_lang = detected_lang[0][0]["language"].upper()
to_lang = target.upper()
if from_lang == to_lang:
# don't post anything if the detected language is the same
return
translation = (translated_text, from_lang, to_lang)
if message.id not in self.cache:
cooldown = await self.config.cooldown()
else:
cooldown = self.cache[message.id]
cooldown["wait"] = time.time() + cooldown["timeout"]
cooldown["past_flags"].append(str(payload.emoji))
self.cache[message.id] = cooldown
if channel.permissions_for(guild.me).embed_links:
em = await self.translation_embed(author, translation, user)
translation = await channel.send(embed=em)
else:
msg = _(
"{author} said:\n{translated_text}"
).format(author=author, translated_text=translated_text)
translation = await channel.send(msg)
if not cooldown["multiple"]:
self.cache["translations"].append(translation.id)
async def local_perms(self, guild, author):
"""Check the user is/isn't locally whitelisted/blacklisted.
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/release/3.0.0/redbot/core/global_checks.py
"""
if await self.bot.is_owner(author):
return True
elif guild is None:
return True
guild_settings = self.bot.db.guild(guild)
local_blacklist = await guild_settings.blacklist()
local_whitelist = await guild_settings.whitelist()
_ids = [r.id for r in author.roles if not r.is_default()]
_ids.append(author.id)
if local_whitelist:
return any(i in local_whitelist for i in _ids)
return not any(i in local_blacklist for i in _ids)
async def global_perms(self, author):
"""Check the user is/isn't globally whitelisted/blacklisted.
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/release/3.0.0/redbot/core/global_checks.py
"""
if await self.bot.is_owner(author):
return True
whitelist = await self.bot.db.whitelist()
if whitelist:
return author.id in whitelist
return author.id not in await self.bot.db.blacklist()
async def check_ignored_channel(self, message):
"""
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/release/3.0.0/redbot/cogs/mod/mod.py#L1273
"""
channel = message.channel
guild = channel.guild
author = message.author
mod = self.bot.get_cog("Mod")
perms = channel.permissions_for(author)
surpass_ignore = (
isinstance(channel, discord.abc.PrivateChannel)
or perms.manage_guild
or await self.bot.is_owner(author)
or await self.bot.is_admin(author)
)
if surpass_ignore:
return True
guild_ignored = await mod.settings.guild(guild).ignored()
chann_ignored = await mod.settings.channel(channel).ignored()
return not (guild_ignored or chann_ignored and not perms.manage_channels)
|
the-stack_106_14481
|
# Copyright (C) 2015-2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
empty,
has_key,
is_not )
from pprint import pformat
from unittest import TestCase
from ycmd.tests.rust import setUpModule, tearDownModule # noqa
from ycmd.tests.rust import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
CompletionEntryMatcher,
WithRetry )
from ycmd.utils import ReadFile
class GetCompletionsTest( TestCase ):
@WithRetry()
@SharedYcmd
def test_GetCompletions_Basic( self, app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'rust',
contents = contents,
line_num = 14,
column_num = 19 )
results = app.post_json( '/completions',
completion_data ).json[ 'completions' ]
assert_that(
results,
contains_exactly(
CompletionEntryMatcher(
'build_rocket',
'fn(&self)',
{
'detailed_info': 'build_rocket\n\nDo not try at home',
'menu_text': 'build_rocket',
'kind': 'Method'
}
),
CompletionEntryMatcher(
'build_shuttle',
'fn(&self)',
{
'detailed_info': 'build_shuttle\n\n',
'menu_text': 'build_shuttle',
'kind': 'Method'
}
)
)
)
# This completer does not require or support resolve
assert_that( results[ 0 ], is_not( has_key( 'resolve' ) ) )
assert_that( results[ 0 ], is_not( has_key( 'item' ) ) )
# So (erroneously) resolving an item returns the item
completion_data[ 'resolve' ] = 0
response = app.post_json( '/resolve_completion', completion_data ).json
print( f"Resolve resolve: { pformat( response ) }" )
# We can't actually check the result because we don't know what completion
# resolve ID 0 actually is (could be anything), so we just check that we
# get 1 result, and that there are no errors.
assert_that( response[ 'completion' ], is_not( None ) )
assert_that( response[ 'errors' ], empty() )
|
the-stack_106_14482
|
import cv2
import numpy as np
from tensorflow.keras import Model
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import *
def build_autoencoder(input_shape=(28, 28, 1),
encoding_size=32,
alpha=0.2):
inputs = Input(shape=input_shape)
encoder = Conv2D(filters=32,
kernel_size=(3, 3),
strides=2,
padding='same')(inputs)
encoder = LeakyReLU(alpha=alpha)(encoder)
encoder = BatchNormalization()(encoder)
encoder = Conv2D(filters=64,
kernel_size=(3, 3),
strides=2,
padding='same')(encoder)
encoder = LeakyReLU(alpha=alpha)(encoder)
encoder = BatchNormalization()(encoder)
encoder_output_shape = encoder.shape
encoder = Flatten()(encoder)
encoder_output = Dense(units=encoding_size,
name='encoder_output')(encoder)
# Build decoder
target_shape = tuple(encoder_output_shape[1:])
decoder = Dense(np.prod(target_shape))(encoder_output)
decoder = Reshape(target_shape)(decoder)
decoder = Conv2DTranspose(filters=64,
kernel_size=(3, 3),
strides=2,
padding='same')(decoder)
decoder = LeakyReLU(alpha=alpha)(decoder)
decoder = BatchNormalization()(decoder)
decoder = Conv2DTranspose(filters=32,
kernel_size=(3, 3),
strides=2,
padding='same')(decoder)
decoder = LeakyReLU(alpha=alpha)(decoder)
decoder = BatchNormalization()(decoder)
decoder = Conv2DTranspose(filters=1,
kernel_size=(3, 3),
padding='same')(decoder)
outputs = Activation(activation='sigmoid',
name='decoder_output')(decoder)
autoencoder_model = Model(inputs, outputs)
return autoencoder_model
def euclidean_dist(x, y):
return np.linalg.norm(x - y)
def search(query_vector, search_index, max_results=16):
vectors = search_index['features']
results = []
for i in range(len(vectors)):
distance = euclidean_dist(query_vector, vectors[i])
results.append((distance, search_index['images'][i]))
results = sorted(results,
key=lambda p: p[0])[:max_results]
return results
(X_train, _), (X_test, _) = fashion_mnist.load_data()
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)
autoencoder = build_autoencoder()
autoencoder.compile(optimizer='adam', loss='mse')
EPOCHS = 10
BATCH_SIZE = 512
autoencoder.fit(X_train, X_train,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
shuffle=True,
validation_data=(X_test, X_test))
fe_input = autoencoder.input
fe_output = autoencoder.get_layer('encoder_output').output
feature_extractor = Model(inputs=fe_input, outputs=fe_output)
train_vectors = feature_extractor.predict(X_train)
X_train = (X_train * 255.0).astype('uint8')
X_train = X_train.reshape((X_train.shape[0], 28, 28))
search_index = {
'features': train_vectors,
'images': X_train
}
test_vectors = feature_extractor.predict(X_test)
X_test = (X_test * 255.0).astype('uint8')
X_test = X_test.reshape((X_test.shape[0], 28, 28))
sample_indices = np.random.randint(0, X_test.shape[0], 16)
sample_images = X_test[sample_indices]
sample_queries = test_vectors[sample_indices]
for i, (vector, image) in \
enumerate(zip(sample_queries, sample_images)):
results = search(vector, search_index)
results = [r[1] for r in results]
query_image = cv2.resize(image, (28 * 4, 28 * 4),
interpolation=cv2.INTER_AREA)
results_mosaic = np.vstack([np.hstack(results[0:4]),
np.hstack(results[4:8]),
np.hstack(results[8:12]),
np.hstack(results[12:16])])
result_image = np.hstack([query_image, results_mosaic])
cv2.imwrite(f'{i}.jpg', result_image)
|
the-stack_106_14486
|
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.runtime.client_request import ClientRequest
from office365.runtime.utilities.request_options import RequestOptions
from settings import settings
import json
from office365.sharepoint.client_context import ClientContext
if __name__ == '__main__':
context_auth = AuthenticationContext(url=settings['url'])
if context_auth.acquire_token_for_user(username=settings['username'], password=settings['password']):
"""Read Web client object"""
ctx = ClientContext(settings['url'], context_auth)
request = ClientRequest(ctx)
options = RequestOptions("{0}/_api/web/".format(settings['url']))
options.set_header('Accept', 'application/json')
options.set_header('Content-Type', 'application/json')
data = ctx.execute_query_direct(options)
s = json.loads(data.content)
web_title = s['Title']
print("Web title: {0}".format(web_title))
else:
print(context_auth.get_last_error())
|
the-stack_106_14493
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.KbdishAreaInfo import KbdishAreaInfo
class KoubeiCateringDishAreaSyncResponse(AlipayResponse):
def __init__(self):
super(KoubeiCateringDishAreaSyncResponse, self).__init__()
self._kb_dish_area = None
@property
def kb_dish_area(self):
return self._kb_dish_area
@kb_dish_area.setter
def kb_dish_area(self, value):
if isinstance(value, KbdishAreaInfo):
self._kb_dish_area = value
else:
self._kb_dish_area = KbdishAreaInfo.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(KoubeiCateringDishAreaSyncResponse, self).parse_response_content(response_content)
if 'kb_dish_area' in response:
self.kb_dish_area = response['kb_dish_area']
|
the-stack_106_14494
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from copy import deepcopy
import mmcv
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug3D(object):
"""Test-time augmentation with multiple scales and flipping.
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple]): Image scales for resizing.
pts_scale_ratio (float | list[float]): Points scale ratios for
resizing.
flip (bool, optional): Whether apply flip augmentation.
Defaults to False.
flip_direction (str | list[str], optional): Flip augmentation
directions for images, options are "horizontal" and "vertical".
If flip_direction is list, multiple flip augmentations will
be applied. It has no effect when ``flip == False``.
Defaults to "horizontal".
pcd_horizontal_flip (bool, optional): Whether apply horizontal
flip augmentation to point cloud. Defaults to True.
Note that it works only when 'flip' is turned on.
pcd_vertical_flip (bool, optional): Whether apply vertical flip
augmentation to point cloud. Defaults to True.
Note that it works only when 'flip' is turned on.
"""
def __init__(self,
transforms,
img_scale,
pts_scale_ratio,
flip=False,
flip_direction='horizontal',
pcd_horizontal_flip=False,
pcd_vertical_flip=False):
self.transforms = Compose(transforms)
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.pts_scale_ratio = pts_scale_ratio \
if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]
assert mmcv.is_list_of(self.img_scale, tuple)
assert mmcv.is_list_of(self.pts_scale_ratio, float)
self.flip = flip
self.pcd_horizontal_flip = pcd_horizontal_flip
self.pcd_vertical_flip = pcd_vertical_flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip and not any([(t['type'] == 'RandomFlip3D'
or t['type'] == 'RandomFlip')
for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to augment common fields in results.
Args:
results (dict): Result dict contains the data to augment.
Returns:
dict: The result dict contains the data that is augmented with
different scales and flips.
"""
aug_data = []
# modified from `flip_aug = [False, True] if self.flip else [False]`
# to reduce unnecessary scenes when using double flip augmentation
# during test time
flip_aug = [True] if self.flip else [False]
pcd_horizontal_flip_aug = [False, True] \
if self.flip and self.pcd_horizontal_flip else [False]
pcd_vertical_flip_aug = [False, True] \
if self.flip and self.pcd_vertical_flip else [False]
for scale in self.img_scale:
for pts_scale_ratio in self.pts_scale_ratio:
for flip in flip_aug:
for pcd_horizontal_flip in pcd_horizontal_flip_aug:
for pcd_vertical_flip in pcd_vertical_flip_aug:
for direction in self.flip_direction:
# results.copy will cause bug
# since it is shallow copy
_results = deepcopy(results)
_results['scale'] = scale
_results['flip'] = flip
_results['pcd_scale_factor'] = \
pts_scale_ratio
_results['flip_direction'] = direction
_results['pcd_horizontal_flip'] = \
pcd_horizontal_flip
_results['pcd_vertical_flip'] = \
pcd_vertical_flip
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
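# A hedged configuration sketch (the inner transform names are assumptions and
# depend on the model config actually in use): roughly how this augmentation is
# declared in an mmdet3d test pipeline.
#
#   test_pipeline_step = dict(
#       type='MultiScaleFlipAug3D',
#       img_scale=(1333, 800),
#       pts_scale_ratio=1.0,
#       flip=False,
#       transforms=[
#           dict(type='GlobalRotScaleTrans', rot_range=[0, 0], scale_ratio_range=[1., 1.]),
#           dict(type='RandomFlip3D'),
#       ])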
|
the-stack_106_14497
|
import turtle
x = 20
tortuguita = turtle.Turtle()
tortuguita.shape('turtle')
tortuguita.width(5)
tortuguita.color('yellow')
tortuguita.up()
tortuguita.forward(x * 15)  # distance is an assumption
tortuguita.down()
huguinho = turtle.Turtle()
huguinho.shape("turtle")
huguinho.width(5)
huguinho.color("Blue")
huguinho.up()
huguinho.forward(x * 10)
huguinho.down()
zezinho = turtle.Turtle()
zezinho.shape("turtle")
zezinho.width(5)
zezinho.color("Red")
zezinho.up()
zezinho.forward(x * 5)
zezinho.down()
luizinho = turtle.Turtle()
luizinho.shape("turtle")
luizinho.width(5)
luizinho.color("Green")
for i in range(24):
huguinho.right(15)
huguinho.forward(x)
zezinho.right(15)
zezinho.forward(x)
luizinho.right(15)
luizinho.forward(x)
turtle.done()
|
the-stack_106_14498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import math
import argparse
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
from utils import load_data, dump_data, mkdir_if_no_exists, Timer
__all__ = ['knn_brute_force', 'knn_hnsw', 'knn_faiss',
'knns2spmat', 'build_knns', 'filter_knns']
def knns_recall(ners, idx2lb, lb2idxs):
with Timer('compute recall'):
recs = []
cnt = 0
for idx, (n, _) in enumerate(ners):
lb = idx2lb[idx]
idxs = lb2idxs[lb]
n = list(n)
if len(n) == 1:
cnt += 1
s = set(idxs) & set(n)
recs += [1. * len(s) / len(idxs)]
print('there are {} / {} = {:.3f} isolated anchors.'\
.format(cnt, len(ners), 1. * cnt / len(ners)))
recall = np.mean(recs)
return recall
def filter_knns(knns, k, th):
pairs = []
scores = []
n = len(knns)
ners = np.zeros([n, k], dtype=np.int32) - 1
simi = np.zeros([n, k]) - 1
for i, (ner, dist) in enumerate(knns):
assert len(ner) == len(dist)
ners[i, :len(ner)] = ner
simi[i, :len(ner)] = 1. - dist
anchor = np.tile(np.arange(n).reshape(n, 1), (1, k))
# filter
selidx = np.where((simi >= th) & (ners != -1) & (ners != anchor))
pairs = np.hstack((anchor[selidx].reshape(-1, 1), ners[selidx].reshape(-1, 1)))
scores = simi[selidx]
# keep uniq pairs
pairs = np.sort(pairs, axis=1)
pairs, unique_idx = np.unique(pairs, return_index=True, axis=0)
scores = scores[unique_idx]
return pairs, scores
def knns2spmat(knns, k, th_sim=0.7):
# convert knns to symmetric sparse matrix
from scipy.sparse import csr_matrix
n = len(knns)
row, col, data = [], [], []
for row_i, knn in enumerate(knns):
nbrs, dists = knn
for nbr, dist in zip(nbrs, dists):
if 1 - dist < th_sim or nbr == -1:
continue
row.append(row_i)
col.append(nbr)
data.append(dist)
assert len(row) == len(col) == len(data)
spmat = csr_matrix((data, (row, col)), shape=(n, n))
return spmat
def build_knns(knn_prefix, feats, knn_method, k, is_rebuild=False):
knn_prefix = os.path.join(knn_prefix, '{}_k_{}'.format(knn_method, k))
mkdir_if_no_exists(knn_prefix)
knn_path = knn_prefix + '.npz'
if not os.path.isfile(knn_path) or is_rebuild:
index_path = knn_prefix + '.index'
with Timer('build index'):
if knn_method == 'hnsw':
index = knn_hnsw(feats, k, index_path)
elif knn_method == 'faiss':
index = knn_faiss(feats, k, index_path)
else:
raise KeyError('Unsupported method({}). \
Only support hnsw and faiss currently'.format(knn_method))
knns = index.get_knns()
with Timer('dump knns to {}'.format(knn_path)):
dump_data(knn_path, knns, force=True)
else:
print('read knn from {}'.format(knn_path))
knns = load_data(knn_path)
return knns
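# Usage sketch tying the helpers together (the cache path and parameter values
# are illustrative assumptions):
#
#   knns = build_knns('./knn_cache', feats, knn_method='faiss', k=80)
#   spmat = knns2spmat(knns, k=80, th_sim=0.7)        # sparse affinity matrix
#   pairs, scores = filter_knns(knns, k=80, th=0.7)   # thresholded edge list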
class knn():
def __init__(self, feats, k, index_path='', verbose=True):
pass
def filter_by_th(self, i):
th_ners = []
th_dists = []
ners, dists = self.knns[i]
for n, dist in zip(ners, dists):
if 1 - dist < self.th:
continue
th_ners.append(n)
th_dists.append(dist)
th_ners = np.array(th_ners)
th_dists = np.array(th_dists)
return (th_ners, th_dists)
def get_knns(self, th=None):
if th is None or th <= 0.:
return self.knns
# nproc = mp.cpu_count()
nproc = 1
with Timer('filter edges by th {} (CPU={})'.\
format(th, nproc), self.verbose):
self.th = th
self.th_knns = []
tot = len(self.knns)
if nproc > 1:
pool = mp.Pool(nproc)
th_knns = list(tqdm(pool.imap(self.filter_by_th, range(tot)), total=tot))
pool.close()
else:
th_knns = [self.filter_by_th(i) for i in range(tot)]
return th_knns
class knn_brute_force(knn):
def __init__(self, feats, k, index_path='', verbose=True):
self.verbose = verbose
with Timer('[brute force] build index', verbose):
feats = feats.astype('float32')
sim = feats.dot(feats.T)
with Timer('[brute force] query topk {}'.format(k), verbose):
ners = np.argpartition(-sim, kth=k)[:, :k]
idxs = np.array([i for i in range(ners.shape[0])])
dists = 1 - sim[idxs.reshape(-1, 1), ners]
self.knns = [(np.array(ner, dtype=np.int32), np.array(dist, dtype=np.float32)) \
for ner, dist in zip(ners, dists)]
class knn_hnsw(knn):
def __init__(self, feats, k, index_path='', verbose=True):
import nmslib
self.verbose = verbose
with Timer('[hnsw] build index', verbose):
""" higher ef leads to better accuracy, but slower search
higher M leads to higher accuracy/run_time at fixed ef, but consumes more memory
"""
# space_params = {
# 'ef': 100,
# 'M': 16,
# }
# index = nmslib.init(method='hnsw', space='cosinesimil', space_params=space_params)
index = nmslib.init(method='hnsw', space='cosinesimil')
if index_path != '' and os.path.isfile(index_path):
index.loadIndex(index_path)
else:
index.addDataPointBatch(feats)
index.createIndex({'post': 2, 'indexThreadQty': 1}, print_progress=verbose)
if index_path:
print('[hnsw] save index to {}'.format(index_path))
mkdir_if_no_exists(index_path)
index.saveIndex(index_path)
with Timer('[hnsw] query topk {}'.format(k), verbose):
knn_ofn = index_path + '.npz'
if os.path.exists(knn_ofn):
print('[hnsw] read knns from {}'.format(knn_ofn))
self.knns = [(knn[0, :].astype(np.int32), knn[1, :].astype(np.float32)) \
for knn in np.load(knn_ofn)['data']]
else:
self.knns = index.knnQueryBatch(feats, k=k)
class knn_faiss(knn):
def __init__(self, feats, k, index_path='', index_key='', nprobe=128, verbose=True):
import faiss
self.verbose = verbose
with Timer('[faiss] build index', verbose):
if index_path != '' and os.path.exists(index_path):
print('[faiss] read index from {}'.format(index_path))
index = faiss.read_index(index_path)
else:
feats = feats.astype('float32')
size, dim = feats.shape
index = faiss.IndexFlatIP(dim)
if index_key != '':
assert index_key.find('HNSW') < 0, 'HNSW returns distances instead of sims'
metric = faiss.METRIC_INNER_PRODUCT
nlist = min(4096, 8 * round(math.sqrt(size)))
if index_key == 'IVF':
quantizer = index
index = faiss.IndexIVFFlat(quantizer, dim, nlist, metric)
else:
index = faiss.index_factory(dim, index_key, metric)
if index_key.find('Flat') < 0:
assert not index.is_trained
index.train(feats)
index.nprobe = min(nprobe, nlist)
assert index.is_trained
print('nlist: {}, nprobe: {}'.format(nlist, nprobe))
index.add(feats)
if index_path != '':
print('[faiss] save index to {}'.format(index_path))
mkdir_if_no_exists(index_path)
faiss.write_index(index, index_path)
with Timer('[faiss] query topk {}'.format(k), verbose):
knn_ofn = index_path + '.npz'
if os.path.exists(knn_ofn):
print('[faiss] read knns from {}'.format(knn_ofn))
self.knns = [(knn[0, :].astype(np.int32), knn[1, :].astype(np.float32)) \
for knn in np.load(knn_ofn)['data']]
else:
sims, ners = index.search(feats, k=k)
self.knns = [(np.array(ner, dtype=np.int32), 1 - np.array(sim, dtype=np.float32)) \
for ner, sim in zip(ners, sims)]
if __name__ == '__main__':
from utils import l2norm
k = 30
d = 256
nfeat = 10000
np.random.seed(42)
feats = np.random.random((nfeat, d)).astype('float32')
feats = l2norm(feats)
index1 = knn_hnsw(feats, k)
index2 = knn_faiss(feats, k)
index3 = knn_faiss(feats, k, index_key='Flat')
index4 = knn_faiss(feats, k, index_key='IVF')
index5 = knn_faiss(feats, k, index_key='IVF100,PQ32')
print(index1.knns[0])
print(index2.knns[0])
print(index3.knns[0])
print(index4.knns[0])
print(index5.knns[0])
|
the-stack_106_14499
|
from typing import Any, List, Dict
class IOSpec:
def __init__(self, inputs: List[Any], output: Any):
self.inputs = inputs
self.output = output
self.funcs: List[str] = None
self.seqs: List[List[int]] = None
class SearchSpec(IOSpec):
def __init__(self, inputs: List[Any], output: Any, intermediates: List[Any] = None, max_depth: int = None,
depth: int = 1):
super().__init__(inputs, output)
if intermediates is None and max_depth is None:
raise Exception("One of intermediates and max_depth is required")
self.intermediates = intermediates if intermediates is not None else [None] * (max_depth - 1)
self.max_depth = max_depth or (len(intermediates) + 1)
self.depth = depth
class ArgTrainingSpec(SearchSpec):
def __init__(self, inputs: List[Any], output: Any, args: Dict[str, Any], intermediates: List[Any] = None,
max_depth: int = None, depth: int = 1):
super().__init__(inputs, output, intermediates, max_depth, depth)
self.args = args
class EngineSpec(SearchSpec):
def __init__(self, inputs: List[Any], output: Any, max_depth: int):
super().__init__(inputs, output, max_depth=max_depth)
|
the-stack_106_14500
|
# Copyright 2015, 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pycodestyle.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep8
def test_pep8():
rc = main(argv=[])
assert rc == 0, 'Found code style errors / warnings'
|
the-stack_106_14502
|
import sys
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import math
import numpy as np
import torch
from .utils import random_crop, draw_gaussian, gaussian_radius, normalize_, color_jittering_, lighting_
def _resize_image(image, detections, size):
detections = detections.copy()
height, width = image.shape[0:2]
new_height, new_width = size
image = cv2.resize(image, (new_width, new_height))
height_ratio = new_height / height
width_ratio = new_width / width
detections[:, 0:4:2] *= width_ratio
detections[:, 1:4:2] *= height_ratio
return image, detections
def _clip_detections(image, detections):
detections = detections.copy()
height, width = image.shape[0:2]
detections[:, 0:4:2] = np.clip(detections[:, 0:4:2], 0, width - 1)
detections[:, 1:4:2] = np.clip(detections[:, 1:4:2], 0, height - 1)
keep_inds = ((detections[:, 2] - detections[:, 0]) > 0) & \
((detections[:, 3] - detections[:, 1]) > 0)
detections = detections[keep_inds]
return detections
def cornernet(system_configs, db, k_ind, data_aug, debug):
data_rng = system_configs.data_rng
batch_size = system_configs.batch_size
categories = db.configs["categories"]
input_size = db.configs["input_size"]
output_size = db.configs["output_sizes"][0]
border = db.configs["border"]
lighting = db.configs["lighting"]
rand_crop = db.configs["rand_crop"]
rand_color = db.configs["rand_color"]
rand_scales = db.configs["rand_scales"]
gaussian_bump = db.configs["gaussian_bump"]
gaussian_iou = db.configs["gaussian_iou"]
gaussian_rad = db.configs["gaussian_radius"]
max_tag_len = 128
# allocating memory
images = np.zeros((batch_size, 3, input_size[0], input_size[1]), dtype=np.float32)
tl_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
br_heatmaps = np.zeros((batch_size, categories, output_size[0], output_size[1]), dtype=np.float32)
tl_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
br_regrs = np.zeros((batch_size, max_tag_len, 2), dtype=np.float32)
tl_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
br_tags = np.zeros((batch_size, max_tag_len), dtype=np.int64)
tag_masks = np.zeros((batch_size, max_tag_len), dtype=np.uint8)
tag_lens = np.zeros((batch_size, ), dtype=np.int32)
db_size = db.db_inds.size
for b_ind in range(batch_size):
if not debug and k_ind == 0:
db.shuffle_inds()
db_ind = db.db_inds[k_ind]
k_ind = (k_ind + 1) % db_size
# reading image
image_path = db.image_path(db_ind)
image = cv2.imread(image_path)
# reading detections
detections = db.detections(db_ind)
# cropping an image randomly
if not debug and rand_crop:
image, detections = random_crop(image, detections, rand_scales, input_size, border=border)
image, detections = _resize_image(image, detections, input_size)
detections = _clip_detections(image, detections)
width_ratio = output_size[1] / input_size[1]
height_ratio = output_size[0] / input_size[0]
# flipping an image randomly
if not debug and np.random.uniform() > 0.5:
image[:] = image[:, ::-1, :]
width = image.shape[1]
detections[:, [0, 2]] = width - detections[:, [2, 0]] - 1
if not debug:
image = image.astype(np.float32) / 255.
if rand_color:
color_jittering_(data_rng, image)
if lighting:
lighting_(data_rng, image, 0.1, db.eig_val, db.eig_vec)
normalize_(image, db.mean, db.std)
images[b_ind] = image.transpose((2, 0, 1))
for ind, detection in enumerate(detections):
category = int(detection[-1]) - 1
xtl, ytl = detection[0], detection[1]
xbr, ybr = detection[2], detection[3]
fxtl = (xtl * width_ratio)
fytl = (ytl * height_ratio)
fxbr = (xbr * width_ratio)
fybr = (ybr * height_ratio)
xtl = int(fxtl)
ytl = int(fytl)
xbr = int(fxbr)
ybr = int(fybr)
if gaussian_bump:
width = detection[2] - detection[0]
height = detection[3] - detection[1]
width = math.ceil(width * width_ratio)
height = math.ceil(height * height_ratio)
if gaussian_rad == -1:
radius = gaussian_radius((height, width), gaussian_iou)
radius = max(0, int(radius))
else:
radius = gaussian_rad
draw_gaussian(tl_heatmaps[b_ind, category], [xtl, ytl], radius)
draw_gaussian(br_heatmaps[b_ind, category], [xbr, ybr], radius)
else:
tl_heatmaps[b_ind, category, ytl, xtl] = 1
br_heatmaps[b_ind, category, ybr, xbr] = 1
tag_ind = tag_lens[b_ind]
tl_regrs[b_ind, tag_ind, :] = [fxtl - xtl, fytl - ytl]
br_regrs[b_ind, tag_ind, :] = [fxbr - xbr, fybr - ybr]
tl_tags[b_ind, tag_ind] = ytl * output_size[1] + xtl
br_tags[b_ind, tag_ind] = ybr * output_size[1] + xbr
tag_lens[b_ind] += 1
for b_ind in range(batch_size):
tag_len = tag_lens[b_ind]
tag_masks[b_ind, :tag_len] = 1
images = torch.from_numpy(images)
tl_heatmaps = torch.from_numpy(tl_heatmaps)
br_heatmaps = torch.from_numpy(br_heatmaps)
tl_regrs = torch.from_numpy(tl_regrs)
br_regrs = torch.from_numpy(br_regrs)
tl_tags = torch.from_numpy(tl_tags)
br_tags = torch.from_numpy(br_tags)
tag_masks = torch.from_numpy(tag_masks)
return {
"xs": [images],
"ys": [tl_heatmaps, br_heatmaps, tag_masks, tl_regrs, br_regrs, tl_tags, br_tags]
}, k_ind
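# Call sketch (the `system_configs` and `training_db` objects come from the
# surrounding training framework and are assumptions here): the sampler is
# typically invoked once per iteration to produce a training batch.
#
#   batch, k_ind = cornernet(system_configs, training_db, k_ind, data_aug=True, debug=False)
#   images = batch["xs"][0]                      # (batch_size, 3, H, W) tensor
#   tl_heatmaps, br_heatmaps = batch["ys"][:2]   # corner heatmap targets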
|
the-stack_106_14503
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from ..models.capacity import Capacity
from sqlmodel import Session, select, SQLModel, and_
from sqlalchemy.exc import NoResultFound
from ..models.user import User
from ..models.team import Team
router = APIRouter(prefix="/api/capacities", tags=["capacity"])
session = Session(engine)
@router.post("/")
async def post_capacity(*, capacity: Capacity, session: Session = Depends(get_session)):
"""Post a capacity."""
statement = select(Capacity).where(
and_(
Capacity.user_id == capacity.user_id,
Capacity.team_id == capacity.team_id,
Capacity.year == capacity.year,
Capacity.month == capacity.month,
)
)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(capacity)
session.commit()
session.refresh(capacity)
return capacity
@router.get("/")
async def get_capacities(
session: Session = Depends(get_session),
is_locked: bool = None,
user_id: int = None,
team_id: int = None,
month: int = None,
year: int = None,
):
"""Get list of all capacities"""
statement = select(Capacity)
"""Select capacity by user_id, team_id, month, year"""
if all(v is not None for v in (user_id, team_id, month, year)):
statement = (
select(
Capacity.id.label("capacity_id"),
User.short_name.label("user_short_name"),
Team.short_name.label("team_short_name"),
Capacity.year,
Capacity.month,
Capacity.days,
)
.select_from(Capacity)
.join(User, Capacity.user_id == User.id)
.join(Team, Capacity.team_id == Team.id)
.where(Capacity.user_id == user_id)
.where(Capacity.team_id == team_id)
.where(Capacity.month == month)
.where(Capacity.year == year)
)
result = session.exec(statement).all()
return result
@router.delete("/")
async def delete_capacities(
capacity_id: str = None,
session: Session = Depends(get_session),
):
"""Delete a capacity"""
statement = select(Capacity).where(
Capacity.id == capacity_id,
)
capacity_to_delete = session.exec(statement).one()
session.delete(capacity_to_delete)
session.commit()
return True
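# Request sketch (host and payload values are illustrative assumptions): the
# endpoints above are exercised with plain JSON over HTTP, e.g.
#
#   POST   /api/capacities/   {"user_id": 1, "team_id": 2, "year": 2022, "month": 5, "days": 18}
#   GET    /api/capacities/?user_id=1&team_id=2&month=5&year=2022
#   DELETE /api/capacities/?capacity_id=7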
|
the-stack_106_14505
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lsnappsprofile_port_binding(base_resource) :
"""Binding class showing the port that can be bound to lsnappsprofile."""
def __init__(self) :
self._lsnport = ""
self._appsprofilename = ""
self.___count = 0
@property
def appsprofilename(self) :
"""Name for the LSN application profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN application profile is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn application profile1" or 'lsn application profile1').<br/>Minimum length = 1<br/>Maximum length = 127."""
try :
return self._appsprofilename
except Exception as e:
raise e
@appsprofilename.setter
def appsprofilename(self, appsprofilename) :
"""Name for the LSN application profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN application profile is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn application profile1" or 'lsn application profile1').<br/>Minimum length = 1<br/>Maximum length = 127
:param appsprofilename:
"""
try :
self._appsprofilename = appsprofilename
except Exception as e:
raise e
@property
def lsnport(self) :
"""Port numbers or range of port numbers to match against the destination port of the incoming packet from a subscriber. When the destination port is matched, the LSN application profile is applied for the LSN session. Separate a range of ports with a hyphen. For example, 40-90."""
try :
return self._lsnport
except Exception as e:
raise e
@lsnport.setter
def lsnport(self, lsnport) :
"""Port numbers or range of port numbers to match against the destination port of the incoming packet from a subscriber. When the destination port is matched, the LSN application profile is applied for the LSN session. Separate a range of ports with a hyphen. For example, 40-90.
:param lsnport:
"""
try :
self._lsnport = lsnport
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lsnappsprofile_port_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lsnappsprofile_port_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.appsprofilename is not None :
return str(self.appsprofilename)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lsnappsprofile_port_binding()
updateresource.appsprofilename = resource.appsprofilename
updateresource.lsnport = resource.lsnport
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lsnappsprofile_port_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].appsprofilename = resource[i].appsprofilename
updateresources[i].lsnport = resource[i].lsnport
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lsnappsprofile_port_binding()
deleteresource.appsprofilename = resource.appsprofilename
deleteresource.lsnport = resource.lsnport
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lsnappsprofile_port_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].appsprofilename = resource[i].appsprofilename
deleteresources[i].lsnport = resource[i].lsnport
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, appsprofilename) :
"""Use this API to fetch lsnappsprofile_port_binding resources.
:param service:
:param appsprofilename:
"""
try :
obj = lsnappsprofile_port_binding()
obj.appsprofilename = appsprofilename
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, appsprofilename, filter_) :
"""Use this API to fetch filtered set of lsnappsprofile_port_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param appsprofilename:
:param filter_:
"""
try :
obj = lsnappsprofile_port_binding()
obj.appsprofilename = appsprofilename
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, appsprofilename) :
"""Use this API to count lsnappsprofile_port_binding resources configued on NetScaler.
:param service:
:param appsprofilename:
"""
try :
obj = lsnappsprofile_port_binding()
obj.appsprofilename = appsprofilename
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, appsprofilename, filter_) :
"""Use this API to count the filtered set of lsnappsprofile_port_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param appsprofilename:
:param filter_:
"""
try :
obj = lsnappsprofile_port_binding()
obj.appsprofilename = appsprofilename
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class lsnappsprofile_port_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lsnappsprofile_port_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lsnappsprofile_port_binding = [lsnappsprofile_port_binding() for _ in range(length)]
|
the-stack_106_14506
|
"""
raven.contrib.django.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Acts as an implicit hook for Django installs.
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# flake8: noqa
from __future__ import absolute_import, unicode_literals
import copy
import logging
import sys
import warnings
from django.conf import settings as django_settings
from hashlib import md5
from raven.utils import six
logger = logging.getLogger('sentry.errors.client')
def get_installed_apps():
"""
Modules in settings.INSTALLED_APPS as a set.
"""
return set(django_settings.INSTALLED_APPS)
_client = (None, None)
class ProxyClient(object):
"""
A proxy which represents the current client at all times.
"""
# introspection support:
__members__ = property(lambda x: x.__dir__())
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
__class__ = property(lambda x: get_client().__class__)
__dict__ = property(lambda o: get_client().__dict__)
__repr__ = lambda x: repr(get_client())
__getattr__ = lambda x, o: getattr(get_client(), o)
__setattr__ = lambda x, o, v: setattr(get_client(), o, v)
__delattr__ = lambda x, o: delattr(get_client(), o)
__lt__ = lambda x, o: get_client() < o
__le__ = lambda x, o: get_client() <= o
__eq__ = lambda x, o: get_client() == o
__ne__ = lambda x, o: get_client() != o
__gt__ = lambda x, o: get_client() > o
__ge__ = lambda x, o: get_client() >= o
if not six.PY3:
__cmp__ = lambda x, o: cmp(get_client(), o) # NOQA
__hash__ = lambda x: hash(get_client())
# attributes are currently not callable
# __call__ = lambda x, *a, **kw: get_client()(*a, **kw)
__nonzero__ = lambda x: bool(get_client())
__len__ = lambda x: len(get_client())
__getitem__ = lambda x, i: get_client()[i]
__iter__ = lambda x: iter(get_client())
__contains__ = lambda x, i: i in get_client()
__getslice__ = lambda x, i, j: get_client()[i:j]
__add__ = lambda x, o: get_client() + o
__sub__ = lambda x, o: get_client() - o
__mul__ = lambda x, o: get_client() * o
__floordiv__ = lambda x, o: get_client() // o
__mod__ = lambda x, o: get_client() % o
__divmod__ = lambda x, o: get_client().__divmod__(o)
__pow__ = lambda x, o: get_client() ** o
__lshift__ = lambda x, o: get_client() << o
__rshift__ = lambda x, o: get_client() >> o
__and__ = lambda x, o: get_client() & o
__xor__ = lambda x, o: get_client() ^ o
__or__ = lambda x, o: get_client() | o
__div__ = lambda x, o: get_client().__div__(o)
__truediv__ = lambda x, o: get_client().__truediv__(o)
__neg__ = lambda x: -(get_client())
__pos__ = lambda x: +(get_client())
__abs__ = lambda x: abs(get_client())
__invert__ = lambda x: ~(get_client())
__complex__ = lambda x: complex(get_client())
__int__ = lambda x: int(get_client())
if not six.PY3:
__long__ = lambda x: long(get_client()) # NOQA
__float__ = lambda x: float(get_client())
__str__ = lambda x: six.binary_type(get_client())
__unicode__ = lambda x: six.text_type(get_client())
__oct__ = lambda x: oct(get_client())
__hex__ = lambda x: hex(get_client())
__index__ = lambda x: get_client().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
client = ProxyClient()
def get_option(x, d=None):
options = getattr(django_settings, 'RAVEN_CONFIG', {})
return getattr(django_settings, 'SENTRY_%s' % x, options.get(x, d))
def get_client(client=None):
global _client
tmp_client = client is not None
if not tmp_client:
client = getattr(django_settings, 'SENTRY_CLIENT', 'raven.contrib.django.DjangoClient')
if _client[0] != client:
module, class_name = client.rsplit('.', 1)
ga = lambda x, d=None: getattr(django_settings, 'SENTRY_%s' % x, d)
options = copy.deepcopy(getattr(django_settings, 'RAVEN_CONFIG', {}))
options.setdefault('servers', ga('SERVERS'))
options.setdefault('include_paths', ga('INCLUDE_PATHS', []))
options['include_paths'] = set(options['include_paths']) | get_installed_apps()
options.setdefault('exclude_paths', ga('EXCLUDE_PATHS'))
options.setdefault('timeout', ga('TIMEOUT'))
options.setdefault('name', ga('NAME'))
options.setdefault('auto_log_stacks', ga('AUTO_LOG_STACKS'))
options.setdefault('key', ga('KEY', md5(django_settings.SECRET_KEY.encode('utf8')).hexdigest()))
options.setdefault('string_max_length', ga('MAX_LENGTH_STRING'))
options.setdefault('list_max_length', ga('MAX_LENGTH_LIST'))
options.setdefault('site', ga('SITE'))
options.setdefault('public_key', ga('PUBLIC_KEY'))
options.setdefault('secret_key', ga('SECRET_KEY'))
options.setdefault('project', ga('PROJECT'))
options.setdefault('processors', ga('PROCESSORS'))
options.setdefault('dsn', ga('DSN'))
options.setdefault('context', ga('CONTEXT'))
options.setdefault('release', ga('RELEASE'))
class_name = str(class_name)
try:
Client = getattr(__import__(module, {}, {}, class_name), class_name)
except ImportError:
logger.exception('Failed to import client: %s', client)
if not _client[1]:
# If there is no previous client, set the default one.
client = 'raven.contrib.django.DjangoClient'
_client = (client, get_client(client))
else:
instance = Client(**options)
if not tmp_client:
_client = (client, instance)
return instance
return _client[1]
def sentry_exception_handler(request=None, **kwargs):
exc_type = sys.exc_info()[0]
exclusions = set(get_option('IGNORE_EXCEPTIONS', ()))
exc_name = '%s.%s' % (exc_type.__module__, exc_type.__name__)
if exc_type.__name__ in exclusions or exc_name in exclusions or any(exc_name.startswith(e[:-1]) for e in exclusions if e.endswith('*')):
logger.info(
'Not capturing exception due to filters: %s', exc_type,
exc_info=sys.exc_info())
return
try:
client.captureException(exc_info=sys.exc_info(), request=request)
except Exception as exc:
try:
logger.exception('Unable to process log entry: %s' % (exc,))
except Exception as exc:
warnings.warn('Unable to process log entry: %s' % (exc,))
def register_handlers():
from django.core.signals import got_request_exception
# HACK: support Sentry's internal communication
if 'sentry' in django_settings.INSTALLED_APPS:
from django.db import transaction
# Django 1.6
if hasattr(transaction, 'atomic'):
commit_on_success = transaction.atomic
else:
commit_on_success = transaction.commit_on_success
@commit_on_success
def wrap_sentry(request, **kwargs):
if transaction.is_dirty():
transaction.rollback()
return sentry_exception_handler(request, **kwargs)
exception_handler = wrap_sentry
else:
exception_handler = sentry_exception_handler
# Connect to Django's internal signal handler
got_request_exception.connect(exception_handler, weak=False)
# If Celery is installed, register a signal handler
if 'djcelery' in django_settings.INSTALLED_APPS:
try:
# Celery < 2.5? is not supported
from raven.contrib.celery import (
register_signal, register_logger_signal)
except ImportError:
logger.exception('Failed to install Celery error handler')
else:
try:
register_signal(client)
except Exception:
logger.exception('Failed to install Celery error handler')
try:
register_logger_signal(client)
except Exception:
logger.exception('Failed to install Celery error handler')
def register_serializers():
# force import so serializers can call register
import raven.contrib.django.serializers # NOQA
if ('raven.contrib.django' in django_settings.INSTALLED_APPS
or 'raven.contrib.django.raven_compat' in django_settings.INSTALLED_APPS):
register_handlers()
register_serializers()
|
the-stack_106_14508
|
# Author: Hector Ta
# import the necessary package
from __future__ import print_function
import cv2
import imutils
import argparse
from imutil import FPS
from imutil import WebCamVideoStream
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
help="# of frames to loop over FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
help="whether or not frames should be displayed")
args = vars(ap.parse_args())
# grab a pointer to the video stream and initialize the FPS counter
print ("[INFO] sampling frames from webcam")
stream = WebCamVideoStream(src=0).start()
fps = FPS().start()
# loop over some frames
while fps._numFrames < args["num_frames"]:
# grab the frame from the stream and resize it to have a maximum
# width of 400 pixels
frame = stream.read()
frame = imutils.resize(frame, width=400)
# check to see if the frame should be displayed on screen
if args["display"] > 0:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xff
# update the fps counter
fps.update()
# stop the timer and display the information
fps.stop()
print ("[INFO] elapsed time : {:.2f}".format(fps.elapsed()))
print ("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
stream.stop()
cv2.destroyAllWindows()
|
the-stack_106_14509
|
#!/usr/bin/env python3
import matplotlib as mpl
import astropy.constants as constants
import numpy as np
import scipy.interpolate
import numpy.fft
import scipy.io
mpl.use('agg')
import matplotlib.pyplot as plt
import numpy
from astropy.wcs import WCS
from rascil.data_models.polarisation import PolarisationFrame
from processing_library.image.operations import create_image_from_array
from rascil.processing_components.image.operations import export_image_to_fits
def create_rascil_image(pol_planes, cellsize, frequency, channel_bandwidth=1e6, shift_peak=False):
ny, nx = pol_planes[0].shape
assert len(pol_planes) == 4
w = WCS(naxis=4)
# The negation in the longitude is needed by definition of RA, DEC
w.wcs.cdelt = [-cellsize, cellsize, -1.0, channel_bandwidth]
w.wcs.crpix = [nx // 2, ny // 2, 1.0, 1.0]
w.wcs.ctype = ['AZELGEO long', 'AZELGEO lati', 'STOKES', 'FREQ']
w.wcs.crval = [0.0, 0.0, -5.0, frequency]
w.wcs.cunit = ['deg', 'deg', '', 'Hz']
w.naxis = 4
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
beam_out = numpy.zeros([1, 4, ny, nx], dtype=complex)
for pol in range(4):
beam_out[0, pol, ...] = numpy.transpose(pol_planes[pol])
# The following transforms are guessed to provide realistic looking beams
# 1. Renormalise
beam_out /= numpy.max(numpy.abs(beam_out))
# 2. Remove phase error in image plane
beam_out *= numpy.conjugate(beam_out[0, 0, ny // 2, nx // 2])
# 3. Remove phase gradient in image plane
dy = numpy.mod(numpy.angle(beam_out[0, 0, ny//2 + 1, nx // 2]) -
numpy.angle(beam_out[0, 0, ny//2 - 1, nx // 2]), numpy.pi) / 2.0
wave = constants.c.value/frequency
print(dy * wave / cellsize)
rotator = numpy.exp(-1.0j * dy * (numpy.arange(ny) - ny / 2.0))
beam_out *= rotator[numpy.newaxis, numpy.newaxis, :, numpy.newaxis]
for pol in [1, 3]:
beam_out[:, pol, ...] = -1.0 * beam_out[:, pol, ...]
# FFT interpolate to get peak on nx//2, ny//2
if shift_peak:
power_beam = beam_out * numpy.conjugate(beam_out)
peak_ind = numpy.argmax(power_beam)
peak_loc = numpy.unravel_index(peak_ind, power_beam.shape)
shifty = peak_loc[2]-ny//2+1
print("Shift in y is", shifty)
beam_out = numpy.roll(beam_out, -shifty, axis=2)
power_beam = beam_out * numpy.conjugate(beam_out)
peak_ind = numpy.argmax(power_beam)
peak_loc = numpy.unravel_index(peak_ind, power_beam.shape)
shifty = peak_loc[2]-ny//2+1
assert shifty == 0
vp_real = create_image_from_array(beam_out.real, w, polarisation_frame=PolarisationFrame("linear"))
vp_imag = create_image_from_array(beam_out.imag, w, polarisation_frame=PolarisationFrame("linear"))
vp_amp = create_image_from_array(numpy.abs(beam_out), w, polarisation_frame=PolarisationFrame("linear"))
vp_phase = create_image_from_array(numpy.angle(beam_out), w, polarisation_frame=PolarisationFrame("linear"))
return vp_real, vp_imag, vp_amp, vp_phase
def interpolate_beam(th, ph, beam_inp, n, extent):
nx, ny = n
xmin, xmax, ymin, ymax = extent
# Set x and y values of output grid.
x = np.linspace(xmin, xmax, nx)
y = np.linspace(ymin, ymax, ny)
# Make x and y into 2D grids and flatten into 1D arrays.
x2, y2 = np.meshgrid(x, y)
x2.shape = (-1,)
y2.shape = (-1,)
# Get (th, ph) values of (x, y) grid points assuming
#
# x = th * cos(ph)
# y = th * sin(ph)
thi = np.sqrt(x2 ** 2 + y2 ** 2)
phi = np.mod(np.degrees(np.arctan2(y2, x2)), 360.0)
xi = np.stack((thi, phi), axis=1)
# Interpolate real and imaginary parts separately then combine.
tmp_real = scipy.interpolate.interpn((th, ph), beam_inp.real, xi, method="splinef2d")
tmp_imag = scipy.interpolate.interpn((th, ph), beam_inp.imag, xi, method="splinef2d")
beam_out = tmp_real + 1j * tmp_imag
# Reshape output into 2D image.
beam_out.shape = (nx, ny)
return beam_out
def plot_beam(beam, title, extent):
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
fig.suptitle(title)
ax = axes[0, 0]
ax.set_title('real')
ax.set_ylabel('y / deg')
im = ax.imshow(beam.real, extent=extent, origin='lower')
fig.colorbar(im, ax=ax)
ax = axes[0, 1]
ax.set_title('imag')
im = ax.imshow(beam.imag, extent=extent, origin='lower')
fig.colorbar(im, ax=ax)
ax = axes[1, 0]
ax.set_xlabel('x / deg')
ax.set_ylabel('y / deg')
ax.set_title('amplitude')
im = ax.imshow(np.abs(beam), extent=extent, origin='lower')
fig.colorbar(im, ax=ax)
ax = axes[1, 1]
ax.set_title('phase')
ax.set_xlabel('x / deg')
im = ax.imshow(np.angle(beam, deg=True), extent=extent, origin='lower')
fig.colorbar(im, ax=ax)
iform = '{b}_{e}_{f}.mat'
oform = '{b}_{e}_{f:04d}_{j}.png'
tform = 'band = {b}, elev = {e} deg, freq = {f} MHz, jones = {j}'
fitsform = '{b}_{e}_{f:04d}_{t}.fits'
# n = nx, ny
# extent = xmin, xmax, ymin, ymax
n = 1024, 1024
extent = -4.0, 4.0, -4.0, 4.0
cellsize = 8 / n[0]
# band = [('B1', [365, 415, 465, 515, 565, 615, 665, 715, 765, 815, 865,
# 915, 965, 1015, 1050]),
# ('B2', [965, 1000, 1060, 110, 1160,
# 1220, 1252, 1310, 1360, 1410, 1460, 1510, 1610, 1660, 1710,
# 1760]),
# ('Ku', [11452, 11697, 11699, 11700, 12179, 12251, '12501_5'])]
band = [('B2', [965, 1000, 1060, 1100, 1160, 1220, 1252, 1310, 1360,
1410, 1460, 1510, 1610, 1660, 1710, 1760])]
#band = [('B2', [1360])]
elev = [15, 45, 90]
jones = ['Jpv', 'Jqh', 'Jph', 'Jqv']
for b, freq in band:
for e in elev:
for f in freq:
print(b, e, f)
ifile = iform.format(b=b, e=e, f=f)
data = scipy.io.loadmat(ifile)
ph = data['ph'].squeeze()
th = data['th'].squeeze()
pol_planes = list()
for j in jones:
print(b, e, f, j)
ofile = oform.format(b=b, e=e, f=f, j=j)
title = tform.format(b=b, e=e, f=f, j=j)
beam_inp = data[j]
beam_out = interpolate_beam(th, ph, beam_inp, n, extent)
pol_planes.append(beam_out)
plot_beam(beam_out, title, extent)
plt.savefig(ofile)
plt.close()
vp_real, vp_imag, vp_amp, vp_phase = create_rascil_image(pol_planes, cellsize, f*1e6, shift_peak=True)
export_image_to_fits(vp_real, fitsform.format(b=b, e=e, f=f, t='real'))
export_image_to_fits(vp_imag, fitsform.format(b=b, e=e, f=f, t='imag'))
# export_image_to_fits(vp_amp, fitsform.format(b=b, e=e, f=f, t='amp'))
# export_image_to_fits(vp_phase, fitsform.format(b=b, e=e, f=f, t='phase'))
|
the-stack_106_14510
|
try:
from setuptools import setup
from setuptools import find_packages
packages = find_packages()
except ImportError:
from distutils.core import setup
import os
packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
if bytes is str:
raise Exception("This module is designed for python 3 only.")
setup(
name='archr',
version='9.0.gitrolling',
python_requires='>=3.6',
packages=packages,
package_data = {
'archr': ['implants/*.sh', 'implants/*/*']
},
install_requires=[
'shellphish_qemu==0.11.0',
'pygdbmi',
'docker',
'nclib>=1.0.0rc3',
'patchelf-wrapper',
'cle==9.0.gitrolling'
],
extras_require = {
'angr': ["angr==9.0.gitrolling"],
'qtrace': ["qtrace"],
},
description='Target-centric program analysis.',
url='https://github.com/angr/archr',
classifiers=["Operating System :: POSIX :: Linux"],
)
|
the-stack_106_14511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
from itertools import chain
try:
# 3.8 and up
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from util import check_if_numbers_are_consecutive, count_not_none
import six
def _construct_key(previous_key, separator, new_key):
"""
Returns the new_key if no previous key exists, otherwise concatenates
previous key, separator, and new_key
:param previous_key:
:param separator:
:param new_key:
:return: a string if previous_key exists and simply passes through the
new_key otherwise
"""
if previous_key:
return u"{}{}{}".format(previous_key, separator, new_key)
else:
return new_key
def flatten(nested_dict, separator="_", root_keys_to_ignore=set(),
keys_to_ignore=None, keys_to_keep=None, filter_func=None):
"""
Flattens a dictionary with nested structure to a dictionary with no
hierarchy
Consider ignoring keys that you are not interested in to prevent
unnecessary processing
This is especially true for very deep objects
:param nested_dict: dictionary we want to flatten
:param separator: string to separate dictionary keys by
:param root_keys_to_ignore: set of root keys to ignore from flattening
:param keys_to_ignore: set of keys to ignore during flattening
:param keys_to_keep: set of keys to keep during flattening
:param filter_func: only keep keys for which this function returns True
:return: flattened dictionary
"""
assert isinstance(nested_dict, dict), "flatten requires a dictionary input"
assert isinstance(separator, six.string_types), "separator must be string"
n_kws = count_not_none(keys_to_ignore, keys_to_keep, filter_func)
assert n_kws <= 1, "Arguments `keys_to_ignore`, `keys_to_keep`, or " \
"`filter_func` are mutually exclusive"
# This global dictionary stores the flattened keys and values and is
# ultimately returned
flattened_dict = dict()
if keys_to_ignore is not None:
assert isinstance(keys_to_ignore, set)
filter_func = lambda x: False if x in keys_to_ignore else True
elif keys_to_keep is not None:
assert isinstance(keys_to_keep, set)
filter_func = lambda x: True if x in keys_to_keep else False
if filter_func is None:
filter_func = lambda x: True
def _flatten(object_, key):
"""
For dict, list and set objects_ calls itself on the elements and for
other types assigns the object_ to
the corresponding key in the global flattened_dict
:param object_: object to flatten
:param key: carries the concatenated key for the object_
:return: None
"""
# Empty object can't be iterated, take as is
if not object_:
if filter_func(key):
flattened_dict[key] = object_
# These object types support iteration
elif isinstance(object_, dict):
for object_key in object_:
if key or object_key not in root_keys_to_ignore:
_flatten(object_[object_key], _construct_key(key,
separator,
object_key))
elif isinstance(object_, list) or isinstance(object_, set):
for index, item in enumerate(object_):
_flatten(item, _construct_key(key, separator, index))
# Anything left take as is
elif filter_func(key):
flattened_dict[key] = object_
_flatten(nested_dict, None)
return flattened_dict
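# Illustrative usage sketch (not part of the original module); with the default
# separator "_" nested keys are joined and list indices become key segments:
#
#     flatten({"a": {"b": 1, "c": [2, 3]}})
#     # -> {"a_b": 1, "a_c_0": 2, "a_c_1": 3}
#
#     flatten({"a": {"b": 1, "c": 2}}, keys_to_ignore={"a_c"})
#     # -> {"a_b": 1}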
def flatten_keys(nested_dict, separator="_"):
"""
"""
def _keys(object_, key):
if not object_:
return {key}
elif isinstance(object_, dict):
return set(chain(
*[_keys(object_[o_key], _construct_key(key, separator, o_key))
for o_key in object_]))
elif isinstance(object_, list) or isinstance(object_, set):
return set(chain(
*[_keys(item, _construct_key(key, separator, index))
for index, item in enumerate(object_)]))
else:
return {key}
return _keys(nested_dict, None)
flatten_json = flatten
def _unflatten_asserts(flat_dict, separator):
assert isinstance(flat_dict, dict), "un_flatten requires dictionary input"
assert isinstance(separator, six.string_types), "separator must be string"
assert all((not value or not isinstance(value, Iterable) or
isinstance(value, six.string_types)
for value in flat_dict.values())), "provided dict is not flat"
def unflatten(flat_dict, separator='_'):
"""
Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# This global dictionary is mutated and returned
unflattened_dict = dict()
def _unflatten(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
for item in flat_dict:
_unflatten(unflattened_dict, item.split(separator), flat_dict[item])
return unflattened_dict
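# Illustrative usage sketch (not part of the original module):
#
#     unflatten({"a_b": 1, "a_c": 2})
#     # -> {"a": {"b": 1, "c": 2}}
#
# Keys that contain the separator for other reasons would be split as well, so
# callers are assumed to choose a separator that does not occur inside keys.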
def unflatten_list(flat_dict, separator='_'):
"""
Unflattens a dictionary, first assuming no lists exist and then trying to
identify lists and replace them
This is probably not very efficient and has not been tested extensively
Feel free to add test cases or rewrite the logic
Issues that stand out to me:
- Sorting all the keys in the dictionary, which especially for the root
dictionary can be a lot of keys
- Checking that numbers are consecutive is O(N) in number of keys
:param flat_dict: dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# First unflatten the dictionary assuming no lists exist
unflattened_dict = unflatten(flat_dict, separator)
def _convert_dict_to_list(object_, parent_object, parent_object_key):
if isinstance(object_, dict):
for key in object_:
if isinstance(object_[key], dict):
_convert_dict_to_list(object_[key], object_, key)
try:
keys = [int(key) for key in object_]
keys.sort()
except (ValueError, TypeError):
keys = []
keys_len = len(keys)
if (keys_len > 0 and sum(keys) ==
int(((keys_len - 1) * keys_len) / 2) and keys[0] == 0 and
keys[-1] == keys_len - 1 and
check_if_numbers_are_consecutive(keys)):
# The dictionary looks like a list so we're going to replace it
parent_object[parent_object_key] = []
for key_index, key in enumerate(keys):
parent_object[parent_object_key].append(object_[str(key)])
# The list item we just added might be a list itself
# https://github.com/amirziai/flatten/issues/15
_convert_dict_to_list(parent_object[parent_object_key][-1],
parent_object[parent_object_key],
key_index)
_convert_dict_to_list(unflattened_dict, None, None)
return unflattened_dict
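# Illustrative usage sketch (not part of the original module): consecutive
# integer keys starting at 0 are recognised as list indices and converted back:
#
#     unflatten_list({"a_0": 1, "a_1": 2, "b": 3})
#     # -> {"a": [1, 2], "b": 3}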
def cli(input_stream=sys.stdin, output_stream=sys.stdout):
raw = input_stream.read()
input_json = json.loads(raw)
output = json.dumps(flatten(input_json))
output_stream.write('{}\n'.format(output))
output_stream.flush()
if __name__ == '__main__':
cli()
|
the-stack_106_14512
|
import sys
sys.path.append('../')
from backtest.backtest import Backtest
from execution.execution import SimulatedExecution
from portfolio.portfolio import Portfolio
from settings import EQUITY
from strategy.strategy import TestStrategy, MovingAverageCrossStrategy
from data.price import HistoricCSVPriceHandler
if __name__ == '__main__':
# Trade on GBPUSD
pairs = ["GBPUSD"] # or ["GBPUSD","EURUSD"]
# Specify strategy to use
strategy = 'ma'
if strategy =='test':
strategy = TestStrategy
# Create the strategy parameters for the TestStrategy
strategy_params = {}
elif strategy =='ma':
strategy = MovingAverageCrossStrategy
# Create the strategy parameters for the MovingAverageCrossStrategy
strategy_params = {
"short_window": 500,
"long_window": 2000
}
# Create and execute the backtest
backtest = Backtest(
pairs, HistoricCSVPriceHandler,
strategy, strategy_params,
Portfolio, SimulatedExecution,
equity = EQUITY,
heartbeat= 0.0,
max_iters = 100000)
backtest.simualte_trading()
|
the-stack_106_14514
|
# -*- coding: utf-8 -*-
"""
This file should not be modified; it is provided by us and will be reset.
This script tests the pretrained model
@author: Maurice Rohr
"""
# import socket
# def guard(*args, **kwargs):
# raise Exception("Internet Access Forbidden")
# socket.socket = guard
from predict import predict_labels
from wettbewerb import load_references, save_predictions
import argparse
import time
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Predict given Model')
parser.add_argument('--test_dir', action='store', type=str,default='../test/')
args = parser.parse_args()
ecg_leads,ecg_labels,fs,ecg_names = load_references(args.test_dir) # Import ECG files, associated diagnoses, sampling frequency (Hz) and names # sampling frequency 300 Hz
start_time = time.time()
predictions = predict_labels(ecg_leads,fs,ecg_names,use_pretrained=True)
pred_time = time.time()-start_time
save_predictions(predictions) # saves the predictions to a CSV file
print("Runtime", pred_time, "s")
|
the-stack_106_14515
|
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import concurrent.futures
import json
import multiprocessing
import re
import socket
import threading
import weakref
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
import sockjs.tornado
from . import local_volume, static
from .json_utils import json_encoder_default
from .random_token import make_random_token
from .sockjs_handler import SOCKET_PATH_REGEX, SOCKET_PATH_REGEX_WITHOUT_GROUP, SockJSHandler
INFO_PATH_REGEX = r'^/neuroglancer/info/(?P<token>[^/]+)$'
DATA_PATH_REGEX = r'^/neuroglancer/(?P<data_format>[^/]+)/(?P<token>[^/]+)/(?P<scale_key>[^/]+)/(?P<start_x>[0-9]+),(?P<end_x>[0-9]+)/(?P<start_y>[0-9]+),(?P<end_y>[0-9]+)/(?P<start_z>[0-9]+),(?P<end_z>[0-9]+)$'
SKELETON_PATH_REGEX = r'^/neuroglancer/skeleton/(?P<key>[^/]+)/(?P<object_id>[0-9]+)$'
MESH_PATH_REGEX = r'^/neuroglancer/mesh/(?P<key>[^/]+)/(?P<object_id>[0-9]+)$'
STATIC_PATH_REGEX = r'^/v/(?P<viewer_token>[^/]+)/(?P<path>(?:[a-zA-Z0-9_\-][a-zA-Z0-9_\-.]*)?)$'
global_static_content_source = None
global_server_args = dict(bind_address='127.0.0.1', bind_port=0)
debug = False
class Server(object):
def __init__(self, ioloop, bind_address='127.0.0.1', bind_port=0):
self.viewers = weakref.WeakValueDictionary()
self.token = make_random_token()
self.executor = concurrent.futures.ThreadPoolExecutor(
max_workers=multiprocessing.cpu_count())
self.ioloop = ioloop
sockjs_router = sockjs.tornado.SockJSRouter(
SockJSHandler, SOCKET_PATH_REGEX_WITHOUT_GROUP, io_loop=ioloop)
sockjs_router.neuroglancer_server = self
def log_function(handler):
if debug:
print("%d %s %.2fs" % (handler.get_status(),
handler.request.uri, handler.request.request_time()))
app = self.app = tornado.web.Application(
[
(STATIC_PATH_REGEX, StaticPathHandler, dict(server=self)),
(INFO_PATH_REGEX, VolumeInfoHandler, dict(server=self)),
(DATA_PATH_REGEX, SubvolumeHandler, dict(server=self)),
(SKELETON_PATH_REGEX, SkeletonHandler, dict(server=self)),
(MESH_PATH_REGEX, MeshHandler, dict(server=self)),
] + sockjs_router.urls,
log_function=log_function,
# Set a large maximum message size to accommodate large screenshot
# messages.
websocket_max_message_size=100 * 1024 * 1024)
http_server = tornado.httpserver.HTTPServer(app)
sockets = tornado.netutil.bind_sockets(port=bind_port, address=bind_address)
http_server.add_sockets(sockets)
actual_port = sockets[0].getsockname()[1]
global global_static_content_source
if global_static_content_source is None:
global_static_content_source = static.get_default_static_content_source()
if bind_address == '0.0.0.0' or bind_address == '::':
hostname = socket.getfqdn()
else:
hostname = bind_address
self.server_url = 'http://%s:%s' % (hostname, actual_port)
def get_volume(self, key):
dot_index = key.find('.')
if dot_index == -1:
return None
viewer_token = key[:dot_index]
volume_token = key[dot_index+1:]
viewer = self.viewers.get(viewer_token)
if viewer is None:
return None
return viewer.volume_manager.volumes.get(volume_token)
class BaseRequestHandler(tornado.web.RequestHandler):
def initialize(self, server):
self.server = server
class StaticPathHandler(BaseRequestHandler):
def get(self, viewer_token, path):
if viewer_token != self.server.token and viewer_token not in self.server.viewers:
self.send_error(404)
return
try:
data, content_type = global_static_content_source.get(path)
except ValueError as e:
self.send_error(404, message=e.args[0])
return
self.set_header('Content-type', content_type)
self.finish(data)
class VolumeInfoHandler(BaseRequestHandler):
def get(self, token):
vol = self.server.get_volume(token)
if vol is None:
self.send_error(404)
return
self.finish(json.dumps(vol.info(), default=json_encoder_default).encode())
class SubvolumeHandler(BaseRequestHandler):
@tornado.web.asynchronous
def get(self, data_format, token, scale_key, start_x, end_x, start_y, end_y, start_z, end_z):
start = (int(start_x), int(start_y), int(start_z))
end = (int(end_x), int(end_y), int(end_z))
vol = self.server.get_volume(token)
if vol is None:
self.send_error(404)
return
def handle_subvolume_result(f):
try:
data, content_type = f.result()
except ValueError as e:
self.send_error(400, message=e.args[0])
return
self.set_header('Content-type', content_type)
self.finish(data)
self.server.executor.submit(
vol.get_encoded_subvolume,
data_format, start, end, scale_key=scale_key).add_done_callback(
lambda f: self.server.ioloop.add_callback(lambda: handle_subvolume_result(f)))
class MeshHandler(BaseRequestHandler):
@tornado.web.asynchronous
def get(self, key, object_id):
object_id = int(object_id)
vol = self.server.get_volume(key)
if vol is None:
self.send_error(404)
return
def handle_mesh_result(f):
try:
encoded_mesh = f.result()
except local_volume.MeshImplementationNotAvailable:
self.send_error(501, message='Mesh implementation not available')
return
except local_volume.MeshesNotSupportedForVolume:
self.send_error(405, message='Meshes not supported for volume')
return
except local_volume.InvalidObjectIdForMesh:
self.send_error(404, message='Mesh not available for specified object id')
return
except ValueError as e:
self.send_error(400, message=e.args[0])
return
self.set_header('Content-type', 'application/octet-stream')
self.finish(encoded_mesh)
self.server.executor.submit(vol.get_object_mesh, object_id).add_done_callback(
lambda f: self.server.ioloop.add_callback(lambda: handle_mesh_result(f)))
class SkeletonHandler(BaseRequestHandler):
@tornado.web.asynchronous
def get(self, key, object_id):
object_id = int(object_id)
vol = self.server.get_volume(key)
if vol is None:
self.send_error(404)
return
if vol.skeletons is None:
self.send_error(405, message='Skeletons not supported for volume')
return
def handle_result(f):
try:
encoded_skeleton = f.result()
except Exception as e:
self.send_error(500, message=str(e))
return
if encoded_skeleton is None:
self.send_error(404, message='Skeleton not available for specified object id')
return
self.set_header('Content-type', 'application/octet-stream')
self.finish(encoded_skeleton)
def get_encoded_skeleton(skeletons, object_id):
skeleton = skeletons.get_skeleton(object_id)
if skeleton is None:
return None
return skeleton.encode(skeletons)
self.server.executor.submit(
get_encoded_skeleton, vol.skeletons, object_id).add_done_callback(
lambda f: self.server.ioloop.add_callback(lambda: handle_result(f)))
global_server = None
def set_static_content_source(*args, **kwargs):
global global_static_content_source
global_static_content_source = static.get_static_content_source(*args, **kwargs)
def set_server_bind_address(bind_address='127.0.0.1', bind_port=0):
global global_server_args
global_server_args = dict(bind_address=bind_address, bind_port=bind_port)
def is_server_running():
return global_server is not None
def stop():
"""Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
"""
global global_server
if global_server is not None:
ioloop = global_server.ioloop
def stop_ioloop():
ioloop.stop()
ioloop.close()
global_server.ioloop.add_callback(stop_ioloop)
global_server = None
def get_server_url():
return global_server.server_url
_global_server_lock = threading.Lock()
def start():
global global_server
with _global_server_lock:
if global_server is not None: return
done = threading.Event()
def start_server():
global global_server
ioloop = tornado.ioloop.IOLoop(make_current=True)
global_server = Server(ioloop=ioloop, **global_server_args)
done.set()
ioloop.start()
thread = threading.Thread(target=start_server)
thread.daemon = True
thread.start()
done.wait()
def register_viewer(viewer):
start()
global_server.viewers[viewer.token] = viewer
def defer_callback(callback, *args, **kwargs):
"""Register `callback` to run in the server event loop thread."""
start()
global_server.ioloop.add_callback(lambda: callback(*args, **kwargs))
|
the-stack_106_14517
|
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.utils import merge_bboxes
import xml.etree.ElementTree as ET
class YoloDataset(Dataset):
def __init__(self, train_lines, image_size, mosaic=True, is_train=True):
super(YoloDataset, self).__init__()
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.image_size = image_size
self.mosaic = mosaic
self.flag = True
self.is_train = is_train
def __len__(self):
return self.train_batches
def rand(self, a=0, b=1):
return np.random.rand() * (b - a) + a
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
if not random:
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image, np.float32)
box_data = np.zeros((len(box), 5))
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > w] = w
box[:, 3][box[:, 3] > h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)]
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
return image_data, box_data
new_ar = w / h * self.rand(1 - jitter, 1 + jitter) / self.rand(1 - jitter, 1 + jitter)
scale = self.rand(.25, 2)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
dx = int(self.rand(0, w - nw))
dy = int(self.rand(0, h - nh))
new_image = Image.new('RGB', (w, h),
(np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)))
new_image.paste(image, (dx, dy))
image = new_image
flip = self.rand() < .5
if flip:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
hue = self.rand(-hue, hue)
sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)
val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
# hue channel of a float32 HSV image is in degrees (0-360), so wrap by 360
x[..., 0] += hue*360
x[..., 0][x[..., 0]>360] -= 360
x[..., 0][x[..., 0]<0] += 360
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
box_data = np.zeros((len(box), 5))
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
if flip:
box[:, [0, 2]] = w - box[:, [2, 0]]
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > w] = w
box[:, 3][box[:, 3] > h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)]
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
return image_data, box_data
def get_random_data_with_Mosaic(self, annotation_line, input_shape, hue=.1, sat=1.5, val=1.5):
h, w = input_shape
min_offset_x = 0.3
min_offset_y = 0.3
scale_low = 1 - min(min_offset_x, min_offset_y)
scale_high = scale_low + 0.2
image_datas = []
box_datas = []
index = 0
place_x = [0, 0, int(w * min_offset_x), int(w * min_offset_x)]
place_y = [0, int(h * min_offset_y), int(h * min_offset_y), 0]
for line in annotation_line:
line_content = line.split()
image = Image.open(line_content[0])
image = image.convert("RGB")
iw, ih = image.size
box = np.array([np.array(list(map(int, box.split(',')))) for box in line_content[1:]])
flip = self.rand() < .5
if flip and len(box) > 0:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
box[:, [0, 2]] = iw - box[:, [2, 0]]
new_ar = w / h
scale = self.rand(scale_low, scale_high)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
hue = self.rand(-hue, hue)
sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)
val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
# hue channel of a float32 HSV image is in degrees (0-360), so wrap by 360
x[..., 0] += hue*360
x[..., 0][x[..., 0]>360] -= 360
x[..., 0][x[..., 0]<0] += 360
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) # numpy array, 0 to 1
image = Image.fromarray((image * 255).astype(np.uint8))
dx = place_x[index]
dy = place_y[index]
new_image = Image.new('RGB', (w, h),
(np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)
index = index + 1
box_data = []
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > w] = w
box[:, 3][box[:, 3] > h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)]
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
image_datas.append(image_data)
box_datas.append(box_data)
cutx = np.random.randint(int(w * min_offset_x), int(w * (1 - min_offset_x)))
cuty = np.random.randint(int(h * min_offset_y), int(h * (1 - min_offset_y)))
new_image = np.zeros([h, w, 3])
new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
new_boxes = np.array(merge_bboxes(box_datas, cutx, cuty))
return new_image, new_boxes
def __getitem__(self, index):
lines = self.train_lines
n = self.train_batches
index = index % n
if self.mosaic:
if self.flag and (index + 4) < n:
img, y = self.get_random_data_with_Mosaic(lines[index:index + 4], self.image_size[0:2])
else:
img, y = self.get_random_data(lines[index], self.image_size[0:2], random=self.is_train)
self.flag = bool(1-self.flag)
else:
img, y = self.get_random_data(lines[index], self.image_size[0:2], random=self.is_train)
if len(y) != 0:
boxes = np.array(y[:, :4], dtype=np.float32)
boxes[:, 0] = boxes[:, 0] / self.image_size[1]
boxes[:, 1] = boxes[:, 1] / self.image_size[0]
boxes[:, 2] = boxes[:, 2] / self.image_size[1]
boxes[:, 3] = boxes[:, 3] / self.image_size[0]
boxes = np.maximum(np.minimum(boxes, 1), 0)
boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
boxes[:, 0] = boxes[:, 0] + boxes[:, 2] / 2
boxes[:, 1] = boxes[:, 1] + boxes[:, 3] / 2
y = np.concatenate([boxes, y[:, -1:]], axis=-1)
img = np.array(img, dtype=np.float32)
tmp_inp = np.transpose(img / 255.0, (2, 0, 1))
tmp_targets = np.array(y, dtype=np.float32)
return tmp_inp, tmp_targets
def yolo_dataset_collate(batch):
images = []
bboxes = []
for img, box in batch:
images.append(img)
bboxes.append(box)
images = np.array(images)
return images, bboxes
def convert_annotation(year, image_id, list_file,classes):
in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id), encoding='utf-8')
tree=ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = 0
if obj.find('difficult')!=None:
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)), int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
def write_classes(sets,classes,wd):
for year, image_set in sets:
image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set), encoding='utf-8').read().strip().split()
list_file = open('%s_%s.txt'%(year, image_set), 'w', encoding='utf-8')
for image_id in image_ids:
list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg'%(wd, year, image_id))
convert_annotation(year, image_id, list_file ,classes)
list_file.write('\n')
list_file.close()
|
the-stack_106_14518
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'calypso_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
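# Illustrative example (not part of the original script) of how the two helpers
# above classify specifiers; the values shown are what the current implementation
# would return:
#
#     split_format_specifiers(find_format_specifiers('%1 of %2 blocks'))
#     # -> ({'1', '2'}, [])          # Qt-style, order-insensitive
#     split_format_specifiers(find_format_specifiers('%s copied to %s'))
#     # -> (set(), ['s', 's'])       # strprintf-style, order matters
#     split_format_specifiers(find_format_specifiers('(percentage: %1%)'))
#     # -> ({'1'}, [])               # trailing '%)' is not treated as strprintf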
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", '&apos;')
text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
the-stack_106_14520
|
#!/usr/bin/python
######################################################################
# Copyright 2005, Andrew Selle.
# This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
#######################################################################
# Sample sim wrapper script
#######################################################################
import time
import sys
import os
import re
import socket
import MON_WRAPPER_ENGINE
#######################################################################
# Allocate wrapper script
#######################################################################
cmd=sys.argv[1:]
wrapper=MON_WRAPPER_ENGINE.MON_WRAPPER(cmd)
wrapper.poll_interval=30 # poll/report every 5 seconds
#######################################################################
# Clean up the variables that MON_WRAPPER sets
#######################################################################
def Report_Start(pid,rpc,session):
rpc.Remove_Status_If_Exists(session,"memory")
rpc.Remove_Status_If_Exists(session,"last_frame")
rpc.Remove_Status_If_Exists(session,"last_frame_time")
rpc.Remove_Status_If_Exists(session,"last_frame_duration")
rpc.Update_Status(session,"sge_host",socket.gethostname())
if os.environ.has_key("JOB_ID"): rpc.Update_Status(session,"sge_jobid",os.environ['JOB_ID'])
# Register with the wrapper class
wrapper.start_callback=Report_Start
#######################################################################
# Register Memory Reporting
#######################################################################
# this will be called every time the reporting thread wakes up
max_memory=0
def Report_Memory(pid,rpc,session):
def Memory_Usage(pid):
scale = {'kB':1/1024.0,'mB': 1,'KB':1/1024.0,'MB':1}
try:
fp=open("/proc/%d/status"%pid)
while 1:
i=fp.readline()
if i=="":break
elif i.startswith("VmSize:"):
label,size,unit=i.split(None,3)
return float(size)*scale[unit]
except: return -1
usage=Memory_Usage(pid)
if usage>0:
global max_memory
max_memory=max(max_memory,usage)
rpc.Update_Status(session,"memory",usage)
rpc.Update_Status(session,"max_memory",max_memory)
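# Illustrative note (assumption about the /proc format, not from the original
# script): Memory_Usage parses lines of /proc/<pid>/status such as
#     "VmSize:   123456 kB"
# which split into ('VmSize:', '123456', 'kB') and are converted to megabytes,
# here 123456/1024 ~= 120.6 MB.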
# Register with the wrapper class
wrapper.polled_events.append(Report_Memory)
mon_regexp=re.compile("MONITOR[ ]+(.+)[ ]*=[ ]*(.+)")
whitespace_comma=re.compile("\s*,\s*")
def Match_Mon(mon_match):
key=mon_match.group(1)
value=mon_match.group(2)
return key,value
def Report_Mon(pid,rpc,session,data):
key,value=data
if key=="end_frame" or key=="begin_frame" or key=="output_directory" or key=="glkeys":
if key=="begin_frame": value=int(value)
if key=="end_frame": value=int(value)
rpc.Update_Status(session,key,value)
if key=="gl":
value=whitespace_comma.split(value)
rpc.Update_Status(session,key,value)
wrapper.simulation_events.append((mon_regexp,Match_Mon,Report_Mon))
#######################################################################
# Register Frame Reporting
#######################################################################
# this is applied to each line of input read from the simulation stdout
frame_regexp=re.compile("END Frame (\d+)[ ]+(\d+\.\d+) s")
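# Illustrative example (not from the original script): a simulation stdout line
# such as "END Frame 12   3.45 s" is matched by frame_regexp, and Match_Frame
# below would return (12, 3.45).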
# this is called with the match structure of applying the regular expression
def Match_Frame(frame_match):
frame=int(frame_match.group(1))
frametime=float(frame_match.group(2))
return (frame,frametime) # pack and return data
def Report_Frame(pid,rpc,session,data):
frame,frametime=data # unpack data
# send the info to the server
rpc.Update_Status(session,"last_frame",frame)
rpc.Update_Status(session,"last_frame_duration",frametime)
rpc.Update_Status(session,"last_frame_time",time.time())
# Register with the wrapper class
wrapper.simulation_events.append((frame_regexp,Match_Frame,Report_Frame))
#######################################################################
# Run the script
#######################################################################
wrapper.run()
|
the-stack_106_14521
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""The Connected cluster API operation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Operation name: {Microsoft.Kubernetes}/{resource}/{operation}
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display:
~azure.mgmt.hybridkubernetes.v2020_01_01_preview.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
'display': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(self, **kwargs) -> None:
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = None
|
the-stack_106_14523
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.memlet import Memlet
import dace.libraries.blas as blas
import dace.libraries.lapack as lapack
import numpy as np
import pytest
###############################################################################
def make_sdfg(implementation, dtype, storage=dace.StorageType.Default):
n = dace.symbol("n")
suffix = "_device" if storage != dace.StorageType.Default else ""
transient = storage != dace.StorageType.Default
sdfg = dace.SDFG("matrix_lufact_getrf_{}_{}".format(implementation, str(dtype)))
state = sdfg.add_state("dataflow")
xhost_arr = sdfg.add_array("x", [n, n], dtype, storage=dace.StorageType.Default)
if transient:
x_arr = sdfg.add_array("x" + suffix, [n, n], dtype, storage=storage, transient=transient)
xt_arr = sdfg.add_array('xt' + suffix, [n, n], dtype, storage=storage, transient=transient)
sdfg.add_array("pivots" + suffix, [n], dace.dtypes.int32, storage=storage, transient=transient)
sdfg.add_array("result" + suffix, [1], dace.dtypes.int32, storage=storage, transient=transient)
if transient:
xhi = state.add_read("x")
xho = state.add_write("x")
xi = state.add_access("x" + suffix)
xo = state.add_access("x" + suffix)
xin = state.add_access("xt" + suffix)
xout = state.add_access("xt" + suffix)
transpose_in = blas.nodes.transpose.Transpose("transpose_in", dtype=dtype)
transpose_in.implementation = "cuBLAS"
transpose_out = blas.nodes.transpose.Transpose("transpose_out", dtype=dtype)
transpose_out.implementation = "cuBLAS"
state.add_nedge(xhi, xi, Memlet.from_array(*xhost_arr))
state.add_nedge(xo, xho, Memlet.from_array(*xhost_arr))
state.add_memlet_path(xi, transpose_in, dst_conn='_inp', memlet=Memlet.from_array(*x_arr))
state.add_memlet_path(transpose_in, xin, src_conn='_out', memlet=Memlet.from_array(*xt_arr))
state.add_memlet_path(xout, transpose_out, dst_conn='_inp', memlet=Memlet.from_array(*xt_arr))
state.add_memlet_path(transpose_out, xo, src_conn='_out', memlet=Memlet.from_array(*x_arr))
else:
xin = state.add_access("x" + suffix)
xout = state.add_access("x" + suffix)
pivots = state.add_access("pivots" + suffix)
result = state.add_access("result" + suffix)
getrf_node = lapack.nodes.getrf.Getrf("getrf")
getrf_node.implementation = implementation
state.add_memlet_path(xin, getrf_node, dst_conn="_xin", memlet=Memlet.simple(xin, "0:n, 0:n", num_accesses=n * n))
state.add_memlet_path(getrf_node, result, src_conn="_res", memlet=Memlet.simple(result, "0", num_accesses=1))
state.add_memlet_path(getrf_node, pivots, src_conn="_ipiv", memlet=Memlet.simple(pivots, "0:n", num_accesses=n))
state.add_memlet_path(getrf_node,
xout,
src_conn="_xout",
memlet=Memlet.simple(xout, "0:n, 0:n", num_accesses=n * n))
return sdfg
###############################################################################
@pytest.mark.parametrize("implementation, dtype, storage", [
pytest.param("MKL", dace.float32, dace.StorageType.Default, marks=pytest.mark.mkl),
pytest.param("MKL", dace.float64, dace.StorageType.Default, marks=pytest.mark.mkl),
pytest.param("OpenBLAS", dace.float32, dace.StorageType.Default, marks=pytest.mark.lapack),
pytest.param("OpenBLAS", dace.float64, dace.StorageType.Default, marks=pytest.mark.lapack),
pytest.param("cuSolverDn", dace.float32, dace.StorageType.GPU_Global, marks=pytest.mark.gpu),
pytest.param("cuSolverDn", dace.float64, dace.StorageType.GPU_Global, marks=pytest.mark.gpu),
])
def test_getrf(implementation, dtype, storage):
sdfg = make_sdfg(implementation, dtype, storage)
getrf_sdfg = sdfg.compile()
np_dtype = getattr(np, dtype.to_string())
from scipy.linalg import lu_factor
size = 4
lapack_status = np.array([-1], dtype=np.int32)
A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]], dtype=np_dtype)
lu_ref, _ = lu_factor(A)
pivots = np.array([0, 0, 0, 0], dtype=np.int32)
# the x is input AND output, the "result" argument gives the lapack status!
getrf_sdfg(x=A, result=lapack_status, pivots=pivots, n=size)
if np.allclose(A, lu_ref):
print("Test ran successfully for {}.".format(implementation))
else:
raise ValueError("Validation error!")
###############################################################################
if __name__ == "__main__":
test_getrf("MKL", dace.float32)
test_getrf("MKL", dace.float64)
test_getrf("cuSolverDn", dace.float32, dace.StorageType.GPU_Global)
test_getrf("cuSolverDn", dace.float64, dace.StorageType.GPU_Global)
###############################################################################
|
the-stack_106_14528
|
# flake8: noqa
import numpy
import numpy as np
from skimage.data import camera
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
from aydin.io.datasets import (
normalise,
add_noise,
dots,
lizard,
pollen,
characters,
newyork,
)
from aydin.it.classic_denoisers.gm import calibrate_denoise_gm
from aydin.util.log.log import Log
def demo_gm(image, display=True):
"""
Demo for self-supervised denoising using camera image with synthetic noise
"""
Log.enable_output = True
Log.set_log_max_depth(5)
image = normalise(image.astype(np.float32))
noisy = add_noise(image)
function, parameters, memreq = calibrate_denoise_gm(noisy)
denoised = function(noisy, **parameters)
image = numpy.clip(image, 0, 1)
noisy = numpy.clip(noisy, 0, 1)
denoised = numpy.clip(denoised, 0, 1)
psnr_noisy = psnr(image, noisy)
ssim_noisy = ssim(image, noisy)
psnr_denoised = psnr(image, denoised)
ssim_denoised = ssim(image, denoised)
print(" noisy :", psnr_noisy, ssim_noisy)
print("gm denoised:", psnr_denoised, ssim_denoised)
if display:
import napari
viewer = napari.Viewer()
viewer.add_image(image, name='image')
viewer.add_image(noisy, name='noisy')
viewer.add_image(denoised, name='denoised')
napari.run()
return ssim_denoised
if __name__ == "__main__":
newyork_image = newyork()
demo_gm(newyork_image)
characters_image = characters()
demo_gm(characters_image)
pollen_image = pollen()
demo_gm(pollen_image)
lizard_image = lizard()
demo_gm(lizard_image)
dots_image = dots()
demo_gm(dots_image)
camera_image = camera()
demo_gm(camera_image)
|
the-stack_106_14529
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from keras import backend as K
# Read training and test data files
train = pd.read_csv(r"D:\Machine Learning\My first nn\mnist_train.csv").values
test = pd.read_csv(r"D:\Machine Learning\My first nn\mnist_test.csv").values
# Reshape and normalize training data
trainX = train[:, 1:].reshape(train.shape[0],1,28, 28).astype( 'float32' )
X_train = trainX / 255.0
y_train = train[:,0]
# Reshape and normalize test data
testX = test[:,1:].reshape(test.shape[0],1, 28, 28).astype( 'float32' )
X_test = testX / 255.0
y_test = test[:,0]
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_test = lb.fit_transform(y_test)
model = Sequential()
K.set_image_dim_ordering('th')
model.add(Convolution2D(30, 5, 5, border_mode= 'valid' , input_shape=(1, 28, 28),activation= 'relu' ))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(15, 3, 3, activation= 'relu' ))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation= 'relu' ))
model.add(Dense(50, activation= 'relu' ))
model.add(Dense(10, activation= 'softmax' ))
# Compile model
model.compile(loss= 'categorical_crossentropy' , optimizer= 'adam' , metrics=[ 'accuracy' ])
model.fit(X_train, y_train,
epochs=20,
batch_size= 160)
score = model.evaluate(X_test, y_test, batch_size=128)
model.summary()
|
the-stack_106_14532
|
"""
Task that computes radial distribution of density.
"""
from typing import Tuple
import numpy as np
from amuse.lab import units, ScalarQuantity
from omtool.core.datamodel import AbstractTask, Snapshot, profiler
from omtool.core.utils import math, particle_centers
class DensityProfileTask(AbstractTask):
"""
Task that computes radial distribution of density.
"""
def __init__(
self,
center_type: str = "mass",
resolution: int = 1000,
r_unit: ScalarQuantity = 1 | units.kpc,
dens_unit: ScalarQuantity = 1 | units.MSun / units.kpc**3,
) -> None:
super().__init__()
self.center_func = particle_centers.get(center_type)
self.resolution = resolution
self.r_unit = r_unit
self.dens_unit = dens_unit
@profiler("Density profile task")
def run(self, snapshot: Snapshot) -> Tuple[np.ndarray, np.ndarray]:
particles = snapshot.particles
center = self.center_func(particles)
radii = math.get_lengths(particles.position - center)
masses = particles.mass
radii, masses = math.sort_with(radii, masses)
number_of_chunks = (len(radii) // self.resolution) * self.resolution
radii = radii[0 : number_of_chunks : self.resolution]
masses = (
masses[0:number_of_chunks]
.reshape(shape=(-1, self.resolution))
.sum(axis=1)[1:]
)
volume = 4 / 3 * np.pi * (radii[1:] ** 3 - radii[:-1] ** 3)
densities = masses / volume
radii = radii[1:]
return (radii / self.r_unit, densities / self.dens_unit)
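# Sketch of the binning performed above (illustrative only): particles are sorted
# by radius and grouped into shells of `resolution` particles each, so the density
# of shell i is
#     rho_i = M_i / (4/3 * pi * (r_i**3 - r_(i-1)**3))
# where M_i is the total mass in shell i and r_i is the radius of its outermost
# particle; the innermost shell is dropped.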
|
the-stack_106_14536
|
import torch
import triton
import triton.language as tl
import copy
import pytest
import ast
import itertools
torch.manual_seed(0)
# convert from string to torch.dtype
# Necessary because doesn't print torch.dtype properly
cvt = {
'bool': torch.bool,
'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
}
int_dtypes = ['int8', 'int16', 'int32', 'int64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + float_dtypes
def patch_kernel(template, to_replace):
kernel = copy.deepcopy(template)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
@pytest.mark.parametrize("dtype_x", [
(dtype_x) for dtype_x in dtypes
])
def test_empty_kernel(dtype_x, device='cuda'):
SIZE = 128
@triton.jit
def kernel(X, **meta):
pass
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
kernel[(1, )](x, SIZE=SIZE, num_warps=4)
def _test_load_and_store_op(dtype_x, device='cuda'):
SIZE = 1
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
x = tl.load(X)
tl.store(Z, x)
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
# reference result
z_ref = x.clone()
# triton result
z_tri = torch.empty_like(x)
# triton.testing.assert_almost_equal(z_ref, z_tri)
# run load and store kernel
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
triton.testing.assert_almost_equal(z_ref, z_tri)
# generic test functions
def _test_unary(dtype_x, expr, torch_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
if 'log' in expr: x = torch.abs(x) + 0.01
# reference result
z_ref = eval(expr if torch_expr is None else torch_expr)
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_almost_equal(z_ref, z_tri)
def _test_binary(dtype_x, dtype_y, expr, mode_x='real', mode_y='real', device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
y = triton.testing.random(SIZE, dtype=cvt[dtype_y], device=device)
if mode_x == 'nan': x[:] = float('nan')
if mode_y == 'nan': y[:] = float('nan')
# reference result
z_ref = eval(expr)
# triton result
z_tri = torch.empty(SIZE, dtype=z_ref.dtype, device=device)
kernel[(1, )](z_tri, x, y, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_almost_equal(z_ref, z_tri, err_msg=expr)
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['+', '-', '*', '/', '%'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['&', '|', '^'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, expr, device='cuda'):
if 'float' in dtype_x + dtype_y:
with pytest.raises(RuntimeError):
_test_binary(dtype_x, dtype_y, expr, device=device)
else:
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test compare ops
# ---------------
ops = ['==', '!=', '>', '<', '>=', '<=']
@pytest.mark.parametrize("dtype_x, dtype_y, expr, mode_x, mode_y", \
# real
[
(dtype_x, dtype_y, f' x {op} y', 'real', 'real') \
for op in ops \
for dtype_x in dtypes \
for dtype_y in dtypes
] + \
# NaNs
[('float32', 'float32', f' x {op} y', mode_x, mode_y) \
for op in ops
for mode_x, mode_y in [('nan' , 'real'),
('real', 'nan'),
('nan' , 'nan')]
])
def test_compare_op(dtype_x, dtype_y, expr, mode_x, mode_y, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, mode_x=mode_x, mode_y=mode_y, device=device)
# ---------------
# test load and store op
# ---------------
@pytest.mark.parametrize("dtype_x", [
(dtype_x) for dtype_x in float_dtypes
] + [\
(dtype_x) for dtype_x in int_dtypes
])
def test_load_and_store_op(dtype_x, device='cuda'):
_test_load_and_store_op(dtype_x, device=device)
print("after test")
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, f' -x') for dtype_x in float_dtypes
] + [\
(dtype_x, f' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'torch.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
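# Worked example: make_ptr_str('X', [32, 32]) returns
#   "X + tl.arange(0, 32)[None, :]*1 + tl.arange(0, 32)[:, None]*32"
# i.e. the last dimension gets stride 1 and each earlier dimension gets the product of
# the trailing extents, matching a contiguous row-major tensor as used by these tests.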
@pytest.mark.parametrize("expr", [f'x[{s}]' for s in
['None, :', ':, None',\
'None, :, :', ':, :, None']\
])
def test_index1d(expr, device='cuda'):
dtype = torch.int32
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, **meta):
SIZE = meta['SIZE']
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = triton.testing.random(shape_x, dtype=dtype, device=device)
y = torch.zeros(shape_z, dtype=dtype, device=device)
z_ref = eval(expr) + y
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, num_warps=1, SIZE=shape_x[0])
# compare
triton.testing.assert_almost_equal(z_ref, z_tri)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x, mode", itertools.chain.from_iterable([
[('add', 'int32', mode), ('add', 'float16', mode), ('add', 'float32', mode), \
('max', 'int32', mode), ('max', 'float32', mode),\
('min', 'int32', mode), ('min', 'float32', mode),\
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x, mode, device='cuda'):
dtype_x = cvt[dtype_x]
n_programs = 5
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
torch_op = {'add': torch.sum, 'max': torch.max, 'min': torch.min}[op]
max_neutral = float('-inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).min
min_neutral = float('inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
x_tri = triton.testing.random((n_programs, ), dtype=dtype_x, device=device)
if mode == 'all_neg':
x_tri = -torch.abs(x_tri)
if mode == 'all_pos':
x_tri = torch.abs(x_tri)
if mode == 'min_neg':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = -torch.max(torch.abs(x_tri)) - 1
if mode == 'max_pos':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = torch.max(torch.abs(x_tri)) + 1
z_tri = torch.empty([], dtype=dtype_x, device=device)
z_tri.fill_(neutral)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = torch_op(x_tri).to(dtype_x)
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == z_tri.item()
else:
triton.testing.assert_almost_equal(z_ref, z_tri)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False) \
for dtype_x in dtypes\
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True)
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
x = torch.tensor([43.5], dtype=cvt[dtype_x], device=device)
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=meta['BITCAST'])
tl.store(Z, z)
# triton result
z_tri = torch.empty((1, ), dtype=cvt[dtype_z], device=device)
kernel[(1, )](x, z_tri, BITCAST=bitcast)
# torch result
if bitcast:
import numpy as np
z_ref = x.detach().cpu().numpy().view(getattr(np, dtype_z))
z_ref = torch.from_numpy(z_ref).to(device)
else:
z_ref = x.to(z_tri.dtype)
assert z_tri == z_ref
# ---------------
# test reduce
# ---------------
@pytest.mark.parametrize("dtype, shape",
[(dtype, shape) \
for dtype in dtypes\
for shape in [128, 512]])
def test_reduce1d(dtype, shape, device='cuda'):
dtype = cvt[dtype]
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X + tl.arange(0, meta['BLOCK']))
tl.store(Z, tl.sum(x, axis=0))
x = triton.testing.random((shape,), dtype=dtype, device=device)
# triton result
z_tri = triton.testing.random((1,), dtype=dtype, device=device)
kernel[(1,)](x, z_tri, BLOCK=shape)
# torch result
z_ref = torch.sum(x).to(dtype)
# compare
triton.testing.assert_almost_equal(z_tri, z_ref)
@pytest.mark.parametrize("dtype, shape, axis",
[(dtype, shape, 1) \
for dtype in ['float32']\
for shape in [(1, 1024)]])
def test_reduce2d(dtype, shape, axis, device='cuda'):
dtype = cvt[dtype]
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
range_m = tl.arange(0, meta['BLOCK_M'])
range_n = tl.arange(0, meta['BLOCK_N'])
x = tl.load(X + range_m[:, None]*meta['BLOCK_N'] + range_n[None, :])
z = tl.sum(x, axis=meta['AXIS'])
tl.store(Z + range_m, z)
# input
x = triton.testing.random(shape, dtype=dtype, device=device)
# triton result
z_tri = torch.empty((shape[0],), dtype=dtype, device=device)
kernel[(1,)](x, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis)
# torch result
z_ref = torch.sum(x, axis=axis).to(dtype)
# compare
triton.testing.assert_almost_equal(z_tri, z_ref)
# ---------------
# test permute
# ---------------
@pytest.mark.parametrize("dtype, shape, perm",
[(dtype, shape, perm) \
for dtype in ['float32']\
for shape in [(128, 128)]\
for perm in [(1, 0)]])
def test_permute(dtype, shape, perm, device='cuda'):
dtype = cvt[dtype]
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xn,
Z, stride_zm, stride_zn, **meta):
BLOCK_M = meta['BLOCK_M']
BLOCK_N = meta['BLOCK_N']
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
# input
x = triton.testing.random(shape, dtype=dtype, device=device)
# triton result
z_tri = torch.empty_like(x)
pgm = kernel[(1, 1)](x, x.stride(0), x.stride(1),
z_tri, z_tri.stride(1), z_tri.stride(0),
BLOCK_M=shape[0], BLOCK_N=shape[1])
# torch result
z_ref = x.permute(*perm).contiguous()
# compare
triton.testing.assert_almost_equal(z_tri, z_ref)
# parse ptx to make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
# ---------------
# test dot
# ---------------
@pytest.mark.parametrize("epilogue", ['none', 'add-matrix', 'add-rows', 'add-cols'])
def test_dot(epilogue, device='cuda'):
torch.manual_seed(0)
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xk,
Y, stride_yk, stride_yn,
Z, stride_zm, stride_zn, **meta):
BLOCK_M = meta['BLOCK_M']
BLOCK_K = meta['BLOCK_K']
BLOCK_N = meta['BLOCK_N']
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
off_k = tl.arange(0, BLOCK_K)
Xs = X + off_m[:, None] * stride_xm + off_k[None, :] * stride_xk
Ys = Y + off_k[:, None] * stride_yk + off_n[None, :] * stride_yn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
z = tl.dot(tl.load(Xs), tl.load(Ys))
if meta['ADD_MATRIX']:
z += tl.load(Zs)
if meta['ADD_ROWS']:
ZRs = Z + off_m * stride_zm
z += tl.load(ZRs)[:, None]
if meta['ADD_COLS']:
ZCs = Z + off_n * stride_zn
z += tl.load(ZCs)[None, :]
tl.store(Zs, z)
# input
M, N, K = 64, 64, 32
x = triton.testing.random((M, K), dtype=torch.float16, device=device)
y = triton.testing.random((K, N), dtype=torch.float16, device=device)
# triton result
z = triton.testing.random((M, N), dtype=torch.float16, device=device)
z_tri = z.clone()
pgm = kernel[(1, 1)](x, x.stride(0), x.stride(1),
y, y.stride(0), y.stride(1),
z_tri, z_tri.stride(0), z_tri.stride(1),
BLOCK_M=M, BLOCK_K=K, BLOCK_N=N,
ADD_MATRIX = epilogue=='add-matrix',
ADD_ROWS = epilogue=='add-rows',
ADD_COLS = epilogue=='add-cols')
# torch result
z_ref = torch.matmul(x.float(), y.float())
if epilogue == 'add-matrix':
z_ref += z
if epilogue == 'add-rows':
z_ref += z[:,0][:, None]
if epilogue == 'add-cols':
z_ref += z[0,:][None, :]
z_ref = z_ref.to(torch.float16)
# compare
ptx = pgm.asm['ptx']
# print(ptx)
triton.testing.assert_almost_equal(z_tri, z_ref)
# make sure ld/st are vectorized
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
# ---------------
# test load
# ---------------
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
# ---------------
# test noop
#----------------
def test_noop(device='cuda'):
@triton.jit
def kernel(**meta):
pass
x = triton.testing.random((1,), dtype=torch.int32, device=device)
kernel[(1, )](x)
|
the-stack_106_14539
|
# Log chats.
# (c) @lapnlbotSupport
# By @its_Lapnl, @MrConfused
from asyncio import sleep
from userbot import CMD_HELP
from telethon.tl.types import MessageEntityMentionName
from telethon.utils import get_input_location
from userbot.utils import admin_cmd
from os import remove
from telethon import events
import asyncio
from datetime import datetime
import time
from userbot.utils import register, errors_handler
import logging
import os
import sys
from telethon.tl import functions, types
from telethon.tl.types import Channel, Chat, User
from userbot.uniborgConfig import Config
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.WARN)
NO_PM_LOG_USERS = []
BOTLOG = True
BOTLOG_CHATID = Config.PRIVATE_GROUP_BOT_API_ID
@borg.on(admin_cmd(outgoing=True, pattern=r"save(?: |$)([\s\S]*)"))
async def log(log_text):
    """ For the .save command, forwards a message or the command argument to the bot logs group """
if BOTLOG:
if log_text.reply_to_msg_id:
reply_msg = await log_text.get_reply_message()
await reply_msg.forward_to(BOTLOG_CHATID)
elif log_text.pattern_match.group(1):
user = f"#LOG / Chat ID: {log_text.chat_id}\n\n"
textx = user + log_text.pattern_match.group(1)
await bot.send_message(BOTLOG_CHATID, textx)
else:
await log_text.edit("`What am I supposed to log?`")
return
await log_text.edit("`Message saved 😁`")
else:
await log_text.edit("`This feature requires Logging to be enabled!`")
await sleep(2)
await log_text.delete()
@borg.on(events.NewMessage(incoming=True, func=lambda e: e.is_private))
async def monito_p_m_s(event):
sender = await event.get_sender()
if Config.NC_LOG_P_M_S and not sender.bot:
chat = await event.get_chat()
if chat.id not in NO_PM_LOG_USERS and chat.id != borg.uid:
try:
if Config.PM_LOGGR_BOT_API_ID:
if event.message:
e = await borg.get_entity(int(Config.PM_LOGGR_BOT_API_ID))
fwd_message = await borg.forward_messages(
e,
event.message,
silent=True
)
else:
return
else:
return
except Exception as e:
# logger.warn(str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(e)
CMD_HELP.update({"log_pms": "`.save` :\
\nUSAGE: saves the tagged message to the private log group."
})
|
the-stack_106_14541
|
from django.conf import settings
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.utils import send_mail
from olympia.zadmin.models import EmailPreviewTopic
log = olympia.core.logger.getLogger('z.task')
@task(rate_limit='3/s')
def admin_email(all_recipients, subject, body, preview_only=False,
from_email=settings.DEFAULT_FROM_EMAIL,
preview_topic='admin_email', **kw):
log.info('[%s@%s] admin_email about %r'
% (len(all_recipients), admin_email.rate_limit, subject))
if preview_only:
send = EmailPreviewTopic(topic=preview_topic).send_mail
else:
send = send_mail
for recipient in all_recipients:
send(subject, body, recipient_list=[recipient], from_email=from_email)
@task
def celery_error(**kw):
"""
This task raises an exception from celery to test error logging and
Sentry hookup.
"""
log.info('about to raise an exception from celery')
raise RuntimeError('this is an exception from celery')
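# Usage note (illustrative; the recipient address is made up): these tasks are meant to
# be queued through Celery rather than called directly, e.g.
#   admin_email.delay(['[email protected]'], 'Subject', 'Body', preview_only=True)
# which records the mail via EmailPreviewTopic instead of sending it.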
|
the-stack_106_14542
|
from posthog.models import Person, PersonDistinctId, Event, Element, Action, ActionStep, Funnel, FunnelStep, Team
from dateutil.relativedelta import relativedelta
from django.utils.timezone import now
from django.http import HttpResponseNotFound, JsonResponse
from posthog.utils import render_template
from typing import List
from pathlib import Path
import uuid
import random
import json
def _create_anonymous_users(team: Team, base_url: str) -> None:
with open(Path('posthog/demo_data.json').resolve(), 'r') as demo_data_file:
demo_data = json.load(demo_data_file)
Person.objects.bulk_create([
Person(team=team, properties={'is_demo': True}) for _ in range(0, 100)
])
distinct_ids: List[PersonDistinctId] = []
events: List[Event] = []
days_ago = 7
demo_data_index = 0
for index, person in enumerate(Person.objects.filter(team=team)):
if index > 0 and index % 14 == 0:
days_ago -= 1
distinct_id = str(uuid.uuid4())
distinct_ids.append(PersonDistinctId(team=team, person=person, distinct_id=distinct_id))
date = now() - relativedelta(days=days_ago)
browser = random.choice(['Chrome', 'Safari', 'Firefox'])
events.append(Event(team=team, event='$pageview', distinct_id=distinct_id, properties={'$current_url': base_url, '$browser': browser, '$lib': 'web'}, timestamp=date))
if index % 3 == 0:
person.properties.update(demo_data[demo_data_index])
person.save()
demo_data_index += 1
Event.objects.create(
team=team,
distinct_id=distinct_id,
event='$autocapture',
properties={'$current_url': base_url},
timestamp=date + relativedelta(seconds=14),
elements=[
Element(tag_name='a', href='/demo/1', attr_class=['btn', 'btn-success'], attr_id='sign-up', text='Sign up')
])
events.append(Event(event='$pageview', team=team, distinct_id=distinct_id, properties={'$current_url': '%s1/' % base_url, '$browser': browser, '$lib': 'web'}, timestamp=date + relativedelta(seconds=15)))
if index % 4 == 0:
Event.objects.create(
team=team,
event='$autocapture',
distinct_id=distinct_id,
properties={'$current_url': '%s1/' % base_url},
timestamp=date + relativedelta(seconds=29),
elements=[
Element(tag_name='button', attr_class=['btn', 'btn-success'], text='Sign up!')
])
events.append(Event(event='$pageview', team=team, distinct_id=distinct_id, properties={'$current_url': '%s2/' % base_url, '$browser': browser, '$lib': 'web'}, timestamp=date + relativedelta(seconds=30)))
if index % 5 == 0:
Event.objects.create(
team=team,
event='$autocapture',
distinct_id=distinct_id,
properties={'$current_url': '%s2/' % base_url},
timestamp=date + relativedelta(seconds=59),
elements=[
Element(tag_name='button', attr_class=['btn', 'btn-success'], text='Pay $10')
])
events.append(Event(event='$pageview', team=team, distinct_id=distinct_id, properties={'$current_url': '%s3/' % base_url, '$browser': browser, '$lib': 'web'}, timestamp=date + relativedelta(seconds=60)))
PersonDistinctId.objects.bulk_create(distinct_ids)
Event.objects.bulk_create(events)
def _create_funnel(team: Team, base_url: str) -> None:
homepage = Action.objects.create(team=team, name='HogFlix homepage view')
ActionStep.objects.create(action=homepage, event='$pageview', url=base_url, url_matching='exact')
user_signed_up = Action.objects.create(team=team, name='HogFlix signed up')
    ActionStep.objects.create(action=user_signed_up, event='$autocapture', url='%s1/' % base_url, url_matching='exact', selector='button')
user_paid = Action.objects.create(team=team, name='HogFlix paid')
    ActionStep.objects.create(action=user_paid, event='$autocapture', url='%s2/' % base_url, url_matching='exact', selector='button')
funnel = Funnel.objects.create(team=team, name='HogFlix signup -> watching movie')
FunnelStep.objects.create(funnel=funnel, action=homepage, order=0)
FunnelStep.objects.create(funnel=funnel, action=user_signed_up, order=1)
FunnelStep.objects.create(funnel=funnel, action=user_paid, order=2)
def demo(request):
team = request.user.team_set.get()
if Event.objects.filter(team=team).count() == 0:
_create_funnel(team=team, base_url=request.build_absolute_uri('/demo/'))
_create_anonymous_users(team=team, base_url=request.build_absolute_uri('/demo/'))
return render_template('demo.html', request=request, context={'api_token': team.api_token})
def delete_demo_data(request):
team = request.user.team_set.get()
people = PersonDistinctId.objects.filter(team=team, person__properties__is_demo=True)
Event.objects.filter(team=team, distinct_id__in=people.values('distinct_id')).delete()
Person.objects.filter(team=team, properties__is_demo=True).delete()
Funnel.objects.filter(team=team, name__contains="HogFlix").delete()
Action.objects.filter(team=team, name__contains="HogFlix").delete()
return JsonResponse({'status': 'ok'})
|
the-stack_106_14545
|
# -*- coding: utf-8 -*-
import copy
import datetime
import re
from enum import Enum
import lxml.html
import pandas as pd
import requests
import six
from lxml import etree
from pandas.compat import StringIO
from requests import Request
from six.moves.urllib.parse import urlencode
class MediaType(Enum):
DEFAULT = 'application/json'
JOIN_QUANT = 'application/vnd.joinquant+json'
class Client(object):
KEY_REGEX = r'key=([^&]*)'
def __init__(self, logger=None, **kwargs):
if logger is not None:
self._logger = logger
else:
import logging
self._logger = logging.getLogger(__name__)
self._host = kwargs.pop('host', 'localhost')
self._port = kwargs.pop('port', 8888)
self._key = kwargs.pop('key', '')
self._client = kwargs.pop('client', '')
self._timeout = kwargs.pop('timeout', (5.0, 10.0))
@property
def host(self):
return self._host
@host.setter
def host(self, value):
self._host = value
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
def get_statuses(self, timeout=None):
request = Request('GET', self.__create_url(None, 'statuses'))
self.__send_request(request, timeout)
def get_account(self, client=None, timeout=None):
request = Request('GET', self.__create_url(client, 'accounts'))
response = self.__send_request(request, timeout)
return response.json()
def get_positions(self, client=None, media_type=MediaType.DEFAULT, timeout=None):
request = Request('GET', self.__create_url(client, 'positions'))
request.headers['Accept'] = media_type.value
response = self.__send_request(request, timeout)
json = response.json()
if media_type == MediaType.DEFAULT:
sub_accounts = pd.DataFrame(json['subAccounts']).T
positions = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
portfolio = {'sub_accounts': sub_accounts, 'positions': positions}
return portfolio
return json
def get_orders(self, client=None, status="", timeout=None):
request = Request('GET', self.__create_url(client, 'orders', status=status))
response = self.__send_request(request, timeout)
json = response.json()
df = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
return df
def buy(self, client=None, timeout=None, **kwargs):
kwargs['action'] = 'BUY'
return self.__execute(client, timeout, **kwargs)
def sell(self, client=None, timeout=None, **kwargs):
kwargs['action'] = 'SELL'
return self.__execute(client, timeout, **kwargs)
def execute(self, client=None, timeout=None, **kwargs):
return self.__execute(client, timeout, **kwargs)
def cancel(self, client=None, order_id=None, timeout=None):
request = Request('DELETE', self.__create_order_url(client, order_id))
self.__send_request(request, timeout)
def cancel_all(self, client=None, timeout=None):
request = Request('DELETE', self.__create_order_url(client))
self.__send_request(request, timeout)
def query(self, client=None, navigation=None, timeout=None):
request = Request('GET', self.__create_url(client, '', navigation=navigation))
response = self.__send_request(request, timeout)
json = response.json()
df = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
return df
def query_new_stocks(self):
return self.__query_new_stocks()
def purchase_new_stocks(self, client=None, timeout=None):
today = datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')
df = self.query_new_stocks()
df = df[(df.ipo_date == today)]
        self._logger.info('There are [{}] new stocks available for subscription today'.format(len(df)))
for index, row in df.iterrows():
try:
order = {
'symbol': row['xcode'], 'type': 'LIMIT', 'price': row['price'], 'amountProportion': 'ALL'
}
                self._logger.info('Subscribing to new stock: {}'.format(order))
self.buy(client, timeout, **order)
except Exception as e:
                self._logger.error('Client [{}] failed to subscribe to new stock [{}({})]\n{}'.format((client or self._client), row['name'], row['code'], e))
def create_adjustment(self, client=None, request_json=None, timeout=None):
request = Request('POST', self.__create_url(client, 'adjustments'), json=request_json)
request.headers['Content-Type'] = MediaType.JOIN_QUANT.value
response = self.__send_request(request, timeout)
json = response.json()
return json
def start_clients(self, timeout=None):
request = Request('PUT', self.__create_url(None, 'clients'))
self.__send_request(request, timeout)
def shutdown_clients(self, timeout=None):
request = Request('DELETE', self.__create_url(None, 'clients'))
self.__send_request(request, timeout)
def __execute(self, client=None, timeout=None, **kwargs):
if not kwargs.get('type'):
kwargs['type'] = 'LIMIT'
request = Request('POST', self.__create_order_url(client), json=kwargs)
        response = self.__send_request(request, timeout)
return response.json()
def __query_new_stocks(self):
DATA_URL = 'http://vip.stock.finance.sina.com.cn/corp/view/vRPD_NewStockIssue.php?page=1&cngem=0&orderBy=NetDate&orderType=desc'
html = lxml.html.parse(DATA_URL)
res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
if six.PY2:
sarr = [etree.tostring(node) for node in res]
else:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('<font color="red">*</font>', '')
sarr = '<table>%s</table>' % sarr
df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
df = df.select(lambda x: x in [0, 1, 2, 3, 7], axis=1)
df.columns = ['code', 'xcode', 'name', 'ipo_date', 'price']
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['xcode'] = df['xcode'].map(lambda x: str(x).zfill(6))
return df
def __create_order_url(self, client=None, order_id=None, **params):
return self.__create_url(client, 'orders', order_id, **params)
def __create_url(self, client, resource, resource_id=None, **params):
all_params = copy.deepcopy(params)
all_params.update(client=(client or self._client))
all_params.update(key=self._key)
if resource_id is None:
path = '/{}'.format(resource)
else:
path = '/{}/{}'.format(resource, resource_id)
url = '{}{}?{}'.format(self.__create_base_url(), path, urlencode(all_params))
return url
def __create_base_url(self):
return 'http://{}:{}'.format(self._host, self._port)
def __send_request(self, request, timeout=None):
prepared_request = request.prepare()
self.__log_request(prepared_request)
with requests.sessions.Session() as session:
response = session.send(prepared_request, timeout=(timeout or self._timeout))
self.__log_response(response)
response.raise_for_status()
return response
def __log_request(self, prepared_request):
url = self.__eliminate_privacy(prepared_request.path_url)
if prepared_request.body is None:
self._logger.info('Request:\n{} {}'.format(prepared_request.method, url))
else:
self._logger.info('Request:\n{} {}\n{}'.format(prepared_request.method, url, prepared_request.body))
def __log_response(self, response):
message = u'Response:\n{} {}\n{}'.format(response.status_code, response.reason, response.text)
if response.status_code == 200:
self._logger.info(message)
else:
self._logger.error(message)
@classmethod
def __eliminate_privacy(cls, url):
match = re.search(cls.KEY_REGEX, url)
if match is None:
return url
key = match.group(1)
masked_key = '*' * len(key)
url = re.sub(cls.KEY_REGEX, "key={}".format(masked_key), url)
return url
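# Usage sketch (hypothetical host/key values): the client talks to a local trading
# gateway over HTTP, so none of these calls succeed without such a server running.
#
#   client = Client(host='localhost', port=8888, key='my-key', client='account-1')
#   portfolio = client.get_positions()   # {'sub_accounts': DataFrame, 'positions': DataFrame}
#   client.buy(symbol='600000', price=10.5, amount=100)   # order fields depend on the server API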
|
the-stack_106_14547
|
import argparse
from pathlib import Path
import os
from PIL import Image
import math
import numpy as np
import random
import pandas as pd
import sys
def check_args(dataset_folder, output_folder, split, split_test, nb_augmented_images):
"""
Description: This function checks that the command line arguments are valid arguments.
Params:
- dataset_folder: folder containing the dataset
- output_folder: output folder
- split: Percentage of the data used as training data
- spilt_test: Percentage of the data used as test data
- nb_augmented_images: dictionary containing the number of augmented images to generate
Returns:
- No return value
"""
# Check if dataset_folder is a directory
path_datasetfolder = Path(dataset_folder)
if (not os.path.isdir(dataset_folder)):
print('>> The argument --dataset-folder has to be a directory.')
exit()
# Check if dataset_folder contains True and False directories
for (root, dirs, _) in os.walk(path_datasetfolder):
if root == path_datasetfolder.stem:
if ('True' not in dirs or 'False' not in dirs):
print('>> The argument --dataset_folder must contain True and False directories.')
exit()
# Check if the output_folder exists. If not, create it.
path_outputfolder = Path(output_folder)
if (not path_outputfolder.exists()):
path_outputfolder.mkdir(parents=True)
# Check if the output_folder contains [train|val][0|1] folders. If not, create them.
path_outputfolder_train_zero = path_outputfolder / 'train' / '0'
path_outputfolder_train_one = path_outputfolder / 'train' / '1'
path_outputfolder_val_zero = path_outputfolder / 'val' / '0'
path_outputfolder_val_one = path_outputfolder / 'val' / '1'
path_outputfolder_test_zero = path_outputfolder / 'test' / '0'
path_outputfolder_test_one = path_outputfolder / 'test' / '1'
if (not path_outputfolder_train_zero.exists()):
path_outputfolder_train_zero.mkdir(parents=True)
if (not path_outputfolder_train_one.exists()):
path_outputfolder_train_one.mkdir(parents=True)
if (not path_outputfolder_val_zero.exists()):
path_outputfolder_val_zero.mkdir(parents=True)
if (not path_outputfolder_val_one.exists()):
path_outputfolder_val_one.mkdir(parents=True)
if (not path_outputfolder_test_zero.exists()):
path_outputfolder_test_zero.mkdir(parents=True)
if (not path_outputfolder_test_one.exists()):
path_outputfolder_test_one.mkdir(parents=True)
# Check if the split value is in the right range
if (split < 0.0 or split > 1.0):
print('>> The argument --split has to be a float value between 0.0 (included) and 1.0 (included).')
exit()
# Check if the split value is in the right range
if (split_test < 0.0 or split_test > 1.0):
print('>> The argument --split-test has to be a float value between 0.0 (included) and 1.0 (included).')
exit()
if (split + split_test > 1.0):
print('>> The result of split + split-test has to be a float value smaller than 1.0.')
exit()
# Check if the number of augmented images is the right range
    if (nb_augmented_images['nbaugmentedimages_training'] < 0 or nb_augmented_images['nbaugmentedimages_training'] > 60):
        print('>> The argument --nbaugmentedimages_training has to be an int value larger than 0 but smaller than 60')
        exit()
    if (nb_augmented_images['nbaugmentedimages_validation'] < 0 or nb_augmented_images['nbaugmentedimages_validation'] > 60):
        print('>> The argument --nbaugmentedimages_validation has to be an int value larger than 0 but smaller than 60')
        exit()
if (nb_augmented_images['nbaugmentedimages_test'] < 0):
print('>> The argument --nbaugmentedimages_test has to be an int value larger than 0.')
exit()
def crop_resize_large_patch(folder_path, dwi_file_name, t2_file_name, mean_std_dict):
"""
    Description: Crops a large patch of the DWI image around the lesion and resizes it,
                 using the T2 voxel spacing so that the same amount of tissue is kept.
    Params:
        - folder_path: path to the folder containing the images
        - dwi_file_name: name of the DWI file to crop around the lesion
        - t2_file_name: name of the T2 file whose voxel spacing defines the reference resolution
        - mean_std_dict: mean and standard deviation per sequence for this patient (not used here)
Returns:
- Pillow image
"""
# Load the array
dwi_np_array = np.load(folder_path / dwi_file_name)
# Convert arrays to PIL image
dwi_img = Image.fromarray(dwi_np_array)
# Get position of the lesion
dwi_pos = [int(elem) for elem in dwi_file_name.stem.split('_')[3].split('-')[1:3]]
# Using "VoxelSpacing" from the CSV file to crop the same amount of tissue on each image
t2_voxel_spacing = np.array([float(elem) for elem in t2_file_name.stem.split('_')[4].split('-')[1:3]]) # (0.5,0.5,3) -> only working on 1 slice, so we can omit the third dimension
dwi_voxel_spacing = np.array([float(elem) for elem in dwi_file_name.stem.split('_')[4].split('-')[1:3]])
# Crop a large patch. The patch size from the biggest image is fixed. Others must be computed
patch_size = np.array([200,200])
# dwi resolution < t2 resolution. Hence, we need less dwi pixels to get the same amount of tissue as on the t2
dwi_patch_size = patch_size // (dwi_voxel_spacing / t2_voxel_spacing)
# Crop the image
dwi_cropped = dwi_img.crop((dwi_pos[0] - dwi_patch_size[0], dwi_pos[1] - dwi_patch_size[1], dwi_pos[0] + dwi_patch_size[0], dwi_pos[1] + dwi_patch_size[1]))
# resize image
dwi_cropped_resized = dwi_cropped.resize((400,400), Image.BICUBIC)
return dwi_cropped_resized
def augment(image, non_picked_list):
"""
Description: Performs the augmentation and the cropping.
Params:
- image: Pillow image
- non_picked_list: list containing the different augmentation possibilities as tuples (to avoid duplicates)
Returns:
- Pillow image
"""
# Randomly select the augmentation possibility
index = random.randint(0, len(non_picked_list)-1)
# Pick a degree, a flipping value and a shifting value
degree, prob_flipping, shifting = non_picked_list[index]
# Remove from the list of possibilites (in order to avoid duplication)
del(non_picked_list[index])
# Rotate the image
temp_image = image.rotate(degree, resample=Image.BICUBIC)
print(f'Rotation: {degree} degrees, shifting {shifting}')
# Crop the image
width, height = temp_image.size
x_middle = math.floor(width/2)
y_middle = math.floor(height/2)
temp_image = temp_image.crop((x_middle - 65 + shifting, y_middle - 65, x_middle + 65 + shifting, y_middle + 65))
# Random horizontal flipping
if prob_flipping > 0.5:
print(f'Flipping')
is_flipped = True
temp_image = temp_image.transpose(Image.FLIP_LEFT_RIGHT)
else:
print(f'No flipping')
is_flipped = False
# Resize image to 65x65
temp_image = temp_image.resize((65,65), Image.BICUBIC)
return temp_image
def augment_images(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images, input_folder, output_folder, mean_std_dict):
"""
Description: Augments the different DWI images for a patient.
Params:
- visit_to_fid_to_sequence_type_to_filename: dictionary {'visit': {'fid': {'t2': [filenames], 'dwi': [filenames]}}}
- nb_augmented_images: number of augmented images
- input_folder: folder containing the T2, DWI and ADC images
- output_folder: output_folder
- mean_std_dict: mean and standard deviation per sequence for this patient
Returns:
- number of saved images (int)
"""
# Count the number of saved images
nb_saved = 0
for visit, fid_to_sequence_type_to_filename in visit_to_fid_to_sequence_type_to_filename.items():
for fid, sequences_to_filenames in fid_to_sequence_type_to_filename.items():
for dwi_file_name in sequences_to_filenames['dwi']:
t2_file_name = sequences_to_filenames['t2'][0]
# Align images and get DWI
cropped_image = crop_resize_large_patch(input_folder, dwi_file_name, t2_file_name, mean_std_dict)
# All possibilities
rotation_flipping_shifting_not_picked = [(i,j,k) for i in range(-20, 21) for j in [0,1] for k in [-1,0,1]]
# Data augmentation
for augmented_index in range(nb_augmented_images):
# Augment the image
augmented_image = augment(cropped_image, rotation_flipping_shifting_not_picked)
nb_saved += 1
print(nb_saved)
# Export the image
output_name = output_folder / f"dwi-{'_'.join(dwi_file_name.stem.split('_')[2:3])}_fid-{fid}_visit{visit}_augmented-{augmented_index}.png"
if os.path.exists(output_name):
print('DUPLICATE')
import IPython; IPython.embed(); exit()
augmented_image.save(output_name)
return nb_saved
def count_combination(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images):
"""
Description: Counts the number of combinations for the T2, DWI and ADC images of a specific patient.
Params:
- visit_to_fid_to_sequence_type_to_filename: dictionary {'visit': {'fid': {'t2': [filenames], 'dwi': [filenames], 'adc': [filenames]}}}
- nb_augmented_images: number of augmented images
Returns:
- number of combinations for a specific patient (int)
"""
# Count the number of saved images
nb_saved = 0
for visit, fid_to_sequence_type_to_filename in visit_to_fid_to_sequence_type_to_filename.items():
for fid, sequences_to_filenames in fid_to_sequence_type_to_filename.items():
for dwi_file_name in sequences_to_filenames['dwi']:
for augmented_index in range(nb_augmented_images):
nb_saved += 1
print(nb_saved)
return nb_saved
def augment_a_class(belonging_class, class_nparrays_dict, split, split_test, nb_augmented_images, input_folder, path_outputfolder, dict_balancing):
"""
Description: Performs augmentation and folder organization for a specific class.
Params:
- belonging_class: label
        - class_nparrays_dict: dictionary mapping each patientID to its list of NumPy file names for this class
- split: percentage of the dataset used as training set
- split_test: percentage of the dataset used as test set
- nb_augmented_images: dictionary containing the number of augmented images to generate
- input_folder: dataset input folder (for this class)
- path_outputfolder: path to the output folder
        - dict_balancing: per-split sample counts from the reference class used to balance this class, or None (see Note)
Returns:
- number of images saved (int)
Note:
- dict_balancing stores by how much each class has to be augmented in order to have a balanced dataset.
The full process is performed twice. The 1st time to compute dict_balancing. The 2nd time to actually perform the augmentation.
"""
print(nb_augmented_images)
# Total number of expected combinations to be output
nb_combinations = 0
# Number of stacked images saved
nb_saved = 0
# Dictionary that will contain the number of training, val and test samples saved
dict_for_balancing = {'train': 0, 'val': 0, 'test': 0}
    if dict_balancing != None:
        temp_counter = {'train': 0, 'val': 0, 'test': 0}
# Create Path objects for each output directory
path_outputfolder_train = path_outputfolder / 'train' / belonging_class
path_outputfolder_val = path_outputfolder / 'val' / belonging_class
path_outputfolder_test = path_outputfolder / 'test' / belonging_class
# Get the number of patients belonging to the class
number_of_patients_class = len(class_nparrays_dict.keys())
# Get the number of patients that will be used for the training set
number_of_training_patients_class = math.floor(split * number_of_patients_class)
# Get the number of patients that will be used for the test set
number_of_test_patients_class = math.floor(split_test * number_of_patients_class)
# Get the number of patients that will be used for the validation set
number_of_val_patients_class = number_of_patients_class - number_of_training_patients_class - number_of_test_patients_class
# Iterate over the patients, convert NumPy arrays, export them
for index, (patientID, file_names) in enumerate(class_nparrays_dict.items()):
global_array_dwi = np.array([])
# {'visit': {'fid': {'t2': [filenames], 'dwi': [filenames]}}}
visit_to_fid_to_sequence_type_to_filename = {}
# Iterate over patient's files to classify files by sequence type
for file_name in file_names:
# Load numpy array
image_nparray = np.load(input_folder / file_name)
# Get finding id and visit id
fid = file_name.stem.split('_')[1].split('-')[1]
visit = file_name.stem.split('_')[5].split('-')[1]
# Create the structure of the dictionary
visit_to_fid_to_sequence_type_to_filename.setdefault(visit, {})
visit_to_fid_to_sequence_type_to_filename[visit].setdefault(fid, {})
visit_to_fid_to_sequence_type_to_filename[visit][fid].setdefault('dwi', [])
visit_to_fid_to_sequence_type_to_filename[visit][fid].setdefault('t2', [])
# Classify the file
if 't2' in file_name.stem:
visit_to_fid_to_sequence_type_to_filename[visit][fid]['t2'].append(file_name)
elif 'ADC' in file_name.stem:
pass
else:
global_array_dwi = np.concatenate((global_array_dwi, image_nparray), axis=None)
visit_to_fid_to_sequence_type_to_filename[visit][fid]['dwi'].append(file_name)
# Compute mean and standard deviation for each sequence
mean_std_dict = {'mean_dwi': np.mean(global_array_dwi),
'std_dwi': np.std(global_array_dwi)}
# Train images
if (index < number_of_training_patients_class):
if dict_balancing != None:
temp_counter['train'] += count_combination(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_training'])
else:
nb_saved_train = augment_images(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_training'], input_folder, path_outputfolder_train, mean_std_dict)
dict_for_balancing['train'] += nb_saved_train
nb_saved += nb_saved_train
# Val or test images
else:
# Val images
if (index - number_of_training_patients_class < number_of_val_patients_class):
if dict_balancing != None:
temp_counter['val'] += count_combination(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_validation'])
else:
nb_saved_val = augment_images(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_validation'], input_folder, path_outputfolder_val, mean_std_dict)
dict_for_balancing['val'] += nb_saved_val
nb_saved += nb_saved_val
# Test images
else:
if dict_balancing != None:
temp_counter['test'] += count_combination(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_test'])
else:
nb_saved_test = augment_images(visit_to_fid_to_sequence_type_to_filename, nb_augmented_images['nbaugmentedimages_test'], input_folder, path_outputfolder_test, mean_std_dict)
dict_for_balancing['test'] += nb_saved_test
nb_saved += nb_saved_test
if dict_balancing != None:
nb_augmented_images['nbaugmentedimages_training'] = math.floor(nb_augmented_images['nbaugmentedimages_training'] * (dict_balancing['train']/temp_counter['train']))
if temp_counter['val'] != 0:
nb_augmented_images['nbaugmentedimages_validation'] = math.floor(nb_augmented_images['nbaugmentedimages_validation'] * (dict_balancing['val']/temp_counter['val']))
else:
nb_augmented_images['nbaugmentedimages_validation'] = 0
if temp_counter['test'] !=0:
nb_augmented_images['nbaugmentedimages_test'] = math.floor(nb_augmented_images['nbaugmentedimages_test'] * (dict_balancing['test']/temp_counter['test']))
else:
nb_augmented_images['nbaugmentedimages_test'] = 0
(nb_saved_new, nb_combinations_new, dict_for_balancing_new) = augment_a_class(belonging_class, class_nparrays_dict, split, split_test, nb_augmented_images, input_folder, path_outputfolder, None)
return (nb_saved_new, nb_combinations_new, dict_for_balancing_new)
return (nb_saved, nb_combinations, dict_for_balancing)
def create_images(dataset_folder, output_folder, split, split_test, nb_augmented_images):
"""
Description: Main function coordinating the augmentation process from end to end.
Params:
- dataset_folder: root folder of the dataset
- output_folder: root of the output folder
- split: percentage of the dataset used as training set
- split_test: percentage of the dataset used as test set
- nb_augmented_images: dictionary containing the number of augmented images to generate
Returns:
- no return value
"""
# Create Path objects from args
path_datasetfolder = Path(dataset_folder)
path_outputfolder = Path(output_folder)
# Create Path objects for the True and False directories, where the NumPy arrays are going to be loaded from
true_nparrays_path = path_datasetfolder / 'True'
false_nparrays_path = path_datasetfolder / 'False'
# Create lambda which flattens a list. https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
flatten = lambda l: [item for sublist in l for item in sublist]
# [true|false]_nparrays: list of lists of strings containing file names in the [true|false] directories. List of lists => to be flattened
true_nparrays = [files for (_, _, files) in os.walk(true_nparrays_path)]
false_nparrays = [files for (_, _, files) in os.walk(false_nparrays_path)]
# Flatten boths lists to create a list of strings, remove hidden files, add filenames as Path objects (useful later)
true_nparrays = [Path(file) for file in flatten(true_nparrays) if not file.startswith('.')]
false_nparrays = [Path(file) for file in flatten(false_nparrays) if not file.startswith('.')]
# Regroup files per patient
true_nparrays_dict = {}
false_nparrays_dict = {}
# True
for file_name in true_nparrays:
# Get patientID from file_name
patientID = file_name.stem.split('_')[0]
# Add file_name to the corresponding patient's list of files
true_nparrays_dict.setdefault(patientID, []).append(file_name)
# False
for file_name in false_nparrays:
# Get patientID from file_name
patientID = file_name.stem.split('_')[0]
# Add file_name to the corresponding patient's list of files
false_nparrays_dict.setdefault(patientID, []).append(file_name)
# Augment all images belonging to class False
(nb_saved_false, nb_combinations_false, dict_for_balancing_false) = augment_a_class('0', false_nparrays_dict, split, split_test, nb_augmented_images, false_nparrays_path, path_outputfolder, None)
# Augment all images belonging to class True
(nb_saved_true, nb_combinations_true, dict_for_balancing_true) = augment_a_class('1', true_nparrays_dict, split, split_test, nb_augmented_images, true_nparrays_path, path_outputfolder, dict_for_balancing_false)
# Total of saved images and combinations
nb_saved = nb_saved_true + nb_saved_false
nb_combinations = nb_combinations_true + nb_combinations_false
path_outputfolder_train_zero = path_outputfolder / 'train' / '0'
path_outputfolder_train_one = path_outputfolder / 'train' / '1'
path_outputfolder_val_zero = path_outputfolder / 'val' / '0'
path_outputfolder_val_one = path_outputfolder / 'val' / '1'
path_outputfolder_test_zero = path_outputfolder / 'test' / '0'
path_outputfolder_test_one = path_outputfolder / 'test' / '1'
# Sanity checks => had to add the condition because of .DS_Store files
ntrue = len([elem for elem in os.listdir(true_nparrays_path) if not elem.startswith('.')])
nfalse = len([elem for elem in os.listdir(false_nparrays_path) if not elem.startswith('.')])
ntrain_zero = len([elem for elem in os.listdir( path_outputfolder / 'train' / '0') if not elem.startswith('.')])
ntrain_one = len([elem for elem in os.listdir(path_outputfolder / 'train' / '1') if not elem.startswith('.')])
nval_zero = len([elem for elem in os.listdir(path_outputfolder / 'val' / '0') if not elem.startswith('.')])
nval_one = len([elem for elem in os.listdir(path_outputfolder / 'val' / '1') if not elem.startswith('.')])
ntest_zero = len([elem for elem in os.listdir(path_outputfolder / 'test' / '0') if not elem.startswith('.')])
ntest_one = len([elem for elem in os.listdir(path_outputfolder / 'test' / '1') if not elem.startswith('.')])
print()
print(f"Number of elements in dataset_folder/False directory (nfalse): {nfalse}")
print(f"Number of elements in dataset_folder/True directory (ntrue): {ntrue}")
print(f"Number of elements in output_folder/train/0 directory (ntrain_zero): {ntrain_zero}")
print(f"Number of elements in output_folder/train/1 directory (ntrain_one): {ntrain_one}")
print(f"Number of elements in output_folder/val/0 directory (nval_zero): {nval_zero}")
print(f"Number of elements in output_folder/val/1 directory (nval_one): {nval_one}")
print(f"Number of elements in output_folder/test/0 directory (ntest_zero): {ntest_zero}")
print(f"Number of elements in output_folder/test/1 directory (ntest_one): {ntest_one}")
print()
print(f"Argument nb_augmented_images: {nb_augmented_images}")
print()
print(f"Expected: nb_saved == ntrain_zero + ntrain_one + nval_zero + nval_one + ntest_zero + ntest_one")
    print(f"          nb_saved = {nb_saved} (expected {ntrain_zero + ntrain_one + nval_zero + nval_one + ntest_zero + ntest_one})")
if nb_saved != ntrain_zero + ntrain_one + nval_zero + nval_one + ntest_zero + ntest_one:
print(' ERROR! Number of elements does not match')
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create a dataset containing PNG images from a directory containing NumPy arrays')
parser.add_argument('--datasetfolder',
help='path to the dataset directory (has to contain True and False folders)',
required=True,
type=str)
parser.add_argument('--outputfolder',
help='output directory',
required=True,
type=str)
parser.add_argument('--split',
help='Ratio of the split for training set.',
type=float,
required=True)
parser.add_argument('--splittest',
help='Ratio of the split for the test set. In this case, the data is split into training, validation and test sets.',
type=float,
required=False,
default=0.0)
parser.add_argument('--nbaugmentedimagestraining',
                        help='Number of augmented images to generate for each training slice.',
type=int,
required=True)
parser.add_argument('--nbaugmentedimagesvalidation',
                        help='Number of augmented images to generate for each validation slice.',
type=int,
required=True)
args = parser.parse_args()
random.seed(42)
nbaugmentedimages = {'nbaugmentedimages_training': args.nbaugmentedimagestraining , 'nbaugmentedimages_validation': args.nbaugmentedimagesvalidation , 'nbaugmentedimages_test': 11 }
# Check if the arguments are valid
check_args(args.datasetfolder, args.outputfolder, args.split, args.splittest, nbaugmentedimages)
# Load the nparrays in dataset_folder, generate files, output them to output_folder
create_images(args.datasetfolder, args.outputfolder, args.split, args.splittest, nbaugmentedimages)
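# Example invocation (script name and paths are placeholders):
#   python augment_dataset.py --datasetfolder ./numpy_dataset --outputfolder ./png_dataset \
#       --split 0.7 --splittest 0.15 --nbaugmentedimagestraining 20 --nbaugmentedimagesvalidation 5
# The dataset folder must contain True/ and False/ subfolders of .npy files, as checked above.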
|
the-stack_106_14549
|
# Python 3 Spelling Corrector
#
# Copyright 2014 Jonas McCallum.
# Updated for Python 3, based on Peter Norvig's
# 2007 version: http://norvig.com/spell-correct.html
#
# Open source, MIT license
# http://www.opensource.org/licenses/mit-license.php
"""
Spell function
Author: Jonas McCallum
https://github.com/foobarmus/autocorrect
"""
from autocorrect.nlp_parser import NLP_COUNTS
from autocorrect.word import Word, common, exact, known, get_case
def spell(word):
"""most likely correction for everything up to a double typo"""
w = Word(word)
candidates = (common([word]) or exact([word]) or known([word]) or
known(w.typos()) or common(w.double_typos()) or
[word])
correction = max(candidates, key=NLP_COUNTS.get)
return get_case(word, correction)
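# Example (exact output depends on the bundled word counts):
#   spell('hte')      # -> most likely 'the'
#   spell('speling')  # -> most likely 'spelling'
# Case is preserved by get_case, so spell('Hte') would return a capitalized correction.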
|
the-stack_106_14550
|
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from datetime import datetime
from itertools import zip_longest
from io import StringIO
from unittest import TestCase
from unittest.mock import MagicMock, patch
from flowlogs_reader import FlowRecord
from flowlogs_reader.__main__ import main, actions
SAMPLE_INPUT = [
(
'2 123456789010 eni-102010ab 198.51.100.1 192.0.2.1 '
'443 49152 6 10 840 1439387263 1439387264 ACCEPT OK '
'- - - - - - -'
),
(
'2 123456789010 eni-102010ab 192.0.2.1 198.51.100.1 '
'49152 443 6 20 1680 1439387264 1439387265 ACCEPT OK '
'- - - - - - -'
),
(
'2 123456789010 eni-102010ab 192.0.2.1 198.51.100.2 '
'49152 443 6 20 1680 1439387265 1439387266 REJECT OK '
'- - - - - - -'
),
(
'2 123456789010 eni-1a2b3c4d - - - - - - - '
'1431280876 1431280934 - NODATA '
'- - - - - - -'
),
(
'2 123456789010 eni-4b118871 - - - - - - - '
'1431280876 1431280934 - SKIPDATA '
'- - - - - - -'
),
]
SAMPLE_RECORDS = [
FlowRecord.from_cwl_event({'message': m}) for m in SAMPLE_INPUT
]
class MainTestCase(TestCase):
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
def test_main(self, mock_reader):
main(['mygroup'])
mock_reader.assert_called_with(log_group_name='mygroup')
main(['-s', '2015-05-05 14:20:00', 'mygroup'])
mock_reader.assert_called_with(
log_group_name='mygroup', start_time=datetime(2015, 5, 5, 14, 20),
)
main(['--end-time', '2015-05-05 14:20:00', 'mygroup'])
mock_reader.assert_called_with(
log_group_name='mygroup', end_time=datetime(2015, 5, 5, 14, 20),
)
main([
'--time-format', '%Y-%m-%d',
'--start-time', '2015-05-05',
'mygroup'
])
mock_reader.assert_called_with(
log_group_name='mygroup', start_time=datetime(2015, 5, 5),
)
main(['--region', 'us-west-1', 'mygroup'])
mock_reader.assert_called_with(
log_group_name='mygroup', region_name='us-west-1',
)
main(['--profile', 'my-profile', 'mygroup'])
mock_reader.assert_called_with(
log_group_name='mygroup', profile_name='my-profile'
)
main(['--filter-pattern', 'REJECT', 'mygroup'])
mock_reader.assert_called_with(
log_group_name='mygroup', filter_pattern='REJECT'
)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_print(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(['mygroup'])
for call, record in zip_longest(mock_out.mock_calls, SAMPLE_INPUT):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, record)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_print_count(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
with self.assertRaises(ValueError):
main(['mygroup', 'print', 'two'])
with self.assertRaises(RuntimeError):
main(['mygroup', 'print', '2', '3'])
main(['mygroup', 'print', '2'])
for call, record in zip_longest(mock_out.mock_calls, SAMPLE_INPUT[:2]):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, record)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_ipset(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(['mygroup', 'ipset'])
expected_set = set()
for record in SAMPLE_INPUT:
data = record.split()
expected_set.add(data[3])
expected_set.add(data[4])
# don't include SKIPDATA/NODATA in results
expected_set.remove('-')
# make sure the number of lines are the same as the size of the set
self.assertEqual(len(mock_out.mock_calls), len(expected_set))
actual_set = set()
for __, args, kwargs in mock_out.mock_calls:
line = args[0]
actual_set.add(line)
self.assertEqual(actual_set, expected_set)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_findip(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(['mygroup', 'findip', '198.51.100.2'])
expected_result = [SAMPLE_INPUT[2]]
for call, record in zip_longest(mock_out.mock_calls, expected_result):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, record)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_bad_action(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(['mygroup', '__'])
expected_result = [
'unknown action: __',
'known actions: {}'.format(', '.join(actions)),
]
for call, result in zip_longest(mock_out.mock_calls, expected_result):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, result)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_main_missing_arn(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(['--external-id', 'uuid4', 'mygroup'])
expected_result = [
'must give a --role-arn if an --external-id is given',
]
for call, result in zip_longest(mock_out.mock_calls, expected_result):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, result)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.boto3', autospec=True)
def test_main_assume_role(self, mock_boto3, mock_reader):
mock_boto3.client.return_value.assume_role.return_value = {
'Credentials': {
'AccessKeyId': 'myaccesskeyid',
'SecretAccessKey': 'mysecretaccesskey',
'SessionToken': 'mysessiontoken',
}
}
mock_client = MagicMock()
mock_boto3.session.Session.return_value.client.return_value = (
mock_client
)
mock_reader.return_value = []
main(['--role-arn', 'myarn', '--external-id', 'uuid4', 'mygroup'])
session = mock_boto3.session.Session
session.assert_called_once_with(
aws_access_key_id='myaccesskeyid',
aws_secret_access_key='mysecretaccesskey',
aws_session_token='mysessiontoken',
)
session.return_value.client.assert_called_once_with('logs')
mock_reader.assert_called_once_with(
log_group_name='mygroup', boto_client=mock_client
)
@patch('flowlogs_reader.__main__.S3FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.boto3', autospec=True)
def test_main_assume_role_s3(self, mock_boto3, mock_reader):
mock_boto3.client.return_value.assume_role.return_value = {
'Credentials': {
'AccessKeyId': 'myaccesskeyid',
'SecretAccessKey': 'mysecretaccesskey',
'SessionToken': 'mysessiontoken',
}
}
mock_client = MagicMock()
mock_boto3.session.Session.return_value.client.return_value = (
mock_client
)
mock_reader.return_value = []
args = [
'--role-arn', 'myarn',
'--external-id', 'uuid4',
'--location-type', 's3',
'mybucket'
]
main(args)
session = mock_boto3.session.Session
session.assert_called_once_with(
aws_access_key_id='myaccesskeyid',
aws_secret_access_key='mysecretaccesskey',
aws_session_token='mysessiontoken',
)
session.return_value.client.assert_called_once_with('s3')
mock_reader.assert_called_once_with(
'mybucket', boto_client=mock_client
)
@patch('flowlogs_reader.__main__.FlowLogsReader', autospec=True)
def test_main_aggregate(self, mock_reader):
mock_reader.return_value = [SAMPLE_RECORDS[0], SAMPLE_RECORDS[0]]
with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
main(['mygroup', 'aggregate'])
output = mock_stdout.getvalue().splitlines()
actual_header = output[0].split('\t')
expected_header = [
'bytes',
'dstaddr',
'dstport',
'end',
'packets',
'protocol',
'srcaddr',
'srcport',
'start',
]
self.assertEqual(actual_header, expected_header)
actual_line = output[1].split('\t')
expected_line = [
'1680',
'192.0.2.1',
'49152',
'2015-08-12 13:47:44',
'20',
'6',
'198.51.100.1',
'443',
'2015-08-12 13:47:43',
]
self.assertEqual(actual_line, expected_line)
@patch('flowlogs_reader.__main__.S3FlowLogsReader', autospec=True)
@patch('flowlogs_reader.__main__.print', create=True)
def test_s3_destination(self, mock_out, mock_reader):
mock_out.stdout = io.BytesIO()
mock_reader.return_value = SAMPLE_RECORDS
main(
[
'mybucket/myprefix',
'--location-type', 's3',
'--include-accounts', '999999999998, 999999999999',
'--include-regions', 'us-east-1,us-east-2',
]
)
mock_reader.assert_called_once_with(
location='mybucket/myprefix',
include_accounts=['999999999998', '999999999999'],
include_regions=['us-east-1', 'us-east-2'],
)
for call, record in zip_longest(mock_out.mock_calls, SAMPLE_INPUT):
__, args, kwargs = call
line = args[0]
self.assertEqual(line, record)
|
the-stack_106_14551
|
import sys
import time
questions= {
'q1': 'Are you doing ok today? ',
# if yes
'q2': 'What did you have for breakfast? ',
# if no
'q3': 'You wanna tell me about it? Yes or No? ',
# if yes
'q4': 'What\'s going on? ',
# if no
'q5': '',
'q6': '',
}
responses= {
# q1 'yes' response
'r1': '\nGood, I\'m glad to hear it.\n',
# q1 'no' response
'r2': '\nI\'m sorry to hear that.\n',
# q2
'r3': '\nSounds.... strange to me, but what do I know? lol I had a BYTE but nothing special myself.\n',
# q2, lengthy description
'r4': '\nHoly cow, you type a lot!\n',
'r5': '\nI am a great listener, I can do this til my battery dies. So, lay it on me!\n',
'r6': '\nOk, I understand. If you change your mind just let me know, I am here for you.\n',
}
userInput= ''
curQ= questions['q1']
print('\nType "quit" to exit')
while userInput != 'q' and userInput != 'quit':
print('')
print('********')
print('')
userInput= input(curQ).strip().lower()
if userInput == 'q' or userInput == 'quit':
print('Goodbye\n')
sys.exit(1)
if curQ == questions['q1']:
if userInput == 'yes':
print(responses['r1'])
time.sleep(2)
curQ= questions['q2']
elif userInput == 'no':
print(responses['r2'])
time.sleep(2)
curQ= questions['q3']
else:
print('invalid entry')
# breakfast question
elif curQ == questions['q2']:
if len(userInput) < 200:
print(responses['r3'])
elif len(userInput) >= 200:
print(responses['r4'])
time.sleep(1)
print(responses['r3'])
else:
print('invalid entry')
# wanna tell me about it?
elif curQ == questions['q3']:
if userInput =='yes':
print(responses['r5'])
curQ= questions['q4']
elif userInput == 'no':
print(responses['r6'])
else:
print('invalid input')
|
the-stack_106_14552
|
import unittest
from checkov.terraform.util.docs_generator import get_checks
class TestDocGenerator(unittest.TestCase):
def test_doc_generator_initiation(self):
checks = get_checks()
self.assertGreater(len(checks), 0)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_14555
|
from pySOT.experimental_design import ExperimentalDesign, \
SymmetricLatinHypercube, LatinHypercube, TwoFactorial
import numpy as np
import pytest
def test_lhd():
lhd = LatinHypercube(dim=4, num_pts=10)
X = lhd.generate_points()
assert isinstance(lhd, ExperimentalDesign)
assert np.all(X.shape == (10, 4))
assert lhd.num_pts == 10
assert lhd.dim == 4
def test_lhd_round():
num_pts = 10
dim = 3
lb = np.array([1, 2, 3])
ub = np.array([3, 4, 5])
int_var = np.array([1])
np.random.seed(0)
lhd = LatinHypercube(dim=dim, num_pts=num_pts)
X = lhd.generate_points(lb=lb, ub=ub, int_var=int_var)
    assert np.all(np.round(X[:, 1]) == X[:, 1]) # Should be integers
assert np.all(np.max(X, axis=0) <= ub)
assert np.all(np.min(X, axis=0) >= lb)
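# Note on the *_round tests (assumed pySOT behaviour): generate_points scales the
# unit-hypercube design into [lb, ub] and rounds the columns listed in int_var, which is
# why column 1 is expected to hold whole numbers.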
def test_slhd():
for i in range(10, 12): # To test even and odd
slhd = SymmetricLatinHypercube(dim=3, num_pts=i)
X = slhd.generate_points()
assert isinstance(slhd, ExperimentalDesign)
assert np.all(X.shape == (i, 3))
assert slhd.num_pts == i
assert slhd.dim == 3
def test_slhd_round():
num_pts = 10
dim = 3
lb = np.array([1, 2, 3])
ub = np.array([3, 4, 5])
int_var = np.array([1])
np.random.seed(0)
slhd = SymmetricLatinHypercube(dim=dim, num_pts=num_pts)
X = slhd.generate_points(lb=lb, ub=ub, int_var=int_var)
    assert np.all(np.round(X[:, 1]) == X[:, 1]) # Should be integers
assert np.all(np.max(X, axis=0) == ub)
assert np.all(np.min(X, axis=0) == lb)
def test_full_factorial():
ff = TwoFactorial(dim=3)
X = ff.generate_points()
assert isinstance(ff, ExperimentalDesign)
assert np.all(X.shape == (8, 3))
assert ff.num_pts == 8
assert ff.dim == 3
assert np.all(np.logical_or(X == 1, X == 0))
with pytest.raises(ValueError): # This should raise an exception
TwoFactorial(20)
def test_full_factorial_round():
lb = np.array([1, 2, 3])
ub = np.array([3, 4, 5])
int_var = np.array([1])
ff = TwoFactorial(dim=3)
X = ff.generate_points(lb=lb, ub=ub, int_var=int_var)
assert np.all(np.logical_or(X == lb, X == ub))
if __name__ == '__main__':
test_full_factorial()
test_lhd()
test_slhd()
test_lhd_round()
test_slhd_round()
test_full_factorial_round()
|
the-stack_106_14556
|
"""SMA Solar Webconnect interface."""
from __future__ import annotations
import logging
from typing import Any
import pysma
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SENSORS,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
CONF_CUSTOM,
CONF_FACTOR,
CONF_GROUP,
CONF_KEY,
CONF_UNIT,
DEVICE_INFO,
DOMAIN,
GROUPS,
PYSMA_COORDINATOR,
PYSMA_SENSORS,
)
_LOGGER = logging.getLogger(__name__)
def _check_sensor_schema(conf: dict[str, Any]) -> dict[str, Any]:
"""Check sensors and attributes are valid."""
try:
valid = [s.name for s in pysma.Sensors()]
valid += pysma.LEGACY_MAP.keys()
except (ImportError, AttributeError):
return conf
customs = list(conf[CONF_CUSTOM])
for sensor in conf[CONF_SENSORS]:
if sensor in customs:
_LOGGER.warning(
"All custom sensors will be added automatically, no need to include them in sensors: %s",
sensor,
)
elif sensor not in valid:
raise vol.Invalid(f"{sensor} does not exist")
return conf
CUSTOM_SCHEMA = vol.Any(
{
vol.Required(CONF_KEY): vol.All(cv.string, vol.Length(min=13, max=15)),
vol.Required(CONF_UNIT): cv.string,
vol.Optional(CONF_FACTOR, default=1): vol.Coerce(float),
vol.Optional(CONF_PATH): vol.All(cv.ensure_list, [cv.string]),
}
)
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS),
vol.Optional(CONF_SENSORS, default=[]): vol.Any(
cv.schema_with_slug_keys(cv.ensure_list), # will be deprecated
vol.All(cv.ensure_list, [str]),
),
vol.Optional(CONF_CUSTOM, default={}): cv.schema_with_slug_keys(
CUSTOM_SCHEMA
),
},
extra=vol.PREVENT_EXTRA,
),
_check_sensor_schema,
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: AddEntitiesCallback,
discovery_info=None,
) -> None:
"""Import the platform into a config entry."""
_LOGGER.warning(
"Loading SMA via platform setup is deprecated. "
"Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA sensors."""
sma_data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = sma_data[PYSMA_COORDINATOR]
used_sensors = sma_data[PYSMA_SENSORS]
entities = []
for sensor in used_sensors:
entities.append(
SMAsensor(
coordinator,
config_entry.unique_id,
config_entry.data[DEVICE_INFO],
sensor,
)
)
async_add_entities(entities)
class SMAsensor(CoordinatorEntity, SensorEntity):
"""Representation of a SMA sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
config_entry_unique_id: str,
device_info: dict[str, Any],
pysma_sensor: pysma.Sensor,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self._sensor = pysma_sensor
self._enabled_default = self._sensor.enabled
self._config_entry_unique_id = config_entry_unique_id
self._device_info = device_info
# Set sensor enabled to False.
# Will be enabled by async_added_to_hass if actually used.
self._sensor.enabled = False
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._sensor.name
@property
def state(self) -> StateType:
"""Return the state of the sensor."""
return self._sensor.value
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit the value is expressed in."""
return self._sensor.unit
@property
def unique_id(self) -> str:
"""Return a unique identifier for this sensor."""
return (
f"{self._config_entry_unique_id}-{self._sensor.key}_{self._sensor.key_idx}"
)
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._config_entry_unique_id)},
"name": self._device_info["name"],
"manufacturer": self._device_info["manufacturer"],
"model": self._device_info["type"],
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
self._sensor.enabled = True
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
self._sensor.enabled = False
|
the-stack_106_14557
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from rest_framework import fields, serializers
class CustomFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'fields' arg up to the superclass
user_fields = kwargs.pop("fields", None)
# Instantiate the superclass normally
super().__init__(*args, **kwargs)
if user_fields is not None:
# Drop any fields that are not specified in the `fields` argument.
allowed = set(user_fields)
existing = set(self.fields)
for field_name in existing - allowed:
self.fields.pop(field_name)
class CustomFieldsMixin:
"""
A Serializer that takes an additional `fields` argument that
controls which fields should be displayed.
Universal mixin
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'fields' arg up to the superclass
user_fields = kwargs.pop("fields", None)
# Instantiate the superclass normally
super().__init__(*args, **kwargs)
if user_fields is not None:
# Drop any fields that are not specified in the `fields` argument.
allowed = set(user_fields)
existing = set(self.fields)
for field_name in existing - allowed:
self.fields.pop(field_name)
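# Illustrative usage of the two classes above (hypothetical ProfileSerializer and instance,
# not part of this module): passing `fields` at instantiation drops every declared field
# that is not listed.
#
#     class ProfileSerializer(CustomFieldsMixin, serializers.Serializer):
#         username = serializers.CharField()
#         email = serializers.EmailField()
#         create_time = serializers.DateTimeField()
#
#     ProfileSerializer(instance, fields=("username", "email")).data
#     # -> only "username" and "email" are rendered; "create_time" is removed.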
def is_custom_fields_enabled(slz: serializers.Serializer) -> bool:
    """Return whether the given serializer supports dynamically selected fields."""
if isinstance(slz, CustomFieldsModelSerializer):
return True
if issubclass(slz.__class__, CustomFieldsMixin):
return True
return False
def patch_datetime_field():
"""Patch DateTimeField which respect current timezone
See also: https://github.com/encode/django-rest-framework/issues/3732
"""
def to_representation(self, value):
# This is MAGIC!
if value and settings.USE_TZ:
try:
value = timezone.localtime(value)
except ValueError:
pass
return orig_to_representation(self, value)
orig_to_representation = fields.DateTimeField.to_representation
fields.DateTimeField.to_representation = to_representation
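# A minimal usage sketch (assumed call site): the patch is a process-wide monkey-patch, so a
# natural place to invoke it is once at start-up, e.g. from an AppConfig.ready() hook:
#
#     patch_datetime_field()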
class StringArrayField(fields.ListField):
"""
String representation of an array field.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.delimiter = kwargs.get("delimiter", ",")
def to_internal_value(self, data):
# convert string to list
target = []
for e in data:
target.extend(e.split(self.delimiter))
return super().to_internal_value(target)
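# Illustrative behaviour (assumed input): delimited items are flattened before the normal
# ListField validation runs.
#
#     StringArrayField(child=fields.CharField()).to_internal_value(["a,b", "c"])
#     # -> ["a", "b", "c"]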
class AdvancedListSerializer(serializers.Serializer):
    fields = StringArrayField(required=False, help_text=_("Fields to return for the object; multiple values allowed, comma-separated, e.g. username,status,id"))
    lookup_field = serializers.CharField(required=False, help_text=_("Field to query against; applies to exact_lookups and fuzzy_lookups"))
    exact_lookups = StringArrayField(
        required=False,
        help_text=_("Exact match on the field given by lookup_field; multiple values allowed, comma-separated, e.g. cat,dog,fish"),
    )
    fuzzy_lookups = StringArrayField(
        required=False,
        help_text=_("Fuzzy match on the field given by lookup_field; multiple values allowed, comma-separated, e.g. cat,dog,fish"),
    )
    wildcard_search = serializers.CharField(required=False, help_text=_("Content to fuzzy-search for across multiple fields"))
    wildcard_search_fields = StringArrayField(required=False, help_text=_("Fields to fuzzy-search across"))
    best_match = serializers.BooleanField(required=False, default=False, help_text=_("Whether to sort by shortest match"))
    time_field = serializers.ChoiceField(
        required=False,
        default="create_time",
        choices=["update_time", "create_time"],
        help_text=_("Time field to filter on; supports update_time and create_time"),
    )
    since = serializers.DateTimeField(
        required=False,
        input_formats=["iso-8601", "%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S"],
        help_text=_("Only return records after this point in time"),
    )
    until = serializers.DateTimeField(
        required=False,
        input_formats=["iso-8601", "%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S"],
        help_text=_("Only return records before this point in time"),
    )
    include_disabled = serializers.BooleanField(required=False, default=False, help_text=_("Whether to include soft-deleted data"))
class AdvancedRetrieveSerialzier(serializers.Serializer):
    fields = serializers.CharField(required=False, help_text=_("Fields to return for the object; multiple values allowed, comma-separated, e.g. username,status,id"))
    lookup_field = serializers.CharField(required=False, help_text=_("Field to look up by, i.e. the field that lookup_value refers to, e.g. username"))
    include_disabled = serializers.BooleanField(required=False, default=False, help_text=_("Whether to include soft-deleted data"))
class EmptySerializer(serializers.Serializer):
    """Empty serializer."""
class DurationTotalSecondField(fields.Field):
def to_internal_value(self, value) -> datetime.timedelta:
if isinstance(value, float):
value = str(value)
return fields.parse_duration(value)
def to_representation(self, value: datetime.timedelta):
return value.total_seconds()
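# Illustrative round-trip (assumed values):
#
#     DurationTotalSecondField().to_internal_value("90")
#     # -> datetime.timedelta(seconds=90)
#     DurationTotalSecondField().to_representation(datetime.timedelta(minutes=1, seconds=30))
#     # -> 90.0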
#########
# Batch #
#########
class BatchRetrieveSerializer(serializers.Serializer):
    query_ids = serializers.CharField(help_text="List of ids to query, comma-separated")
|
the-stack_106_14559
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 11:59:19 2020
@author: pmchozas
"""
import requests
import json
import re
from unicodedata import normalize
# from modules_api import wsidCode
# from modules_api import extrafunctions
# from modules_api import jsonFile
# from modules_api import unesco
# import logging
#from modules_api import Term
def enrich_term_eurovoc(myterm):
    get_uri(myterm) # first fetch the URI
    # disambiguation could happen here???
    get_definition(myterm) # fetch the definition, if any, to build the vector (I think)
    get_relations(myterm) # fetch broader, narrower and related relations
    get_synonyms(myterm) # fetch synonyms in langIn
get_translations(myterm)
create_intermediate_ids(myterm)
return myterm
def get_uri(myterm): # fetches the URI of the term to look up
term='"^'+myterm.term+'$"'
plural='"^'+myterm.term+'s'+'$"'
euterm='"^'+myterm.term+' \\\(EU\\\)'+'$"'
ueterm='"^'+myterm.term+' \\\(UE\\\)'+'$"'
lang='"'+myterm.langIn+'"'
try:
url = ("http://sparql.lynx-project.eu/")
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
?c a skos:Concept .
?c ?p ?label.
FILTER regex(?label, """+term+""", "i" )
FILTER (lang(?label) = """+lang+""")
FILTER (?p IN (skos:prefLabel, skos:altLabel ) )
}
}
"""
# print(query)
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
print('NEXT')
try:
url = ("http://sparql.lynx-project.eu/")
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
?c a skos:Concept .
?c ?p ?label.
FILTER regex(?label, """+plural+""", "i" )
FILTER (lang(?label) = """+lang+""")
FILTER (?p IN (skos:prefLabel, skos:altLabel ) )
}
}
"""
print(query)
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
print('NEXT EU')
try:
url = ("http://sparql.lynx-project.eu/")
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
?c a skos:Concept .
?c ?p ?label.
FILTER regex(?label, """+euterm+""", "i" )
FILTER (lang(?label) = """+lang+""")
FILTER (?p IN (skos:prefLabel, skos:altLabel ) )
}
}
"""
print(query)
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
answeruri=''
print('NEXT UE')
try:
url = ("http://sparql.lynx-project.eu/")
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
?c a skos:Concept .
?c ?p ?label.
FILTER regex(?label, """+ueterm+""", "i" )
FILTER (lang(?label) = """+lang+""")
FILTER (?p IN (skos:prefLabel, skos:altLabel ) )
}
}
"""
# print(query)
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
answeruri=''
print('NO URI')
else:
for result in results["results"]["bindings"]:
answeruri=result["c"]["value"]
#answerl=result["label"]["value"]
myterm.eurovoc_id=answeruri
except:
print('no term')
else:
for result in results["results"]["bindings"]:
answeruri=result["c"]["value"]
#answerl=result["label"]["value"]
myterm.eurovoc_id=answeruri
except:
print('no term')
else:
for result in results["results"]["bindings"]:
answeruri=result["c"]["value"]
#answerl=result["label"]["value"]
myterm.eurovoc_id=answeruri
except:
print('no term')
else:
for result in results["results"]["bindings"]:
answeruri=result["c"]["value"]
#answerl=result["label"]["value"]
myterm.eurovoc_id=answeruri
except:
print('no term')
return myterm
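# Summary of the fallback order above (hedged, with a hypothetical term): for
# myterm.term = "data protection" and langIn = "en" the function tries an exact label
# match ("^data protection$"), then the naive plural ("^data protections$"), then the
# "(EU)" and "(UE)" suffixed variants, storing the first EuroVoc URI found in
# myterm.eurovoc_id and leaving it unset when every pattern fails.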
def get_definition(myterm): # fetches the definition for the concept URI, if any
try:
definition=''
url=("http://sparql.lynx-project.eu/")
lang='"'+myterm.langIn+'"'
query="""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
              VALUES ?c { <"""+myterm.eurovoc_id+"""> }
VALUES ?searchLang { """+lang+""" undef }
VALUES ?relation { skos:definition }
?c a skos:Concept .
?c ?relation ?label .
filter ( lang(?label)=?searchLang )
}
}
"""
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
definition=''
else:
for result in results["results"]["bindings"]:
definition=result["label"]["value"]
myterm.definitions_eurovoc[myterm.langIn]=definition
except json.decoder.JSONDecodeError:
pass
return(myterm)
def get_relations(myterm): # fetches the URIs of broader/narrower/related concepts
reltypes=['broader', 'narrower', 'related']
try:
for rel in reltypes:
if rel not in myterm.eurovoc_relations:
myterm.eurovoc_relations[rel]=[]
url=("http://sparql.lynx-project.eu/")
query="""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
VALUES ?c {<"""+myterm.eurovoc_id+"""> }
VALUES ?relation { skos:"""+rel+""" } # skos:broader
?c a skos:Concept .
?c ?relation ?label .
}
}
"""
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
# print(query)
if (len(results["results"]["bindings"])==0):
answerRel=''
else:
for result in results["results"]["bindings"]:
answerRel=result["label"]["value"]
                    myterm.eurovoc_relations[rel].append(answerRel)
# name=name_term_eurovoc(answerRel,lang,'prefLabel')
# answer.append([answerRel, name, relation])
except json.decoder.JSONDecodeError:
pass
return(myterm)
def get_synonyms(myterm): # fetches synonyms in langIn
try:
nameUri=''
label="altLabel"
lang='"'+myterm.langIn+'"'
url=("http://sparql.lynx-project.eu/")
query="""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
VALUES ?c { <"""+myterm.eurovoc_id+"""> }
VALUES ?searchLang { """+lang+""" undef }
VALUES ?relation { skos:"""+label+""" }
?c a skos:Concept .
?c ?relation ?label .
filter ( lang(?label)=?searchLang )
}
}
"""
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
if (len(results["results"]["bindings"])==0):
nameUri=''
else:
for result in results["results"]["bindings"]:
nameUri=result["label"]["value"]
if nameUri != myterm.term:
myterm.synonyms_eurovoc.append(nameUri)
except json.decoder.JSONDecodeError:
pass
    return(myterm)
def get_translations(myterm): # fetches translations for each language in langOut
for lang in myterm.langOut:
if lang not in myterm.translations_eurovoc:
myterm.translations_eurovoc[lang]=[]
try:
lang1='"'+lang+'"'
url=("http://sparql.lynx-project.eu/")
labels=['prefLabel', 'altLabel']
for label in labels:
query="""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?c ?label
WHERE {
GRAPH <http://sparql.lynx-project.eu/graph/eurovoc> {
VALUES ?c { <"""+myterm.eurovoc_id+"""> }
VALUES ?searchLang { """+lang1+""" undef}
VALUES ?relation { skos:"""+label+""" }
?c a skos:Concept .
?c ?relation ?label .
filter ( lang(?label)=?searchLang )
}
}
"""
r=requests.get(url, params={'format': 'json', 'query': query})
results=json.loads(r.text)
# print(query)
if (len(results["results"]["bindings"])==0):
trans=''
else:
for result in results["results"]["bindings"]:
trans=result["label"]["value"]
trans=trans.replace('(', '')
trans=trans.replace(')', '')
print(trans)
myterm.translations_eurovoc[lang].append(trans)
except:
continue
return(myterm)
def create_intermediate_ids(myterm):
chars=['\'', '\"', '!', '<', '>', ',', '(', ')', '.']
schema=myterm.schema.lower()
if ' ' in schema:
schema=schema.replace(' ', '-')
for char in chars:
schema=schema.replace(char, '')
if len(myterm.synonyms_eurovoc)>0:
myterm.synonyms['eurovoc']={}
myterm.synonyms_ontolex['eurovoc']={}
myterm.synonyms['eurovoc'][myterm.langIn]=[]
myterm.synonyms_ontolex['eurovoc'][myterm.langIn]=[]
for term in myterm.synonyms_eurovoc:
syn_set = {}
syn = term
if ' ' in syn:
syn=syn.replace(' ', '-')
for char in chars:
syn=syn.replace(char, '')
synid=schema+'-'+syn+'-'+myterm.langIn
syn_set['syn-id']=synid.lower()
syn_set['syn-value']=syn.replace('-', ' ')
myterm.synonyms['eurovoc'][myterm.langIn].append(syn_set)
myterm.synonyms_ontolex['eurovoc'][myterm.langIn].append(syn_set)
if len(myterm.translations_eurovoc)>0:
myterm.translations['eurovoc']={}
myterm.translations_ontolex['eurovoc']={}
for lang in myterm.langOut:
if lang in myterm.translations_eurovoc.keys():
myterm.translations['eurovoc'][lang]=[]
myterm.translations_ontolex['eurovoc'][lang]=[]
for term in myterm.translations_eurovoc[lang]:
trans_set = {}
                    if ' ' in term:
term=term.replace(' ', '-')
for char in chars:
term=term.replace(char, '')
transid=schema+'-'+term+'-'+lang
trans_set['trans-id']=transid.lower()
trans_set['trans-value']=term.replace('-', ' ')
# print(trans_set)
myterm.translations_ontolex['eurovoc'][lang].append(trans_set)
if len(myterm.translations['eurovoc'][lang])<=0:
myterm.translations['eurovoc'][lang].append(trans_set)
else:
if 'eurovoc' in myterm.synonyms:
if lang in myterm.synonyms['eurovoc']:
myterm.synonyms['eurovoc'][lang].append(trans_set)
else:
myterm.synonyms['eurovoc'][lang]=[]
myterm.synonyms['eurovoc'][lang].append(trans_set)
else:
myterm.synonyms['eurovoc']={}
myterm.synonyms['eurovoc'][lang]=[]
myterm.synonyms['eurovoc'][lang].append(trans_set)
if len(myterm.definitions_eurovoc)>0:
myterm.definitions['eurovoc']={}
for lang in myterm.definitions_eurovoc.keys():
myterm.definitions['eurovoc'][lang]=[]
for defi in myterm.definitions_eurovoc[lang]:
def_set = {}
defid=myterm.term+'-'+lang+'-def'
def_set['def-id']=defid.lower()
def_set['def-value']=defi
myterm.definitions['eurovoc'][lang].append(def_set)
return myterm
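# Illustrative id construction (hypothetical values): with myterm.schema = "Data Protection",
# a synonym "personal data" and langIn = "en", the generated syn-id is
# "data-protection-personal-data-en"; translation ids follow the same
# "<schema>-<term>-<lang>" pattern, lowercased with punctuation stripped.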
|
the-stack_106_14561
|
from typing import List
class Solution:
def XXX(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
#0,0 -> 0,3 - > 3,3 -> 3,0 -> 0,0
#0,1 -> 1,3 -> 3,2 -> 2,0-> 0,1
#0,2 -> 2,3 -> 3,1 -> 1,0 -> 0,2
n = len(matrix)
for i in range(n//2):
for j in range(i,n-i-1):
temp = matrix[i][j]
matrix[i][j] = matrix[n-1-j][i]
matrix[n-1-j][i] = matrix[n-1-i][n-1-j]
matrix[n-1-i][n-1-j] = matrix[j][n-1-i]
matrix[j][n-1-i] = temp
return matrix
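# Illustrative check (not part of the original snippet): applying the four-way swap above
# to [[1, 2, 3], [4, 5, 6], [7, 8, 9]] rotates it 90 degrees clockwise in place, giving
# [[7, 4, 1], [8, 5, 2], [9, 6, 3]].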
|