filename | text
---|---
the-stack_106_14014
|
import cv2
import base64
import numpy as np
def base64_encode_image(img):
    # Encode to JPEG bytes, then to a base64 text string.
    img = cv2.imencode('.jpg', img)[1]
    img = base64.b64encode(img).decode('utf-8')
    return img
def base64_decode_image(img_data):
img_b64decode = base64.b64decode(img_data)
img_array = np.frombuffer(img_b64decode, np.uint8)
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
return img
img=cv2.imread("/Users/ty/Desktop/毕业设计/Project/facebank/lyf/timg.jpg")
encode=base64_encode_image(img)
print(encode)
decode=base64_decode_image(encode)
cv2.imshow("r",decode)
cv2.waitKey(0)
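# Quick round-trip sanity check (a sketch added here, not part of the original
# script). JPEG encoding is lossy, so the decoded image will not be bit-identical
# to `img`, but shape and dtype should survive the encode/decode round trip.
print(decode.shape == img.shape, decode.dtype == img.dtype)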
|
the-stack_106_14016
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import distro
import unittest
import numpy as np
from math import isnan
from nimbusml import Pipeline
from nimbusml.linear_model import FastLinearRegressor
from nimbusml.preprocessing import ToKeyImputer
from nimbusml.preprocessing.missing_values import Filter, Handler, Indicator
from pandas import DataFrame
from sklearn.utils.testing import assert_equal, assert_true, \
assert_allclose
class TestDataWithMissing(unittest.TestCase):
def test_missing(self):
data = DataFrame(data=dict(
f0=[np.nan, 1, 2, 3, 4, 5, 6],
f1=[1, 2, np.nan, 3, 4, 5, 6],
f2=[np.nan, 1, np.nan, 2, 3, np.nan, 4]))
for col in data.columns:
xf = Filter(columns=[col])
filtered = xf.fit_transform(data)
count = [isinstance(x, str) or not isnan(x)
for x in data[col]].count(True)
assert_equal(filtered.shape[0], count)
def test_null(self):
data = DataFrame(data=dict(
f0=[None, 1, 2, 3, 4, 5, 6],
f1=[1, 2, np.nan, 3, 4, 5, 6],
f2=[np.nan, 1, np.nan, 2, 3, np.nan, 4]))
for col in data.columns:
xf = Filter(columns=[col])
filtered = xf.fit_transform(data)
count = [x is None or isinstance(x, str) or not isnan(
x) for x in data[col]].count(True)
count_none = [x is None for x in data[col]].count(True)
assert_equal(filtered.shape[0], count)
assert_equal(
count_none, [
x is None for x in filtered[col]].count(True))
def test_inf(self):
data = DataFrame(data=dict(
f0=[np.inf, 1, 2, 3, 4, 5, 6],
f1=[1, 2, -np.Infinity, 3, 4, 5, 6]))
xf = Filter(columns=['f0'])
filtered = xf.fit_transform(data)
assert_equal(filtered['f0'][0], np.inf)
assert_equal(filtered['f1'][2], -np.inf)
def test_input_types(self):
df = DataFrame(
data=dict(
Label=[
1, 2, 3, 4, 5], f=[
1.1, 2.2, 3.3, np.nan, 5.5], f1=[
2.2, np.nan, 4.4, 5.5, 6.6]))
h = Handler(replace_with='Mean')
ft = FastLinearRegressor(shuffle=False, number_of_threads=1)
p = Pipeline([h, ft])
p.fit(df[['f', 'f1']].values, df['Label'])
res = p.predict(df[['f', 'f1']].values)
print(res)
print(p.summary())
assert_allclose(
res['Score'].values, [
4.965541, 0.519701, 4.992831, 3.877400, 5.020121], rtol=1e-4)
def test_input_conversion_to_float(self):
data={'f0': [0, 1, 2, 3],
'f1': [1, 2, 3, 4],
'f2': [1, 2, 3, 4],
'f3': [1, 2, 3, 4],
'f4': ['2', '3', '4', '5'],
'f5': [4, 5, np.nan, 9]}
data = DataFrame(data).astype({
'f0': np.int8,
'f1': np.int16,
'f2': np.int32,
'f3': np.int64,
'f4': str,
'f5': np.float64})
# Check Indicator
xf = Indicator()
result = xf.fit_transform(data)
assert_equal(result.loc[2, 'f5'], True)
result.loc[2, 'f5'] = False
result = ~result
for val in result.all().tolist():
self.assertTrue(val)
# Check Filter
xf = Filter()
result = xf.fit_transform(data)
assert_equal(len(result), 3)
assert_equal(result.loc[2, 'f5'], 9.0)
# Check Handler
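        # (For reference: Handler emits both an imputed value column, e.g. 'f5.f5',
        #  and a missing-value indicator column, e.g. 'f5.IsMissing.f5', which is
        #  what the assertions below check.)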
xf = Handler(replace_with='Mean')
result = xf.fit_transform(data)
assert_equal(len(result), 4)
assert_equal(result.loc[2, 'f5.f5'], 6.0)
assert_equal(result.loc[2, 'f5.IsMissing.f5'], 1.0)
def test_input_conversion_to_float_retains_other_column_types(self):
data={'f0': [0, 1, 2, 3],
'f1': ['2', '3', '4', '5'],
'f2': [4, 5, np.nan, 9]}
data = DataFrame(data).astype({
'f0': np.int32,
'f1': str,
'f2': np.float64})
# Check Indicator
xf = Indicator(columns={'f2.ind': 'f2'})
result = xf.fit_transform(data)
assert_equal(result.dtypes['f0'], np.int32)
assert_equal(result.dtypes['f1'], np.object)
assert_equal(result.dtypes['f2'], np.float64)
assert_equal(result.dtypes['f2.ind'], np.bool)
assert_equal(result.loc[2, 'f2.ind'], True)
assert_equal(len(result), 4)
# Check Filter
xf = Filter(columns=['f2'])
result = xf.fit_transform(data)
assert_equal(len(result), 3)
assert_equal(result.loc[2, 'f2'], 9.0)
assert_equal(result.dtypes['f0'], np.int32)
assert_equal(result.dtypes['f1'], np.object)
assert_equal(result.dtypes['f2'], np.float32)
xf = Filter(columns=['f1'])
result = xf.fit_transform(data)
assert_equal(len(result), 4)
assert_equal(result.loc[3, 'f2'], 9.0)
assert_equal(result.dtypes['f0'], np.int32)
assert_equal(result.dtypes['f1'], np.float32)
assert_equal(result.dtypes['f2'], np.float64)
# Check Handler
xf = Handler(columns=['f2'], replace_with='Mean')
result = xf.fit_transform(data)
assert_equal(len(result), 4)
assert_equal(result.loc[2, 'f2.f2'], 6.0)
assert_equal(result.dtypes['f0'], np.int32)
assert_equal(result.dtypes['f1'], np.object)
assert_equal(result.dtypes['f2.f2'], np.float32)
@unittest.skipIf('centos' in distro.linux_distribution(full_distribution_name=False)[0].lower(), "centos is not supported")
def test_category_imputation(self):
data={'f0': [4, 4, np.nan, 9],
'f1': [4, 4, np.nan, np.nan]}
data = DataFrame(data)
# Check ToKeyImputer
xf = ToKeyImputer(columns={'f0.out': 'f0', 'f1.out': 'f1'})
result = xf.fit_transform(data)
assert_equal(result['f0.out'][1], 4)
assert_equal(result['f0.out'][2], 4)
assert_equal(result['f1.out'][1], 4)
assert_equal(result['f1.out'][2], 4)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_14017
|
import pandas as pd
import pytest
import numpy as np
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
N = 40
df = pd.DataFrame({'a': np.random.randn(N).cumsum(),
'b': np.random.randint(100, size=(N,)),
'c': np.random.randint(100, size=(N,)),
'd': np.random.randint(100, size=(N,)),
'e': np.random.randint(100, size=(N,))})
ddf = dd.from_pandas(df, 3)
idx = (pd.date_range('2016-01-01', freq='3s', periods=100) |
pd.date_range('2016-01-01', freq='5s', periods=100))[:N]
ts = pd.DataFrame({'a': np.random.randn(N).cumsum(),
'b': np.random.randint(100, size=(N,)),
'c': np.random.randint(100, size=(N,)),
'd': np.random.randint(100, size=(N,)),
'e': np.random.randint(100, size=(N,))},
index=idx)
dts = dd.from_pandas(ts, 3)
def shifted_sum(df, before, after, c=0):
a = df.shift(before)
b = df.shift(-after)
return df + a + b + c
def ts_shifted_sum(df, before, after, c=0):
a = df.shift(before.seconds)
b = df.shift(-after.seconds)
return df + a + b + c
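# Note (added for clarity): in the map_overlap calls below, `before` and `after`
# appear twice because dd.DataFrame.map_overlap(func, before, after, *args, **kwargs)
# forwards the remaining positional arguments to `func`, which here also takes
# its own (before, after) pair.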
@pytest.mark.parametrize('npartitions', [1, 4])
def test_map_overlap(npartitions):
ddf = dd.from_pandas(df, npartitions)
for before, after in [(0, 3), (3, 0), (3, 3), (0, 0)]:
# DataFrame
res = ddf.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df, before, after, c=2)
assert_eq(res, sol)
# Series
res = ddf.b.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df.b, before, after, c=2)
assert_eq(res, sol)
def test_map_partitions_names():
npartitions = 3
ddf = dd.from_pandas(df, npartitions)
res = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
res2 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
assert set(res.dask) == set(res2.dask)
res3 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=3)
assert res3._name != res._name
# Difference is just the final map
diff = set(res3.dask).difference(res.dask)
assert len(diff) == npartitions
res4 = ddf.map_overlap(shifted_sum, 3, 0, 0, 3, c=2)
assert res4._name != res._name
def test_map_partitions_errors():
# Non-integer
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0.5, 3, 0, 2, c=2)
# Negative
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0, -5, 0, 2, c=2)
# Partition size < window size
with pytest.raises(NotImplementedError):
ddf.map_overlap(shifted_sum, 0, 100, 0, 100, c=2).compute()
# Offset with non-datetime
with pytest.raises(TypeError):
ddf.map_overlap(shifted_sum, pd.Timedelta('1s'), pd.Timedelta('1s'),
0, 2, c=2)
def mad(x):
return np.fabs(x - x.mean()).mean()
rolling_method_args_check_less_precise = [
('count', (), False),
('sum', (), False),
('mean', (), False),
('median', (), False),
('min', (), False),
('max', (), False),
('std', (), False),
('var', (), False),
('skew', (), True), # here and elsewhere, results for kurt and skew are
('kurt', (), True), # checked with check_less_precise=True so that we are
# only looking at 3ish decimal places for the equality check
# rather than 5ish. I have encountered a case where a test
# seems to have failed due to numerical problems with kurt.
# So far, I am only weakening the check for kurt and skew,
# as they involve third degree powers and higher
('quantile', (.38,), False),
('apply', (mad,), False),
]
@pytest.mark.parametrize('method,args,check_less_precise',
rolling_method_args_check_less_precise)
@pytest.mark.parametrize('window', [1, 2, 4, 5])
@pytest.mark.parametrize('center', [True, False])
def test_rolling_methods(method, args, window, center, check_less_precise):
# DataFrame
prolling = df.rolling(window, center=center)
drolling = ddf.rolling(window, center=center)
if method == 'apply' and PANDAS_VERSION >= '0.23.0':
kwargs = {'raw': False}
else:
kwargs = {}
assert_eq(getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise)
# Series
prolling = df.a.rolling(window, center=center)
drolling = ddf.a.rolling(window, center=center)
assert_eq(getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise)
@pytest.mark.skipif(PANDAS_VERSION >= '0.23.0', reason="Raw is allowed.")
def test_rolling_raw_pandas_lt_0230_raises():
with pytest.raises(TypeError):
df.rolling(2).apply(mad, raw=True)
def test_rolling_raises():
df = pd.DataFrame({'a': np.random.randn(25).cumsum(),
'b': np.random.randint(100, size=(25,))})
ddf = dd.from_pandas(df, 3)
pytest.raises(ValueError, lambda: ddf.rolling(1.5))
pytest.raises(ValueError, lambda: ddf.rolling(-1))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=1.2))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=-2))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis=10))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis='coulombs'))
pytest.raises(NotImplementedError, lambda: ddf.rolling(100).mean().compute())
def test_rolling_names():
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.rolling(2).sum().dask) == sorted(a.rolling(2).sum().dask)
def test_rolling_axis():
df = pd.DataFrame(np.random.randn(20, 16))
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.rolling(3, axis=0).mean(), ddf.rolling(3, axis=0).mean())
assert_eq(df.rolling(3, axis=1).mean(), ddf.rolling(3, axis=1).mean())
assert_eq(df.rolling(3, min_periods=1, axis=1).mean(),
ddf.rolling(3, min_periods=1, axis=1).mean())
assert_eq(df.rolling(3, axis='columns').mean(),
ddf.rolling(3, axis='columns').mean())
assert_eq(df.rolling(3, axis='rows').mean(),
ddf.rolling(3, axis='rows').mean())
s = df[3]
ds = ddf[3]
assert_eq(s.rolling(5, axis=0).std(), ds.rolling(5, axis=0).std())
def test_rolling_partition_size():
df = pd.DataFrame(np.random.randn(50, 2))
ddf = dd.from_pandas(df, npartitions=5)
for obj, dobj in [(df, ddf), (df[0], ddf[0])]:
assert_eq(obj.rolling(10).mean(), dobj.rolling(10).mean())
assert_eq(obj.rolling(11).mean(), dobj.rolling(11).mean())
with pytest.raises(NotImplementedError):
dobj.rolling(12).mean().compute()
def test_rolling_repr():
ddf = dd.from_pandas(pd.DataFrame([10] * 30), npartitions=3)
assert repr(ddf.rolling(4)) == 'Rolling [window=4,center=False,axis=0]'
def test_time_rolling_repr():
assert repr(dts.rolling('4s')) == (
'Rolling [window=4000000000,center=False,win_type=freq,axis=0]')
def test_time_rolling_constructor():
result = dts.rolling('4s')
assert result.window == '4s'
assert result.min_periods is None
assert result.win_type is None
assert result._win_type == 'freq'
assert result._window == 4000000000 # ns
assert result._min_periods == 1
@pytest.mark.parametrize('method,args,check_less_precise',
rolling_method_args_check_less_precise)
@pytest.mark.parametrize('window', ['1S', '2S', '3S', pd.offsets.Second(5)])
def test_time_rolling_methods(method, args, window, check_less_precise):
# DataFrame
if method == 'apply' and PANDAS_VERSION >= '0.23.0':
kwargs = {"raw": False}
else:
kwargs = {}
prolling = ts.rolling(window)
drolling = dts.rolling(window)
assert_eq(getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise)
# Series
prolling = ts.a.rolling(window)
drolling = dts.a.rolling(window)
assert_eq(getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise)
@pytest.mark.parametrize('window', [pd.Timedelta('31s'), pd.Timedelta('1M')])
def test_time_rolling_window_too_large(window):
with pytest.raises(ValueError):
dts.map_overlap(ts_shifted_sum, window, window, window, window, c=2)
@pytest.mark.parametrize('before, after', [
('6s', '6s'),
('2s', '2s'),
('6s', '2s'),
])
def test_time_rolling(before, after):
window = before
before = pd.Timedelta(before)
after = pd.Timedelta(after)
result = dts.map_overlap(lambda x: x.rolling(window).count(), before, after)
expected = dts.compute().rolling(window).count()
assert_eq(result, expected)
def test_rolling_agg_aggregate():
df = pd.DataFrame({'A': range(5), 'B': range(0, 10, 2)})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.rolling(window=3).agg([np.mean, np.std]),
ddf.rolling(window=3).agg([np.mean, np.std]))
assert_eq(df.rolling(window=3).agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)}),
ddf.rolling(window=3).agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)}))
assert_eq(df.rolling(window=3).agg([np.sum, np.mean]),
ddf.rolling(window=3).agg([np.sum, np.mean]))
assert_eq(df.rolling(window=3).agg({'A': [np.sum, np.mean]}),
ddf.rolling(window=3).agg({'A': [np.sum, np.mean]}))
assert_eq(df.rolling(window=3).apply(lambda x: np.std(x, ddof=1)),
ddf.rolling(window=3).apply(lambda x: np.std(x, ddof=1)))
|
the-stack_106_14018
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import json
import logging
import settings
from framework import basehandlers
from framework import permissions
from framework import utils
from internals import models
from framework import ramcache
from google.appengine.api import users
class FeaturesJsonHandler(basehandlers.FlaskHandler):
HTTP_CACHE_TYPE = 'private'
JSONIFY = True
def get_template_data(self, version=2):
user = users.get_current_user()
feature_list = models.Feature.get_chronological(
version=version, show_unlisted=permissions.can_view_feature(user, None))
return feature_list
class FeatureListHandler(basehandlers.FlaskHandler):
TEMPLATE_PATH = 'features.html'
def get_template_data(self, feature_id=None):
# Note: feature_id is not used here but JS gets it from the URL.
# This template data is all for filtering. The actual features
# are sent by an XHR request for /features.json.
template_data = {}
template_data['categories'] = [
(v, utils.normalized_name(v)) for k,v in
models.FEATURE_CATEGORIES.iteritems()]
template_data['IMPLEMENTATION_STATUSES'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.IMPLEMENTATION_STATUS.iteritems()])
template_data['VENDOR_VIEWS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.VENDOR_VIEWS.iteritems()])
template_data['WEB_DEV_VIEWS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.WEB_DEV_VIEWS.iteritems()])
template_data['STANDARDS_VALS'] = json.dumps([
{'key': k, 'val': v} for k,v in
models.STANDARDIZATION.iteritems()])
return template_data
class FeatureListXMLHandler(basehandlers.FlaskHandler):
def get_template_data(self):
status = self.request.args.get('status', None)
if status:
feature_list = models.Feature.get_all_with_statuses(status.split(','))
else:
filterby = None
category = self.request.args.get('category', None)
# Support setting larger-than-default Atom feed sizes so that web
# crawlers can use this as a full site feed.
try:
max_items = int(self.request.args.get(
'max-items', settings.RSS_FEED_LIMIT))
except TypeError:
max_items = settings.RSS_FEED_LIMIT
if category is not None:
for k,v in models.FEATURE_CATEGORIES.iteritems():
normalized = utils.normalized_name(v)
if category == normalized:
filterby = ('category =', k)
break
feature_list = models.Feature.get_all( # cached
limit=max_items,
filterby=filterby,
order='-updated')
return utils.render_atom_feed(self.request, 'Features', feature_list)
routes = [
# Note: The only requests being made now hit /features.json and
# /features_v2.json, but both of those cause version == 2.
    # There was logic to accept another version value, but it was not used.
(r'/features.json', FeaturesJsonHandler),
(r'/features_v2.json', FeaturesJsonHandler),
('/', basehandlers.Redirector,
{'location': '/features'}),
('/features', FeatureListHandler),
('/features/<int:feature_id>', FeatureListHandler),
('/features.xml', FeatureListXMLHandler),
]
app = basehandlers.FlaskApplication(routes, debug=settings.DEBUG)
|
the-stack_106_14022
|
'''
This file contains the actual surrogate model function. In other words:
the class below defines the basis functions used for model construction,
as well as helper methods for evaluating the surrogate model afterwards.
TODO:
* If anyone has ideas for how to send a slice x[:,i] from
Pyomo instead of explicitly operating on 2D arrays below,
feel free to implement it. (I couldn't get that to work.)
'''
import numpy as np
import scipy.special
import itertools
import functools
import operator
class Surrogate:
'''
This class is a general representation of a surrogate model.
When you instantiate the object, you have to provide a dict
filled with configuration options (usually read from file).
    The constructor then figures out the dimension of the input
space and what basis functions to use for model construction.
The result is a callable object, which evaluates a surrogate
model of the given class when provided with values for the
relevant regression parameters (p) and variable values (x).
The surrogate model is constructed based on 1-dimensional
basis functions bₙ(u). The simplest example is the monomial
basis bₙ(u) = uⁿ. When modeling e.g. a 3-dimensional input
space, and using a basis of order 2, we could then use the
following products as our 3-dimensional basis functions:
1, x, y, z, x², y², z², xy, xz, yz.
In the more general notation, these can be written:
b₀(x) b₀(y) b₀(z),
b₁(x) b₀(y) b₀(z), b₀(x) b₁(y) b₀(z), b₀(x) b₀(y) b₁(z),
b₂(x) b₀(y) b₀(z), b₀(x) b₂(y) b₀(z), b₀(x) b₀(y) b₂(z),
b₁(x) b₁(y) b₀(z), b₁(x) b₀(y) b₁(z), b₀(x) b₁(y) b₁(z).
In other words, we construct products bₙ(x) bₘ(y) bₖ(z) such
that the subscripts n+m+k ≤ t, where t=2 was the total order
used above. This strategy is easily generalized to any dimension,
using any set of basis functions {bₙ}, and any model order t.
Note that the 3-dimensional basis functions bₙ(x) bₘ(y) bₖ(z)
above can be classified by their indices (n,m,k). All allowed
such index tuples are stored in the member variable self.index.
Note also that the basis functions bₙ(u) available are defined
near the end of this class, and new ones can easily be added.
'''
def __init__(self, conf):
'''
Construct a surrogate model. All parameters necessary for
model creation should be provided via the dictionary `conf`.
'''
# Save all config options
self.conf = conf
# Dimension of the input variable
self.dim = conf['input_dim']
# How many basis functions to use
self.order = conf['model_order']
# Used to save regression parameters
self.p = None
# Used for constraints data file
self.data = None
# Select what class of basis functions to use
key = 'model_class'
val = conf[key]
try:
self.basis = getattr(self, 'basis_' + val)
except:
raise ValueError('"{}" cannot be set to "{}".'.format(key, val))
# Calculate all acceptable basis index tuples. This is done by:
# * Generating a list of acceptable indices in one dimension,
# which can be directly found from the variable self.order;
# * Taking the Cartesian product of one such list per dimension;
# * Constraining the sum of indices in order to get the acceptable
# subset of basis function indices in higher dimensions.
possible = itertools.product(range(self.order + 1), repeat=self.dim)
# All combinations of dim-tuples with one basis index from each range
self.index = [k for k in possible if sum(k) <= self.order]
# Number of regression coefficients
self.terms = len(self.index)
# Save the variable bounds (used for standardizing variables)
self.lower_bound = conf['input_lb']
self.upper_bound = conf['input_ub']
self.xs = [np.max([np.abs(b[0]), np.abs(b[1])]) for b
in zip(self.lower_bound, self.upper_bound)]
def __call__(self, x, p=None, pos=None):
'''
Evaluate the surrogate model using regression parameters
given by `p` and input variables given by `x`. If the
parameters p are not specified, the model will attempt
to look for regression parameters saved in the object.
Arguments:
x:
Variables used in the model (x).
p:
Parameters used in the model (θ).
pos:
If this argument is set, the function is called
                from Pyomo, and this is an additional array index.
'''
# Check whether regression parameters have been supplied
if p is None: p = self.p
# For technical reasons, the code below requires 2D arrays to work
# with the Pyomo model. This converts a 1D array to a 2D array if
# such an array is provided from NumPy/NOMAD instead of Pyomo.
if pos is None:
pos = 0
x = np.asmatrix(x).transpose()
        # Actual model definition
return sum(p[j] * self.product(x, j, pos) for j in range(len(p)))
def standard(self, x):
'''
Standardize variables x based on their known bounds.
'''
return [(x[i] - self.lower_bound[i]) / (self.upper_bound[i] - self.lower_bound[i]) for i in range(self.dim)]
def restore(self, x):
'''
Restore the true values of standardized variables x.
'''
return [self.lower_bound[i] + x[i] * (self.upper_bound[i] - self.lower_bound[i]) for i in range(self.dim)]
def product(self, x, n, m):
'''
This function constructs the n'th basis function in any dimension from
the known basis functions in one dimension. The result is evaluated at x.
'''
# Evaluate basis function number self.index[n][k] at point x[k,m]
factors = (self.basis(x[k, m], self.index[n][k]) for k in range(self.dim))
# Multiply all the one-dimensional results to get the net result
return functools.reduce(operator.mul, factors, 1)
#################################################################
# BASIS FUNCTIONS
#################################################################
# Define the basis functions available for model construction.
# All basis functions should be static methods, and their names
# should start with `basis_`. They will then be made available
# automatically: if we e.g. set the option `model_class`
# to `taylor` in the config file, the program automatically
# searches for a function named `basis_taylor` below.
#
# The basis function itself should take in a variable x∊[0,1] and
# integer n≥0, and then return the value of the n'th basis function
# evaluated at x. Note that you only need to define one-dimensional
# basis functions, since the higher-dimensional basis functions are
# automatically constructed from the products of these functions.
#################################################################
@staticmethod
def basis_taylor(x, n):
'''
Monomial basis x**n. Using this as a basis yields a Taylor expansion
around the lower-bound corner of the data set (i.e. the point x=0).
'''
return x**n
@staticmethod
def basis_legendre(x, n):
'''
Legendre polynomial P_n(x). These are rescaled from having a domain
of [0,1] to [-1,1], since that's where they form an orthogonal basis.
'''
return scipy.special.eval_legendre(n, 2*x - 1)
@staticmethod
def basis_chebyshev(x, n):
'''
Chebyshev polynomial T_n(x). These are rescaled from having a domain
of [0,1] to [-1,1], since that's where they form an orthogonal basis.
'''
return scipy.special.eval_chebyt(n, 2*x - 1)
@staticmethod
def basis_fourier(x, n):
'''
        Fourier sine series. If n=0, this function simply returns 1, corresponding
        to a constant term. If n>0, it returns sin(πnx); these functions alternate
        between being even and odd with respect to the centerpoint of [0,1].
'''
return np.sin(np.pi*n*x) if n > 0 else 1
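# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The config keys below
# mirror the ones read in __init__ above; the values are made up for illustration
# and real config files may use different settings.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    conf = {'input_dim': 2, 'model_order': 2, 'model_class': 'legendre',
            'input_lb': [0.0, 0.0], 'input_ub': [1.0, 1.0]}
    model = Surrogate(conf)
    # One index tuple (n, m) per basis product with n + m <= model_order:
    # (0,0), (0,1), (0,2), (1,0), (1,1), (2,0)  ->  6 regression terms.
    print(model.index, model.terms)
    # Evaluate the model with dummy regression parameters at a point in [0,1]^2.
    p = [1.0] * model.terms
    print(model([0.3, 0.7], p))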
|
the-stack_106_14025
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from omegaconf import DictConfig
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(
target_tokens,
cfg.common_eval.post_process,
escape_unk=True,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(
generator
),
)
src_str = decode_fn(src_str)
if has_target:
target_str = decode_fn(target_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
if not cfg.common_eval.quiet:
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print(
"H-{}\t{}\t{}".format(sample_id, score, hypo_str),
file=output_file,
)
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
print(
"P-{}\t{}".format(
sample_id,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"]
.div_(math.log(2))
.tolist(),
)
),
),
file=output_file,
)
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
",".join(src_probs)
for src_probs in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_step:
print(
"I-{}\t{}".format(sample_id, hypo["steps"]),
file=output_file,
)
if cfg.generation.retain_iter_history:
for step, h in enumerate(hypo["history"]):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens=h["tokens"].int().cpu(),
src_str=src_str,
alignment=None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=None,
)
print(
"E-{}_{}\t{}".format(sample_id, step, h_str),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or cfg.common_eval.post_process is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(
target_str, add_if_not_exist=True
)
hypo_tokens = tgt_dict.encode_line(
detok_hypo_str, add_if_not_exist=True
)
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer
def cli_main():
parser = options.get_generation_parser()
# TODO: replace this workaround with refactoring of `AudioPretraining`
parser.add_argument(
'--arch', '-a', metavar='ARCH', default="wav2vec2",
help='Model architecture. For constructing tasks that rely on '
'model args (e.g. `AudioPretraining`)'
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
the-stack_106_14026
|
# coding:utf-8
def mergeSort(nums):
"""归并排序"""
if len(nums) <= 1:
return nums
mid = len(nums)//2
#left
left_nums = mergeSort(nums[:mid])
#right
right_nums = mergeSort(nums[mid:])
print(left_nums)
print(right_nums)
left_pointer,right_pointer = 0,0
result = []
while left_pointer < len(left_nums) and right_pointer < len(right_nums):
if left_nums[left_pointer] <= right_nums[right_pointer]:
result.append(left_nums[left_pointer])
left_pointer += 1
else:
result.append(right_nums[right_pointer])
right_pointer += 1
result += left_nums[left_pointer:]
result += right_nums[right_pointer:]
return result
if __name__ == "__main__":
li = [45,12,89,33,39,84,26,78,43]
print(li)
sorted_nums = mergeSort(li)
print(sorted_nums)
|
the-stack_106_14028
|
#!/usr/bin/env python
"""
Reads a list of intervals and a set of indexed mafs. For each interval print
the amount covered by each species other than the reference.
usage: %prog maf_files [options] < interval_file
-s, --src=s: Use this src for all intervals
-p, --prefix=p: Prepend this to each src before lookup
"""
from __future__ import division, print_function
import sys
import bx.align.maf
import psyco_full
from bx import intervals, misc
from bx.cookbook import doc_optparse
def __main__():
# Parse Command Line
options, args = doc_optparse.parse( __doc__ )
try:
maf_files = args
if options.prefix: prefix = options.prefix
else: prefix = None
except:
doc_optparse.exit()
# Open indexed access to mafs
indexes = [ bx.align.maf.Indexed( maf_file, maf_file + ".index" ) for maf_file in maf_files ]
# Iterate over input ranges
for line in sys.stdin:
fields = line.split()
src, start, end = fields[0], int( fields[1] ), int( fields[2] )
if prefix: src = prefix + src
total_length = end - start
# Find overlap with reference component
blocks = []
for index in indexes: blocks += index.get( src, start, end )
coverage = dict()
for block in blocks:
overlap_start = max( start, block.components[0].start )
overlap_end = min( end, block.components[0].end )
length = overlap_end - overlap_start
assert length > 0
for c in block.components[1:]:
species = c.src.split( '.' )[0]
try: coverage[ species ] += length
except: coverage[ species ] = length
print(line, end=' ')
for key, value in coverage.items():
print(" ", key.ljust(10), "%0.2f" % ( value / total_length ))
if __name__ == "__main__":
__main__()
|
the-stack_106_14029
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import jinja2
import json
import os
import yaml
from io import StringIO
from dateutil import parser
from dateutil.tz import gettz
def get_jinja_env():
env = jinja2.Environment(trim_blocks=True, autoescape=False)
env.filters['yaml_safe'] = yaml.safe_dump
env.filters['date_time_format'] = date_time_format
env.filters['get_date_time_delta'] = get_date_time_delta
env.globals['format_resource'] = resource_format
env.globals['format_struct'] = format_struct
env.loader = jinja2.FileSystemLoader(
[
os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'msg-templates')), os.path.abspath('/')
]
)
return env
def get_rendered_jinja(target, sqs_message, resources, logger):
env = get_jinja_env()
mail_template = sqs_message['action'].get('template')
if not os.path.isabs(mail_template):
mail_template = '%s.j2' % mail_template
try:
template = env.get_template(mail_template)
except Exception as error_msg:
logger.error("Invalid template reference %s\n%s" % (mail_template, error_msg))
return
rendered_jinja = template.render(
recipient=target,
resources=resources,
account=sqs_message.get('account', ''),
event=sqs_message.get('event', None),
action=sqs_message['action'],
policy=sqs_message['policy'],
region=sqs_message.get('region', ''))
return rendered_jinja
# eg, target_tag_keys could be resource-owners ['Owners', 'SupportTeam']
# and this function would go through the resource and look for any tag keys
# that match Owners or SupportTeam, and return those values as targets
def get_resource_tag_targets(resource, target_tag_keys):
if 'Tags' not in resource:
return []
tags = {tag['Key']: tag['Value'] for tag in resource['Tags']}
targets = []
for target_tag_key in target_tag_keys:
if target_tag_key in tags:
targets.append(tags[target_tag_key])
return targets
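# Illustration (hypothetical resource dict, shown only to make the comment above
# concrete):
# get_resource_tag_targets(
#     {'Tags': [{'Key': 'Owners', 'Value': 'team-a@example.com'}]},
#     ['Owners', 'SupportTeam'])
# would return ['team-a@example.com'].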
def get_message_subject(sqs_message):
default_subject = 'Custodian notification - %s' % (sqs_message['policy']['name'])
subject = sqs_message['action'].get('subject', default_subject)
jinja_template = jinja2.Template(subject)
subject = jinja_template.render(
account=sqs_message.get('account', ''),
region=sqs_message.get('region', '')
)
return subject
def setup_defaults(config):
config.setdefault('region', 'us-east-1')
config.setdefault('ses_region', config.get('region'))
config.setdefault('memory', 1024)
config.setdefault('timeout', 300)
config.setdefault('subnets', None)
config.setdefault('security_groups', None)
config.setdefault('contact_tags', [])
config.setdefault('ldap_uri', None)
config.setdefault('ldap_bind_dn', None)
config.setdefault('ldap_bind_user', None)
config.setdefault('ldap_bind_password', None)
def date_time_format(utc_str, tz_str='US/Eastern', format='%Y %b %d %H:%M %Z'):
return parser.parse(utc_str).astimezone(gettz(tz_str)).strftime(format)
def get_date_time_delta(delta):
return str(datetime.datetime.now().replace(tzinfo=gettz('UTC')) + datetime.timedelta(delta))
def format_struct(evt):
buf = StringIO()
json.dump(evt, buf, indent=2)
return buf.getvalue()
def resource_tag(resource, k):
for t in resource.get('Tags', []):
if t['Key'] == k:
return t['Value']
return ''
def resource_format(resource, resource_type):
if resource_type == 'ec2':
tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())}
return "%s %s %s %s %s %s" % (
resource['InstanceId'],
resource.get('VpcId', 'NO VPC!'),
resource['InstanceType'],
resource.get('LaunchTime'),
tag_map.get('Name', ''),
resource.get('PrivateIpAddress'))
elif resource_type == 'ami':
return "%s %s %s" % (
resource['Name'], resource['ImageId'], resource['CreationDate'])
elif resource_type == 's3':
return "%s" % (resource['Name'])
elif resource_type == 'ebs':
return "%s %s %s %s" % (
resource['VolumeId'],
resource['Size'],
resource['State'],
resource['CreateTime'])
elif resource_type == 'rds':
return "%s %s %s %s" % (
resource['DBInstanceIdentifier'],
"%s-%s" % (
resource['Engine'], resource['EngineVersion']),
resource['DBInstanceClass'],
resource['AllocatedStorage'])
elif resource_type == 'asg':
tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())}
return "%s %s %s" % (
resource['AutoScalingGroupName'],
tag_map.get('Name', ''),
"instances: %d" % (len(resource.get('Instances', []))))
elif resource_type == 'elb':
tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())}
if 'ProhibitedPolicies' in resource:
return "%s %s %s %s" % (
resource['LoadBalancerName'],
"instances: %d" % len(resource['Instances']),
"zones: %d" % len(resource['AvailabilityZones']),
"prohibited_policies: %s" % ','.join(
resource['ProhibitedPolicies']))
return "%s %s %s" % (
resource['LoadBalancerName'],
"instances: %d" % len(resource['Instances']),
"zones: %d" % len(resource['AvailabilityZones']))
elif resource_type == 'redshift':
return "%s %s %s" % (
resource['ClusterIdentifier'],
'nodes:%d' % len(resource['ClusterNodes']),
'encrypted:%s' % resource['Encrypted'])
elif resource_type == 'emr':
return "%s status:%s" % (
resource['Id'],
resource['Status']['State'])
elif resource_type == 'cfn':
return "%s" % (
resource['StackName'])
elif resource_type == 'launch-config':
return "%s" % (
resource['LaunchConfigurationName'])
elif resource_type == 'security-group':
name = resource.get('GroupName', '')
for t in resource.get('Tags', ()):
if t['Key'] == 'Name':
name = t['Value']
return "%s %s %s inrules: %d outrules: %d" % (
name,
resource['GroupId'],
resource.get('VpcId', 'na'),
len(resource.get('IpPermissions', ())),
len(resource.get('IpPermissionsEgress', ())))
elif resource_type == 'log-group':
if 'lastWrite' in resource:
return "name: %s last_write: %s" % (
resource['logGroupName'],
resource['lastWrite'])
return "name: %s" % (resource['logGroupName'])
elif resource_type == 'cache-cluster':
return "name: %s created: %s status: %s" % (
resource['CacheClusterId'],
resource['CacheClusterCreateTime'],
resource['CacheClusterStatus'])
elif resource_type == 'cache-snapshot':
return "name: %s cluster: %s source: %s" % (
resource['SnapshotName'],
resource['CacheClusterId'],
resource['SnapshotSource'])
elif resource_type == 'redshift-snapshot':
return "name: %s db: %s" % (
resource['SnapshotIdentifier'],
resource['DBName'])
elif resource_type == 'ebs-snapshot':
return "name: %s date: %s" % (
resource['SnapshotId'],
resource['StartTime'])
elif resource_type == 'subnet':
return "%s %s %s %s %s %s" % (
resource['SubnetId'],
resource['VpcId'],
resource['AvailabilityZone'],
resource['State'],
resource['CidrBlock'],
resource['AvailableIpAddressCount'])
elif resource_type == 'account':
return " %s %s" % (
resource['account_id'],
resource['account_name'])
elif resource_type == 'cloudtrail':
return " %s %s" % (
resource['account_id'],
resource['account_name'])
elif resource_type == 'vpc':
return "%s " % (
resource['VpcId'])
elif resource_type == 'iam-group':
return " %s %s %s" % (
resource['GroupName'],
resource['Arn'],
resource['CreateDate'])
elif resource_type == 'rds-snapshot':
return " %s %s %s" % (
resource['DBSnapshotIdentifier'],
resource['DBInstanceIdentifier'],
resource['SnapshotCreateTime'])
elif resource_type == 'iam-user':
return " %s " % (
resource['UserName'])
elif resource_type == 'iam-role':
return " %s %s " % (
resource['RoleName'],
resource['CreateDate'])
elif resource_type == 'iam-policy':
return " %s " % (
resource['PolicyName'])
elif resource_type == 'iam-profile':
return " %s " % (
resource['InstanceProfileId'])
elif resource_type == 'dynamodb-table':
return "name: %s created: %s status: %s" % (
resource['TableName'],
resource['CreationDateTime'],
resource['TableStatus'])
else:
print("Unknown resource type", resource_type)
return "%s" % format_struct(resource)
|
the-stack_106_14031
|
import pickle, os, re, json
from datetime import datetime
from .settings_handler import global_settings # the instance not the class.
import gzip, requests
from michelanglo_transpiler import PyMolTranspiler # called by get_offset_coordinates
from collections import defaultdict
import pymol2
from collections import Counter
from warnings import warn
from .metadata_from_PDBe import PDBMeta
from typing import *
import logging
log = logging.getLogger()
class Structure:
# Structure as in "protein structure" not C++ structure
"""
No longer a namedtuple.
"""
settings = global_settings
important_attributes = ['x', 'y', 'id', 'description', 'resolution', 'extra', 'alignment']
temporary_folder = 'temp'
# __slots__ = ['id', 'description', 'x', 'y', 'url','type','chain','offset', 'coordinates', 'extra', 'offset_corrected']
def __init__(self, id, description, x: int, y: int, code, type='rcsb', chain='*', offset: int = 0, coordinates=None,
extra=None, url=''):
"""
Stores the structural data for easy use by FeatureViewer and co. Can be converted to StructureAnalyser
        type = rcsb | swissmodel | homologue | www | local | custom | alphafold2
        The type ``rcsb`` isn't called pdb as that would be ambiguous w/ the format
"""
self.id = id #: RCSB code
self.description = description #: description
self.x = int(x) #: resi in the whole uniprot protein
self.y = int(y) #: end resi in the whole uniprot protein
self.offset_corrected = False # prevents a double trip
# TODO these do not seem to used as are overwritten by chain definitions.
if offset is None:
self.offset = None
else:
self.offset = int(offset) #: offset is the number *subtracted* from the PDB index
# to make it match the position in Uniprot.
self.offsets = {} if chain == '*' else {chain: self.offset} ## this is going to be the only one.
        self.pdb_start = None  # no longer used. To be deleted.
        self.pdb_end = None  # ditto.
self.resolution = 0 #: crystal resolution. 0 or lower will trigger special cases
self.code = code
self.chain_definitions = [] # filled by SIFT. This is a list with a Dict per chain.
self.type = type.lower() #: str: rcsb | swissmodel | homologue | www | local | custom | alphafold2
self.chain = chain #: type str: chain letter or * (all)
self.alignment = {}
if extra is None:
self.extra = {}
else:
self.extra = extra
self.coordinates = coordinates #: PDBblock
self.url = url # for type = www or local or swissmodel
# https://files.rcsb.org/download/{self.code}.pdb does not work (often) while the url is something odd.
def is_satisfactory(self, resi: int):
with pymol2.PyMOL() as pymol:
pymol.cmd.read_pdbstr(self.coordinates, 'given_protein')
residex = defaultdict(list)
note = 'custom protein'
for atom in pymol.cmd.get_model('name CA').atom:
residex[atom.chain].append(atom.resi)
if len(residex) == 1 and 'A' not in residex:
note += ' - chain moved to A'
pymol.cmd.alter('given_protein', 'chain="A"')
pymol.cmd.sort()
move = list(residex.values())[0]
residex = {'A': move}
self.coordinates = pymol.cmd.get_pdbstr()
if not self.chain_definitions:
self.chain_definitions = [{'chain': chain,
'uniprot': "XXX",
'x': min(residex[chain]),
'y': max(residex[chain]),
'offset': 0,
'range': f'0-9999',
'name': self.code,
'description': note} for chain in residex]
assert pymol.cmd.select('given_protein'), 'Given protein had no valid data to load'
assert pymol.cmd.select('chain A'), 'Given protein has no chain A'
assert pymol.cmd.select(f'chain A and resi {resi}'), f'Given protein has no residue {resi} in chain A'
for name in ('N', 'CA', 'C'):
assert pymol.cmd.select(f'chain A and resi {resi} and name {name}'), \
f'Given protein has no {name} atom in residue {resi} in chain A'
def to_dict(self, full=False) -> Dict:
if full:
extras = {key: getattr(self, key) for key in self.important_attributes if hasattr(self, key)}
else:
extras = {}
return {'x': self.x, 'y': self.y, 'id': self.id,
'type': self.type, 'description': self.description, **extras}
def __str__(self):
return str(self.to_dict())
def get_coordinates(self) -> str:
"""
Gets the coordinates (PDB block) based on ``self.url`` and ``self.type``
:return: coordinates
:rtype: str
"""
# custom/w coordinates
if self.coordinates:
return self.coordinates
elif self.type == 'custom': # provided.
# self.coordinates was empty
raise ValueError('No coordinates provided for custom retrieval')
# url is filepath
elif self.url and self.type == 'local':
self.coordinates = open(self.url).read()
return self.coordinates
elif self.type == 'local':
# self.url was empty
raise ValueError('No filepath provided for local retrieval')
# url present
elif self.url: # regardless of type/
r = requests.get(self.url, allow_redirects=True)
elif self.type in ('www', 'alphafold2'):
assert self.url, 'No URL provided for www retrieval'
r = requests.get(self.url)
# other
elif self.type == 'rcsb':
r = requests.get(f'https://files.rcsb.org/download/{self.code}.pdb')
elif self.type == 'swissmodel':
assert self.url, 'No URL provided for SWISSMODEL retrieval'
r = requests.get(self.url, allow_redirects=True)
else:
raise ValueError(f'Model type {self.type} for {self.id} could not be recognised.')
# --- read reply
if r.status_code == 200:
self.coordinates = r.text
else:
raise ConnectionError(f'Model {self.code} ({self.url}) failed.')
return self.coordinates
def get_offset_coordinates(self, sequence: Optional[str] = None):
"""
Gets the coordinates and offsets them.
:return:
"""
# if self.offset_corrected:
# return self.coordinates
log.debug(self.chain_definitions)
if not self.chain_definitions:
self.lookup_sifts()
self.coordinates = PyMolTranspiler().renumber(self.get_coordinates(),
self.chain_definitions,
sequence=sequence,
make_A=self.chain,
remove_solvent=True).raw_pdb
self.fix_renumbered_annotation()
return self.coordinates
def fix_renumbered_annotation(self):
# this should have logging.
if self.chain != 'A':
## fix this horror.
log.warning('Chain A is not the target chain in the definitions!')
for i, c in enumerate(self.chain_definitions):
if self.chain_definitions[i]['chain'] == 'A':
self.chain_definitions[i]['chain'] = 'XXX'
break
for i, c in enumerate(self.chain_definitions):
if self.chain_definitions[i]['chain'] == self.chain:
self.chain_definitions[i]['chain'] = 'A'
break
for i, c in enumerate(self.chain_definitions):
if self.chain_definitions[i]['chain'] == 'XXX':
self.chain_definitions[i]['chain'] = self.chain
break
# just in case there is a double trip! (Non server usage)
self.chain = 'A'
self.offset = 0
self.offset_corrected = True
for i, c in enumerate(self.chain_definitions):
if c['chain'] == 'A':
self.chain_definitions[i]['offset'] = 0
def includes(self, position, offset=0):
"""
Generally there should not be an offset as x and y are from Uniprot data so they are already fixed!
:param position:
:param offset:
:return:
"""
if self.x + offset > position:
return False
elif self.y + offset < position:
return False
else:
return True
def lookup_sifts(self):
"""
SIFTS data. for PDBe query see elsewhere.
There are four start/stop pairs that need to be compared to get a good idea of a protein.
For a lengthy discussion see https://blog.matteoferla.com/2019/09/pdb-numbering-rollercoaster.html
Also for a good list of corner case models see https://proteopedia.org/wiki/index.php/Unusual_sequence_numbering
:return: self
"""
if self.type != 'rcsb':
return self # it is probably clean.
log.debug(f'Looking up sifts if this is empty: {self.chain_definitions}')
if not self.chain_definitions:
details = self._get_sifts()
offset = 0
for detail in details:
# clean rows
                for k in ('PDB_BEG', 'PDB_END', 'RES_END', 'RES_BEG', 'SP_BEG', 'SP_END'):
                    if detail[k] == 'None' or detail[k] is None:
                        detail[k] = None
                    elif isinstance(detail[k], int):
                        pass  # this means no test is being done.
                    else:
                        r = re.search(r'(-?\d+)', detail[k])  # str().isdigit() does not like negatives.
                        if r is None:
                            detail[k] = None
                        else:
                            detail[k] = int(r.group(1))  # yes. py int is signed
# get offset
if detail['PDB_BEG'] is not None: # nice.
offset = detail['SP_BEG'] - detail['PDB_BEG']
elif detail['PDB_END'] is not None:
offset = detail['SP_BEG'] - (detail['PDB_END'] - (detail['SP_END'] - detail['SP_BEG']))
elif detail['SP_BEG']:
offset = 0
else:
offset = 0
self.chain_definitions = [{'chain': d['CHAIN'],
'uniprot': d['SP_PRIMARY'],
'x': d["SP_BEG"],
'y': d["SP_END"],
'offset': offset,
'range': f'{d["SP_BEG"]}-{d["SP_END"]}',
'name': d['SP_PRIMARY'],
'description': None} for d in details]
try:
if self.chain != '*':
detail = next(filter(lambda x: self.chain == x['chain'], self.chain_definitions))
self.offset = detail['offset']
except StopIteration:
warn(f'{self.code} {self.chain} not in {self.chain_definitions}')
return self
self.offsets = {d['chain']: d['offset'] for d in self.chain_definitions}
return self
def _get_sifts(self, all_chains=True): # formerly called .lookup_pdb_chain_uniprot
details = []
headers = 'PDB CHAIN SP_PRIMARY RES_BEG RES_END PDB_BEG PDB_END SP_BEG SP_END'.split()
with self.settings.open('pdb_chain_uniprot') as fh:
for row in fh:
if self.code.lower() == row[0:4]:
entry = dict(zip(headers, row.split()))
if self.chain == entry['CHAIN'] or all_chains:
details.append(entry)
return details
def get_offset_from_PDB(self, chain_detail: Dict, sequence: str) -> int:
"""
This is used by sandbox. if transition to swissmodel data works this will be removed.
This gets the offset for a chain in the code given a sequence.
It takes 30 ms to run. However, the sequence is problematic.
        :param chain_detail: see SIFTs {'PDB': '5l8o', 'CHAIN': 'C', 'SP_PRIMARY': 'P51161', 'RES_BEG': 1, 'RES_END': 128, 'PDB_BEG': None, 'PDB_END': None, 'SP_BEG': 1, 'SP_END': 128}
        :type chain_detail: Dict
        :param sequence: sequence of uniprot
:type sequence: str
:return: offset
:rtype: int
"""
aa = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
begin = int(chain_detail['SP_BEG'])
end = int(chain_detail['SP_END'])
assert isinstance(chain_detail, dict), 'Chain detail is a Dict of the specific chain. Not whole protein.'
debugprint = lambda x: None
with pymol2.PyMOL() as pymol:
# Load file
pymol.cmd.set('fetch_path', os.path.join(self.settings.temp_folder, 'PDB'))
pymol.cmd.fetch(self.code)
# Try different windows
for begin_offset in range(0, len(sequence) - begin, 10):
# Try full size window
window = 50
target = sequence[begin - 1 + begin_offset: begin + window + begin_offset]
if len(target) == 0:
# Under what conditions does this happen???
debugprint(
f'sequence is {len(sequence)}, while range is {begin + begin_offset}-{end + begin_offset}')
continue
sele_target = f"chain {chain_detail['CHAIN']} and pepseq {target} and name CA"
# Shrink window to account for failed selection due to weird atoms or short peptides
while pymol.cmd.select(sele_target) == 0:
window -= 10
if window < 10:
debugprint(f'{self.code} ({chain_detail}) does not contain this sequence ({target})')
break # double continue
target = sequence[begin + begin_offset - 1: begin + begin_offset + window]
sele_target = f"chain {chain_detail['CHAIN']} and pepseq {target} and name CA"
# double continue
if window < 10:
continue
# Iterate
atoms = pymol.cmd.get_model(sele_target)
prev = 'X'
prev_i = -999
for atom in atoms.atom:
if int(atom.resi) == prev_i:
continue
if atom.resn in aa:
# Simplest case:
# if aa[atom.resn] == target[0]:
# return chain_detail["SP_BEG"] - atom.resi
# In case there are missing parts.
# for i in range(0, 20):
# if aa[atom.resn] == target[i]:
# return (i + chain_detail["SP_BEG"]) - atom.resi
# In case there are missing parts and repeated residues.
# print(atom.resn, atom.resi, target)
for i in range(1, window): # in case there are missing parts.
if aa[atom.resn] == target[i] and target[i - 1] == prev:
# print(f'YATTA {(i + chain_detail["SP_BEG"]) - int(atom.resi)}')
return (i + int(chain_detail["SP_BEG"])) - int(atom.resi)
prev = aa[atom.resn]
else:
prev = 'X'
prev_i = int(atom.resi)
else: # more than 50 aa without coordinates at the N terminus? Engineered residues.
debugprint(f'{self.code} More than {window} AA without a match!! {target} {prev}')
continue
warn(f'UTTER FAILURE for {self.code}')
return 0
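# Worked sketch (not part of the original class; numbers invented): the value returned
# above is (i + SP_BEG) - resi, i.e. if position i of the Uniprot-derived window matches
# the residue carrying PDB residue number resi, their difference is the chain offset.
def _pdb_offset_sketch(i=4, sp_beg=24, resi=30):
    return (i + sp_beg) - resi  # 4 + 24 - 30 = -2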
def lookup_resolution(self):
if self.type != 'rcsb':
return self
with self.settings.open('resolution') as fh:
resolution = json.load(fh)
for entry in resolution:
if entry['IDCODE'] == self.code:
if entry['RESOLUTION'].strip():
self.resolution = float(entry['RESOLUTION'])
break
else:
warn(f'No resolution info for {self.code}')
return self
def lookup_ligand(self):
warn('TEMP! Returns the data... not self')
# code not used anywhere.
return PDBMeta(self.code + '_' + self.chain).data
@classmethod
def from_swissmodel_query(cls, structural_data: dict, uniprot: str):
# called by retrieve_structures_from_swissmodel of ProteinCore
# structural_data is an entry in the list data['result']['structures'] from a JSON query to swissmodel
keepers = ['coverage', 'created_date', 'from', 'gmqe', 'identity', 'in_complex_with', 'ligand_chains', 'method',
'oligo-state', 'qmean', 'similarity', 'template', 'to']
# offset when downloaded from Expasy not PDB
if structural_data['provider'] == 'PDB':
offset = structural_data['chains'][0]['segments'][0]['uniprot']['from'] - \
structural_data['chains'][0]['segments'][0]['pdb']['from']
else:
offset = 0
chain_id = 0 # is this ever not the zeroth?
structure = cls(
# these two do ziltch:
id=structural_data['md5'],
description=structural_data['coordinates'].split('/')[-1],
# this is shown by venus
code=structural_data['template'] if structural_data[
'provider'] == 'PDB' else f'based upon {structural_data["template"]}',
# range in uniprot
x=structural_data['from'],
y=structural_data['to'],
# offset not provided by Swissmodel. get_offset_from_PDB is the only option.
offset=offset,
type='swissmodel' if structural_data['provider'] == 'SWISSMODEL' else 'rcsb',
url=structural_data['coordinates'],
# structural_data['template'][-1] works only for swissmodel for chain.
chain=structural_data['chains'][chain_id]['id'],
extra={k: structural_data[k] for k in keepers if k in structural_data}
)
# do not fill the structure.chain_definitions via SIFT
def get_chain_def(info) -> List[dict]:
chain = info['id']
chained = [dict(chain=chain,
x=structure.x,
y=structure.y,
offset=structure.offset,
range=f'{structure.x}-{structure.y}',
uniprot=uniprot)]
if 'in_complex_with' in structural_data:
for partner_chain, partner_def in structural_data['in_complex_with'].items():
chained.append(dict(chain=partner_chain,
offset=0,
description=partner_def[0]['description'],
uniprot=partner_def[0]['uniprot_ac'] if 'uniprot_ac' in partner_def[0] else 'P00404',
))
return chained
structure.chain_definitions = [chained for info in structural_data['chains'] for chained in get_chain_def(info)]
# ---- sequence
def get_sequence(part):
# the gap needs to be for 'uniprot' only!
seq = '-' * (structural_data['chains'][chain_id]['segments'][0]['uniprot']['from'] - 1)
seq += structural_data['chains'][chain_id]['segments'][0][part]['aligned_sequence']
return seq
if structure.type == 'swissmodel':
# here the 'template' sequence comes from the SMTL ('smtl') alignment
structure.alignment = {'template': get_sequence('smtl'), 'uniprot': get_sequence('uniprot')}
elif structure.type == 'rcsb':
structure.alignment = {'template': get_sequence('pdb'), 'uniprot': get_sequence('uniprot')}
return structure
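# Hedged sketch (all values invented) of the swissmodel query entry consumed by
# from_swissmodel_query above; only the keys that the method actually reads are shown.
_example_structural_data = {
    'provider': 'SWISSMODEL',
    'template': '1xyz.1.A',
    'md5': 'abc123',
    'coordinates': 'https://example.org/model.pdb',
    'from': 24,
    'to': 120,
    'gmqe': 0.7,  # one of the optional "keepers"
    'chains': [{'id': 'A',
                'segments': [{'uniprot': {'from': 24, 'aligned_sequence': 'MKT'},
                              'smtl': {'from': 1, 'aligned_sequence': 'MKT'}}]}],
}
# hypothetical usage: SomeStructure.from_swissmodel_query(_example_structural_data, uniprot='P12345')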
# def Xget_coordinates_w_template_extras(self, sequence: Optional[str] = None, monomer=True):
# """
# Sequence is the Uniprot sequence. For safety/debug
# Biological assembly is a tricky one. Therefore it is often safer to just use a monomer.
# in_struct_asyms and in_chains are different if bio assembly is smaller than async assembly
# """
# assert self.type == 'swissmodel'
# template_code, chain = re.search('(\w{4})\.\w+\.(\w)', self.code).groups()
# pdbblock = self.get_coordinates()
# meta = PDBMeta(template_code, chain)
# other_chains = meta.get_other_chains(chain, first_only=monomer) - set(self.extra['in_complex_with'].keys())
# if 'ligand_chains' in self.extra:
# present_ligands = {e['hetid'] for e in self.extra['ligand_chains'] if 'hetid' in e}
# else:
# present_ligands = set()
# other_ligands = meta.get_interesting_ligand_names() - present_ligands
# if len(other_chains) + len(other_ligands) > 0:
# with pymol2.PyMOL() as pymol:
# pymol.cmd.read_pdbstr(self.coordinates, 'threaded')
# pymol.cmd.fetch(template_code, 'template', file=None)
# pymol.cmd.remove(f'template and chain {chain} and polymer')
# interesting_lig = ' or '.join([f'resi {name3}' for name3 in other_ligands])
# wanted_chains = ' or '.join([f'chain {chain}' for chain in other_chains])
# chain_sele = f'(template and ({wanted_chains}))'
# lig_sele = f'(template and ({interesting_lig}))'
# if wanted_chains and interesting_lig:
# pymol.cmd.create('combo', f'threaded or {chain_sele} or {lig_sele}')
# elif wanted_chains:
# pymol.cmd.create('combo', f'threaded or {chain_sele}')
# elif interesting_lig:
# pymol.cmd.create('combo', f'threaded or {lig_sele}')
# else:
# pymol.cmd.create('combo', 'threaded') # this should be a break statement with logging
# # log.critical('Impossible')
# pymol.cmd.remove('(byres polymer around 5) and not polymer') # remove asymmetric non bio ligands.
# pdbblock = pymol.cmd.get_pdbstr('combo')
# # update chain definitions
# for entity in meta.get_other_proteins(chain):
# for chain in entity['in_chains']:
# if chain not in other_chains:
# continue
# if pymol.cmd.select(f'combo and chain {chain}') == 0:
# continue
# if 'mappings' in entity['source'][0]:
# x = entity['source'][0]['mappings'][0]['start']['residue_number']
# y = entity['source'][0]['mappings'][0]['end']['residue_number']
# else:
# x = 1
# y = 1_000_000
# self.chain_definitions.append(dict(chain=chain,
# x=x, y=y,
# offset=0, # who cares.
# range=f'{x}-{y}',
# uniprot='P00404', # unknown.
# name=entity['molecule_name'][0],
# transplanted=True
# ))
# if monomer:
# break # only first entity['in_chains'] was added
# for entity in meta.get_other_polymers(chain):
# if meta.is_peptide(entity):
# continue
# for chain in entity['in_chains']:
# if pymol.cmd.select(f'combo and chain {chain}') == 0:
# continue
# self.chain_definitions.append(dict(chain=chain,
# x=1, y=entity['length'],
# offset=0, # who cares.
# range=f"1-{entity['length']}",
# uniprot='P00404', # not valid
# name=entity['molecule_name'][0],
# transplanted=True
# ))
# # pro-forma.
# self.fix_renumbered_annotation()
# return self.coordinates
def get_coordinates_w_template_extras(self, sequence: Optional[str] = None):
"""
Sequence is the Uniprot sequence. For safety/debug
Biological assembly is a tricky one. Therefore it is often safer to just use a monomer.
in_struct_asyms and in_chains differ if the biological assembly is smaller than the asymmetric unit
The Swissmodel template library contains segi and chain renumbered structures
preferably as biounits. This is great except for the fact that the data of the chains is not available.
Hence the hybrid PDBe approach.
This means that for 6pax, whereas the (segi, chains) are
([('B', 'A'), ('C', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'E'), ('A', 'F')]
In the SMTL 6pax.1 it is:
[('-', ''), ('A', ''), ('B', ''), ('C', '')])
"""
assert self.type == 'swissmodel'
template_code, chain = re.search(r'(\w{4})\.\w+\.(\w)', self.code).groups()
log.debug(f'template_code={template_code} chain={chain} current={self.chain}')
pdbblock = self.get_coordinates()
meta = PDBMeta(template_code, chain)
with pymol2.PyMOL() as pymol:
pymol.cmd.set('fetch_path', self.temporary_folder)
pymol.cmd.read_pdbstr(pdbblock, 'threaded')
try:
pymol.cmd.fetch(template_code, 'template', file=None, type='pdb1')
except Exception as err:
# Nowadays pymol2.pymol.CmdException inherits Exception and not BaseException directly
log.warning(f'Error caused with pdb1. {err.__class__.__name__}')
pymol.cmd.fetch(template_code, 'template', file=None)
log.debug('Merging with template...')
pymol.cmd.remove('solvent')
overlap_iter = pymol.cmd.get_model('template and (threaded around 0.1)').atom
data = Counter([(atom.chain, atom.segi) for atom in overlap_iter])
removed_chains = []
for (chain, segi), count in data.most_common():
if count < 10:
pass # a couple of overlapping atoms are fine.
else:
if chain and segi:
pymol.cmd.remove(f'template and chain {chain} and segi {segi} and polymer')
elif chain:
pymol.cmd.remove(f'template and chain {chain} and polymer')
else:
pymol.cmd.remove('template and chain "" and polymer')
removed_chains.append(chain)
meta.remove_chain(chain)
log.debug(f'The following chains were removed {removed_chains}')
pymol.cmd.remove(f'template and (threaded around 0.1) and not polymer')
boring = '+'.join(meta.boring_ligands)
if boring:
pymol.cmd.remove(f'template and resn {boring}')
# correct template chain A.
# thanks to the fact that SM do not have segi.
if self.chain != 'A' and pymol.cmd.select(f'threaded and chain A'):
pymol.cmd.alter(f'threaded and chain A', 'chain="X"')
pymol.cmd.sort()
for defi in self.chain_definitions:
if defi['chain'] == 'A':
defi['chain'] = 'X'
break
else:
log.debug(f'Chain absent in definition')
if self.chain != 'A':
pymol.cmd.alter(f'threaded and chain {self.chain}', 'chain="A"')
pymol.cmd.sort()
for defi in self.chain_definitions:
if defi['chain'] == self.chain:
defi['chain'] = 'A'
break
else:
log.debug(f'... Chain absent in definition')
self.chain = 'A'
transpiler = PyMolTranspiler() # using parts.
transpiler.pymol = pymol
chaingen = transpiler.get_new_letter()
present_chains = {atom.chain for atom in pymol.cmd.get_model('threaded').atom}
to_be_added_chains = {atom.chain for atom in pymol.cmd.get_model('template').atom}
for chain in to_be_added_chains:
if chain in present_chains:
old_chain = chain
new_chain = next(chaingen)
pymol.cmd.alter(f"template and chain {old_chain}", f'chain="{new_chain}"')
pymol.cmd.sort()
meta.move_chain(old_chain, new_chain)
elif chain == '':
new_chain = next(chaingen) # give the blank chain a fresh letter
pymol.cmd.alter("template and chain ''", f'chain="{new_chain}"')
pymol.cmd.sort()
pymol.cmd.create('combo', 'template or threaded', 1) # state 1.
self.coordinates = pymol.cmd.get_pdbstr('combo')
if False: # extreme debug time
pymol.cmd.save('temp.pse')
raise SystemExit
# update chain definitions
for entity in meta.get_proteins():
for chain in entity['in_chains']:
if chain not in to_be_added_chains:
continue
elif pymol.cmd.select(f'combo and chain {chain}') == 0:
continue
elif 'mappings' in entity['source'][0]:
x = entity['source'][0]['mappings'][0]['start']['residue_number']
y = entity['source'][0]['mappings'][0]['end']['residue_number']
else:
x = 1
y = 1_000_000
log.debug(f'Adding chain {chain}')
self.chain_definitions.append(dict(chain=chain,
x=x, y=y,
offset=0, # who cares.
range=f'{x}-{y}',
uniprot='P00404', # unknown.
name=entity['molecule_name'][0],
transplanted=True
))
for entity in meta.get_polymers():
if meta.is_peptide(entity):
continue
for chain in entity['in_chains']:
if pymol.cmd.select(f'combo and chain {chain}') == 0:
continue
self.chain_definitions.append(dict(chain=chain,
x=1, y=entity['length'],
offset=0, # who cares.
range=f"1-{entity['length']}",
uniprot='P00404', # not valid
name=entity['molecule_name'][0].replace('*P', '')
.replace('P*', '')
.replace('*', ''),
transplanted=True
))
# end of pymol context manager
# pro-forma:
self.fix_renumbered_annotation()
log.debug('... Merged')
return self.coordinates
|
the-stack_106_14032
|
# Copyright 2020 communicating_scinet (https://github.com/tonymetger/communicating_scinet)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
file_path = os.path.dirname(__file__)
data_path = os.path.realpath(os.path.join(file_path, '../data/')) + '/'
tf_save_path = os.path.realpath(os.path.join(file_path, '../tf_save/')) + '/'
tf_log_path = os.path.realpath(os.path.join(file_path, '../tf_log/')) + '/'
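# Hedged usage sketch (not in the original file): the constants above are plain path
# strings, so a caller would typically make sure the directories exist before writing.
if __name__ == '__main__':
    for _p in (data_path, tf_save_path, tf_log_path):
        os.makedirs(_p, exist_ok=True)
    print('data_path:', data_path)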
|
the-stack_106_14033
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/python',
'recipe_engine/json',
'recipe_engine/step',
]
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
|
the-stack_106_14034
|
"""
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines, allows the
input arrays to be overwritten to avoid extra memory allocation.
However this requires the array to satisfy two conditions
which are memory order and the data type to match exactly the
order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
sgbmv
sgemv
sger
ssbmv
sspr
sspr2
ssymv
ssyr
ssyr2
stbmv
stpsv
strmv
strsv
dgbmv
dgemv
dger
dsbmv
dspr
dspr2
dsymv
dsyr
dsyr2
dtbmv
dtpsv
dtrmv
dtrsv
cgbmv
cgemv
cgerc
cgeru
chbmv
chemv
cher
cher2
chpmv
chpr
chpr2
ctbmv
ctbsv
ctpmv
ctpsv
ctrmv
ctrsv
csyr
zgbmv
zgemv
zgerc
zgeru
zhbmv
zhemv
zher
zher2
zhpmv
zhpr
zhpr2
ztbmv
ztbsv
ztpmv
ztrmv
ztrsv
zsyr
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
sgemm
ssymm
ssyr2k
ssyrk
strmm
strsm
dgemm
dsymm
dsyr2k
dsyrk
dtrmm
dtrsm
cgemm
chemm
cher2k
cherk
csymm
csyr2k
csyrk
ctrmm
ctrsm
zgemm
zhemm
zher2k
zherk
zsymm
zsyr2k
zsyrk
ztrmm
ztrsm
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# all numeric dtypes '?bBhHiIlLqQefdgFDGO' that are safe to be converted to
# single precision float : '?bBhH!!!!!!ef!!!!!!'
# double precision float : '?bBhHiIlLqQefdg!!!!'
# single precision complex : '?bBhH!!!!!!ef!!F!!!'
# double precision complex : '?bBhHiIlLqQefdgFDG!'
_type_score = {x: 1 for x in '?bBhHef'}
_type_score.update({x: 2 for x in 'iIlLqQd'})
# Handle float128 (g) and complex256 (G) separately for non-Windows systems.
# On Windows these values are rewritten to the same key with the same value.
_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4})
# Final mapping to the actual prefixes and dtypes
_type_conv = {1: ('s', _np.dtype('float32')),
2: ('d', _np.dtype('float64')),
3: ('c', _np.dtype('complex64')),
4: ('z', _np.dtype('complex128'))}
# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
'cdot': 'cdotc', 'zdot': 'zdotc',
'cger': 'cgerc', 'zger': 'zgerc',
'sdotc': 'sdot', 'sdotu': 'sdot',
'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
"""Find best-matching BLAS/LAPACK type.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
prefix : str
BLAS/LAPACK prefix character.
dtype : dtype
Inferred Numpy data type.
prefer_fortran : bool
Whether to prefer Fortran order routines over C order.
Examples
--------
>>> import scipy.linalg.blas as bla
>>> a = np.random.rand(10,15)
>>> b = np.asfortranarray(a) # Change the memory layout order
>>> bla.find_best_blas_type((a,))
('d', dtype('float64'), False)
>>> bla.find_best_blas_type((a*1j,))
('z', dtype('complex128'), False)
>>> bla.find_best_blas_type((b,))
('d', dtype('float64'), True)
"""
dtype = _np.dtype(dtype)
max_score = _type_score.get(dtype.char, 5)
prefer_fortran = False
if arrays:
# In most cases, single element is passed through, quicker route
if len(arrays) == 1:
max_score = _type_score.get(arrays[0].dtype.char, 5)
prefer_fortran = arrays[0].flags['FORTRAN']
else:
# use the most generic type in arrays
scores = [_type_score.get(x.dtype.char, 5) for x in arrays]
max_score = max(scores)
ind_max_score = scores.index(max_score)
# safe upcasting for mix of float64 and complex64 --> prefix 'z'
if max_score == 3 and (2 in scores):
max_score = 4
if arrays[ind_max_score].flags['FORTRAN']:
# prefer Fortran for leading array with column major order
prefer_fortran = True
# Get the LAPACK prefix and the corresponding dtype if not fall back
# to 'd' and double precision float.
prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64')))
return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
lib_name, fmodule, cmodule,
fmodule_name, cmodule_name, alias):
"""
Return available BLAS/LAPACK functions.
Used also in lapack.py. See get_blas_funcs for docstring.
"""
funcs = []
unpack = False
dtype = _np.dtype(dtype)
module1 = (cmodule, cmodule_name)
module2 = (fmodule, fmodule_name)
if isinstance(names, str):
names = (names,)
unpack = True
prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
if prefer_fortran:
module1, module2 = module2, module1
for name in names:
func_name = prefix + name
func_name = alias.get(func_name, func_name)
func = getattr(module1[0], func_name, None)
module_name = module1[1]
if func is None:
func = getattr(module2[0], func_name, None)
module_name = module2[1]
if func is None:
raise ValueError(
'%s function %s could not be found' % (lib_name, func_name))
func.module_name, func.typecode = module_name, prefix
func.dtype = dtype
func.prefix = prefix # Backward compatibility
funcs.append(func)
if unpack:
return funcs[0]
else:
return funcs
def get_blas_funcs(names, arrays=(), dtype=None):
"""Return available BLAS function objects from names.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
names : str or sequence of str
Name(s) of BLAS functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In BLAS, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
types {float32, float64, complex64, complex128} respectively.
The code and the dtype are stored in attributes `typecode` and `dtype`
of the returned functions.
Examples
--------
>>> import scipy.linalg as LA
>>> a = np.random.rand(3,2)
>>> x_gemv = LA.get_blas_funcs('gemv', (a,))
>>> x_gemv.typecode
'd'
>>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,))
>>> x_gemv.typecode
'z'
"""
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas, _cblas, "fblas", "cblas",
_blas_alias)
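# Illustrative sketch (not part of SciPy): the prefix chosen by get_blas_funcs follows
# the dtype of the arrays passed in, as described in the module docstring above.
if __name__ == '__main__':
    a32 = _np.ones(4, dtype=_np.float32)
    a64 = _np.ones(4, dtype=_np.float64)
    saxpy = get_blas_funcs('axpy', (a32,))  # single precision -> 'saxpy'
    daxpy = get_blas_funcs('axpy', (a64,))  # double precision -> 'daxpy'
    print(saxpy.typecode, daxpy.typecode)   # 's' 'd'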
|
the-stack_106_14036
|
# encoding: UTF-8
'''
This file contains the portfolio backtesting engine of the CTA module. The engine exposes
the same API as the live CTA engine, so the same strategy code can be used for backtesting
and for live trading.
华富资产 李来佳
'''
from __future__ import division
import sys
import os
import importlib
import csv
import copy
import pandas as pd
import traceback
import numpy as np
import logging
import socket
import zlib
import pickle
from bson import binary
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from functools import lru_cache
from pathlib import Path
from .base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus
)
from .template import CtaTemplate
from vnpy.component.cta_fund_kline import FundKline
from vnpy.trader.object import (
BarData,
TickData,
OrderData,
TradeData,
ContractData
)
from vnpy.trader.constant import (
Exchange,
Direction,
Offset,
Status,
OrderType,
Product
)
from vnpy.trader.converter import PositionHolding
from vnpy.trader.utility import (
get_underlying_symbol,
round_to,
extract_vt_symbol,
format_number,
import_module_by_str
)
from vnpy.trader.util_logger import setup_logger
from vnpy.data.mongo.mongo_data import MongoData
from uuid import uuid1
class BackTestingEngine(object):
"""
CTA backtesting engine.
Its method interface matches the strategy engine,
so the same code base runs in backtesting and in live trading.
Backtests against 1-minute bars
or at tick level,
and provides portfolio / batch backtesting services.
"""
def __init__(self, event_engine=None):
"""Constructor"""
# 绑定事件引擎
self.event_engine = event_engine
self.mode = 'bar' # 'bar': backtest on 1-minute K-lines; 'tick': backtest on tick-by-tick data
# 引擎类型为回测
self.engine_type = EngineType.BACKTESTING
self.contract_type = 'future' # future, stock, digital
# 回测策略相关
self.classes = {} # 策略类,class_name: stategy_class
self.class_module_map = {} # 策略类名与模块名映射 class_name: mudule_name
self.strategies = {} # 回测策略实例, key = strategy_name, value= strategy
self.symbol_strategy_map = defaultdict(list) # vt_symbol: strategy list
self.test_name = 'portfolio_test_{}'.format(datetime.now().strftime('%M%S')) # 回测策略组合的实例名字
self.daily_report_name = '' # 策略的日净值报告文件名称
self.test_start_date = '' # 组合回测启动得日期
self.init_days = 0 # 初始化天数
self.test_end_date = '' # 组合回测结束日期
self.slippage = {} # 回测时假设的滑点
self.commission_rate = {} # 回测时假设的佣金比例(适用于百分比佣金)
self.fix_commission = {} # 每手固定手续费
self.size = {} # 合约大小,默认为1
self.price_tick = {} # 价格最小变动
self.margin_rate = {} # 回测合约的保证金比率
self.price_dict = {} # 登记vt_symbol对应的最新价
self.contract_dict = {} # 登记vt_symbol得对应合约信息
self.symbol_exchange_dict = {} # 登记symbol: exchange的对应关系
self.data_start_date = None # 回测数据开始日期,datetime对象 (用于截取数据)
self.data_end_date = None # 回测数据结束日期,datetime对象 (用于截取数据)
self.strategy_start_date = None # 策略启动日期(即前面的数据用于初始化),datetime对象
self.stop_order_count = 0 # 本地停止单编号
self.stop_orders = {} # 本地停止单
self.active_stop_orders = {} # 活动本地停止单
self.limit_order_count = 0 # 限价单编号
self.limit_orders = OrderedDict() # 限价单字典
self.active_limit_orders = OrderedDict() # 活动限价单字典,用于进行撮合用
self.order_strategy_dict = {} # orderid 与 strategy的映射
# 持仓缓存字典
# key为vt_symbol,value为PositionBuffer对象
self.pos_holding_dict = {}
self.trade_count = 0 # 成交编号
self.trade_dict = OrderedDict() # 用于统计成交收益时,还没处理得交易
self.trades = OrderedDict() # 记录所有得成交记录
self.trade_pnl_list = [] # 交易记录列表
self.long_position_list = [] # 多单持仓
self.short_position_list = [] # 空单持仓
self.holdings = {} # 多空持仓
# 当前最新数据,用于模拟成交用
self.gateway_name = u'BackTest'
self.last_bar = {} # 最新的bar
self.last_tick = {} # 最新tick
self.last_dt = None # 最新时间
# csvFile相关
self.bar_interval_seconds = 60 # csv文件,属于K线类型,K线的周期(秒数),缺省是1分钟
# 费用风控情况
self.percent = 0.0
self.percent_limit = 30 # 投资仓位比例上限
# 回测计算相关
self.use_margin = True # 使用保证金模式(期货使用,计算保证金时,按照开仓价计算。股票是按照当前价计算)
self.init_capital = 1000000 # 期初资金
self.cur_capital = self.init_capital # 当前资金净值
self.net_capital = self.init_capital # 实时资金净值(每日根据capital和持仓浮盈计算)
self.max_capital = self.init_capital # 资金最高净值
self.max_net_capital = self.init_capital
self.avaliable = self.init_capital
self.max_pnl = 0 # 最高盈利
self.min_pnl = 0 # 最大亏损
self.max_occupy_rate = 0 # 最大保证金占比
self.winning_result = 0 # 盈利次数
self.losing_result = 0 # 亏损次数
self.total_trade_count = 0 # 总成交数量
self.total_winning = 0 # 总盈利
self.total_losing = 0 # 总亏损
self.total_turnover = 0 # 总成交金额(合约面值)
self.total_commission = 0 # 总手续费
self.total_slippage = 0 # 总滑点
self.time_list = [] # 时间序列
self.pnl_list = [] # 每笔盈亏序列
self.capital_list = [] # 盈亏汇总的时间序列
self.drawdown_list = [] # 回撤的时间序列
self.drawdown_rate_list = [] # 最大回撤比例的时间序列(成交结算)
self.max_net_capital_time = ''
self.max_drawdown_rate_time = ''
self.daily_max_drawdown_rate = 0 # 按照日结算价计算
self.pnl_strategy_dict = {} # 策略实例的平仓盈亏
self.is_plot_daily = False
self.daily_list = [] # 按日统计得序列
self.daily_first_benchmark = None
self.logger = None
self.strategy_loggers = {}
self.debug = False
self.is_7x24 = False
self.logs_path = None
self.data_path = None
self.fund_kline_dict = {}
self.active_fund_kline = False
# 回测任务/回测结果,保存在数据库中
self.mongo_api = None
self.task_id = None
self.test_setting = None # 回测设置
self.strategy_setting = None # 所有回测策略得设置
def create_fund_kline(self, name, use_renko=False):
"""
Create an equity (fund) curve K-line.
:param name: account name or strategy name
:param use_renko:
:return:
"""
setting = {}
setting.update({'name': name})
setting['para_ma1_len'] = 5
setting['para_ma2_len'] = 10
setting['para_ma3_len'] = 20
setting['para_active_yb'] = True
setting['price_tick'] = 0.01
setting['underlying_symbol'] = 'fund'
if use_renko:
# 使用砖图,高度是资金的千分之一
setting['height'] = self.init_capital * 0.001
setting['use_renko'] = True
fund_kline = FundKline(cta_engine=self, setting=setting)
self.fund_kline_dict.update({name: fund_kline})
return fund_kline
def get_fund_kline(self, name: str = None):
# 指定资金账号/策略名
if name:
kline = self.fund_kline_dict.get(name, None)
return kline
# no account specified, but one or more fund K-lines exist
if len(self.fund_kline_dict) > 0:
# prefer the fund K-line named after the test (the strategy group configured in vt_setting)
kline = self.fund_kline_dict.get(self.test_name, None)
# not found: fall back to the first one
if kline is None:
kline = list(self.fund_kline_dict.values())[0]
return kline
else:
return None
def get_account(self, vt_accountid: str = ""):
"""返回账号的实时权益,可用资金,仓位比例,投资仓位比例上限"""
if self.net_capital == 0.0:
self.percent = 0.0
return self.net_capital, self.avaliable, self.percent, self.percent_limit
def set_test_start_date(self, start_date: str = '20100416', init_days: int = 10):
"""设置回测的启动日期"""
self.test_start_date = start_date
self.init_days = init_days
self.data_start_date = datetime.strptime(start_date, '%Y%m%d')
# 初始化天数
init_time_delta = timedelta(init_days)
self.strategy_start_date = self.data_start_date + init_time_delta
self.write_log(u'设置:回测数据开始日期:{},初始化数据为{}天,策略自动启动日期:{}'
.format(self.data_start_date, self.init_days, self.strategy_start_date))
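# Worked example (invented dates) of the arithmetic above:
#   engine.set_test_start_date('20220101', init_days=10)
#   -> data_start_date     = datetime(2022, 1, 1)
#   -> strategy_start_date = datetime(2022, 1, 1) + timedelta(10) = datetime(2022, 1, 11)
# i.e. the first ten days of data are only used to warm the strategies up before trading.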
def set_test_end_date(self, end_date: str = ''):
"""设置回测的结束日期"""
self.test_end_date = end_date
if end_date:
self.data_end_date = datetime.strptime(end_date, '%Y%m%d')
# without adjusting the time, data from the end date itself would be excluded
self.data_end_date = self.data_end_date.replace(hour=23, minute=59)
else:
self.data_end_date = datetime.now()
self.write_log(u'设置:回测数据结束日期:{}'.format(self.data_end_date))
def set_init_capital(self, capital: float):
"""设置期初净值"""
self.cur_capital = capital # 资金
self.net_capital = capital # 实时资金净值(每日根据capital和持仓浮盈计算)
self.max_capital = capital # 资金最高净值
self.max_net_capital = capital
self.avaliable = capital
self.init_capital = capital
def set_margin_rate(self, vt_symbol: str, margin_rate: float):
"""设置某个合约得保证金比率"""
self.margin_rate.update({vt_symbol: margin_rate})
@lru_cache()
def get_margin_rate(self, vt_symbol: str):
return self.margin_rate.get(vt_symbol, 0.1)
def set_slippage(self, vt_symbol: str, slippage: float):
"""设置滑点点数"""
self.slippage.update({vt_symbol: slippage})
@lru_cache()
def get_slippage(self, vt_symbol: str):
"""获取滑点"""
return self.slippage.get(vt_symbol, 0)
def set_size(self, vt_symbol: str, size: int):
"""设置合约大小"""
self.size.update({vt_symbol: size})
@lru_cache()
def get_size(self, vt_symbol: str):
"""查询合约的size"""
return self.size.get(vt_symbol, 10)
def set_price(self, vt_symbol: str, price: float):
self.price_dict.update({vt_symbol: price})
def get_price(self, vt_symbol: str):
return self.price_dict.get(vt_symbol, None)
def set_commission_rate(self, vt_symbol: str, rate: float):
"""设置佣金比例"""
self.commission_rate.update({vt_symbol: rate})
if rate >= 0.1:
self.fix_commission.update({vt_symbol: rate})
def get_commission_rate(self, vt_symbol: str):
""" 获取保证金比例,缺省万分之一"""
return self.commission_rate.get(vt_symbol, float(0.00001))
def get_fix_commission(self, vt_symbol: str):
return self.fix_commission.get(vt_symbol, 0)
def set_price_tick(self, vt_symbol: str, price_tick: float):
"""设置价格最小变动"""
self.price_tick.update({vt_symbol: price_tick})
def get_price_tick(self, vt_symbol: str):
return self.price_tick.get(vt_symbol, 1)
def set_contract(self, symbol: str, exchange: Exchange, product: Product, name: str, size: int,
price_tick: float, margin_rate: float = 0.1):
"""设置合约信息"""
vt_symbol = '.'.join([symbol, exchange.value])
if vt_symbol not in self.contract_dict:
c = ContractData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
name=name,
product=product,
size=size,
pricetick=price_tick,
margin_rate=margin_rate
)
self.contract_dict.update({vt_symbol: c})
self.set_size(vt_symbol, size)
self.set_margin_rate(vt_symbol, margin_rate)
self.set_price_tick(vt_symbol, price_tick)
self.symbol_exchange_dict.update({symbol: exchange})
@lru_cache()
def get_contract(self, vt_symbol):
"""获取合约配置信息"""
return self.contract_dict.get(vt_symbol)
@lru_cache()
def get_exchange(self, symbol: str):
return self.symbol_exchange_dict.get(symbol, Exchange.LOCAL)
def get_position_holding(self, vt_symbol: str, gateway_name: str = ''):
""" 查询合约在账号的持仓(包含多空)"""
if gateway_name:
gateway_name = self.gateway_name
k = f'{gateway_name}.{vt_symbol}'
holding = self.holdings.get(k, None)
if not holding:
symbol, exchange = extract_vt_symbol(vt_symbol)
if self.contract_type == 'future':
product = Product.FUTURES
elif self.contract_type == 'stock':
product = Product.EQUITY
else:
product = Product.SPOT
contract = ContractData(gateway_name=gateway_name,
name=vt_symbol,
product=product,
symbol=symbol,
exchange=exchange,
size=self.get_size(vt_symbol),
pricetick=self.get_price_tick(vt_symbol),
margin_rate=self.get_margin_rate(vt_symbol))
holding = PositionHolding(contract)
self.holdings[k] = holding
return holding
def set_name(self, test_name):
"""
Set the running-instance name of the portfolio backtest.
:param test_name:
:return:
"""
self.test_name = test_name
def set_daily_report_name(self, report_file):
"""
Set the csv file name (including path) for the strategies' daily net-value report.
:param report_file: file name to save to (including path)
:return:
"""
self.daily_report_name = report_file
def prepare_env(self, test_setting):
"""
Prepare the environment from the configuration parameters, including:
the backtest name, whether to run in debug mode, the data and log directories,
capital, margin mode and position control,
and the backtest start/end dates.
:param test_setting:
:return:
"""
self.test_setting = copy.copy(test_setting)
self.output('back_testing prepare_env')
if 'name' in test_setting:
self.set_name(test_setting.get('name'))
self.mode = test_setting.get('mode', 'bar')
self.output(f'采用{self.mode}方式回测')
self.contract_type = test_setting.get('contract_type', 'future')
self.output(f'测试合约主要为{self.contract_type}')
self.debug = test_setting.get('debug', False)
# 更新数据目录
if 'data_path' in test_setting:
self.data_path = test_setting.get('data_path')
else:
self.data_path = os.path.abspath(os.path.join(os.getcwd(), 'data'))
print(f'数据输出目录:{self.data_path}')
# 更新日志目录
if 'logs_path' in test_setting:
self.logs_path = os.path.abspath(os.path.join(test_setting.get('logs_path'), self.test_name))
else:
self.logs_path = os.path.abspath(os.path.join(os.getcwd(), 'log', self.test_name))
print(f'日志输出目录:{self.logs_path}')
# 创建日志
self.create_logger(debug=self.debug)
# 设置资金
if 'init_capital' in test_setting:
self.write_log(u'设置期初资金:{}'.format(test_setting.get('init_capital')))
self.set_init_capital(test_setting.get('init_capital'))
# 缺省使用保证金方式。(期货使用保证金/股票不使用保证金)
self.use_margin = test_setting.get('use_margin', True)
# 设置最大资金使用比例
if 'percent_limit' in test_setting:
self.write_log(u'设置最大资金使用比例:{}%'.format(test_setting.get('percent_limit')))
self.percent_limit = test_setting.get('percent_limit')
if 'start_date' in test_setting:
if 'strategy_start_date' not in test_setting:
init_days = test_setting.get('init_days', 10)
self.write_log(u'设置回测开始日期:{},数据加载日数:{}'.format(test_setting.get('start_date'), init_days))
self.set_test_start_date(test_setting.get('start_date'), init_days)
else:
start_date = test_setting.get('start_date')
strategy_start_date = test_setting.get('strategy_start_date')
self.write_log(u'使用指定的数据开始日期:{}和策略启动日期:{}'.format(start_date, strategy_start_date))
self.test_start_date = start_date
self.data_start_date = datetime.strptime(start_date.replace('-', ''), '%Y%m%d')
self.strategy_start_date = datetime.strptime(strategy_start_date.replace('-', ''), '%Y%m%d')
if 'end_date' in test_setting:
self.write_log(u'设置回测结束日期:{}'.format(test_setting.get('end_date')))
self.set_test_end_date(test_setting.get('end_date'))
# 准备数据
if 'symbol_datas' in test_setting:
self.write_log(u'准备数据')
self.prepare_data(test_setting.get('symbol_datas'))
if self.mode == 'tick':
self.tick_path = test_setting.get('tick_path', None)
# 设置bar文件的时间间隔秒数
if 'bar_interval_seconds' in test_setting:
self.write_log(u'设置bar文件的时间间隔秒数:{}'.format(test_setting.get('bar_interval_seconds')))
self.bar_interval_seconds = test_setting.get('bar_interval_seconds')
# 资金曲线
self.active_fund_kline = test_setting.get('active_fund_kline', False)
if self.active_fund_kline:
# 创建资金K线
self.create_fund_kline(self.test_name, use_renko=test_setting.get('use_renko', False))
self.is_plot_daily = test_setting.get('is_plot_daily', False)
# 加载所有本地策略class
self.load_strategy_class()
def prepare_data(self, data_dict):
"""
Prepare the portfolio data.
:param data_dict:
:return:
"""
self.output('prepare_data')
if len(data_dict) == 0:
self.write_log(u'请指定回测数据和文件')
return
for symbol, symbol_data in data_dict.items():
self.write_log(u'配置{}数据:{}'.format(symbol, symbol_data))
self.set_price_tick(symbol, symbol_data.get('price_tick', 1))
self.set_slippage(symbol, symbol_data.get('slippage', 0))
self.set_size(symbol, symbol_data.get('symbol_size', 10))
margin_rate = symbol_data.get('margin_rate', 0.1)
self.set_margin_rate(symbol, margin_rate)
self.set_commission_rate(symbol, symbol_data.get('commission_rate', float(0.0001)))
self.set_contract(
symbol=symbol,
name=symbol,
exchange=Exchange(symbol_data.get('exchange', 'LOCAL')),
product=Product(symbol_data.get('product', "期货")),
size=symbol_data.get('symbol_size', 10),
price_tick=symbol_data.get('price_tick', 1),
margin_rate=margin_rate
)
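# Hedged example (all values invented) of the configuration consumed by prepare_env()
# and prepare_data() above; only keys actually read by those methods are used.
_example_test_setting = {
    'name': 'demo_portfolio',
    'mode': 'bar',                # or 'tick'
    'contract_type': 'future',
    'debug': False,
    'init_capital': 1000000,
    'use_margin': True,
    'percent_limit': 30,
    'start_date': '20220101',
    'init_days': 10,
    'end_date': '20220331',
    'symbol_datas': {
        'RB99': {                 # hypothetical index contract
            'exchange': 'SHFE',
            'product': '期货',
            'price_tick': 1,
            'symbol_size': 10,
            'margin_rate': 0.1,
            'slippage': 1,
            'commission_rate': 0.0001,
        },
    },
}
# hypothetical usage:
# engine = BackTestingEngine()
# engine.prepare_env(_example_test_setting)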
def new_tick(self, tick):
"""新得tick"""
self.last_tick.update({tick.vt_symbol: tick})
if self.last_dt is None or (tick.datetime and tick.datetime > self.last_dt):
self.last_dt = tick.datetime
self.set_price(tick.vt_symbol, tick.last_price)
self.cross_stop_order(tick=tick) # 撮合停止单
self.cross_limit_order(tick=tick) # 先撮合限价单
# 更新账号级别资金曲线(只有持仓时,才更新)
fund_kline = self.get_fund_kline(self.test_name)
if fund_kline is not None and (len(self.long_position_list) > 0 or len(self.short_position_list) > 0):
fund_kline.update_account(self.last_dt, self.net_capital)
for strategy in self.symbol_strategy_map.get(tick.vt_symbol, []):
# 更新策略的资金K线
fund_kline = self.fund_kline_dict.get(strategy.strategy_name, None)
if fund_kline:
hold_pnl, _ = fund_kline.get_hold_pnl()
if hold_pnl != 0:
fund_kline.update_strategy(dt=self.last_dt, hold_pnl=hold_pnl)
# 推送tick到策略中
strategy.on_tick(tick) # 推送K线到策略中
# 到达策略启动日期,启动策略
if not strategy.trading and self.strategy_start_date < tick.datetime:
strategy.trading = True
strategy.on_start()
self.output(u'{}策略启动交易'.format(strategy.strategy_name))
def new_bar(self, bar):
"""新的K线"""
self.last_bar.update({bar.vt_symbol: bar})
if self.last_dt is None or (bar.datetime and bar.datetime > self.last_dt):
self.last_dt = bar.datetime
self.set_price(bar.vt_symbol, bar.close_price)
self.cross_stop_order(bar=bar) # 撮合停止单
self.cross_limit_order(bar=bar) # 先撮合限价单
# 更新账号的资金曲线(只有持仓时,才更新)
fund_kline = self.get_fund_kline(self.test_name)
if fund_kline is not None and (len(self.long_position_list) > 0 or len(self.short_position_list) > 0):
fund_kline.update_account(self.last_dt, self.net_capital)
for strategy in self.symbol_strategy_map.get(bar.vt_symbol, []):
# 更新策略的资金K线
fund_kline = self.fund_kline_dict.get(strategy.strategy_name, None)
if fund_kline:
hold_pnl, _ = fund_kline.get_hold_pnl()
if hold_pnl != 0:
fund_kline.update_strategy(dt=self.last_dt, hold_pnl=hold_pnl)
# 推送K线到策略中
strategy.on_bar(bar) # 推送K线到策略中
# 到达策略启动日期,启动策略
if not strategy.trading and self.strategy_start_date < bar.datetime:
strategy.trading = True
strategy.on_start()
self.output(u'{}策略启动交易'.format(strategy.strategy_name))
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
self.write_log('加载所有策略class')
# 加载 vnpy/app/cta_strategy_pro/strategies的所有策略
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.cta_strategy_pro.strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
elif filename.endswith(".pyd"):
strategy_module_name = ".".join(
[module_name, filename.split(".")[0]])
else:
continue
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load/Reload strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
class_name = value.__name__
if class_name not in self.classes:
self.write_log(f"加载策略类{module_name}.{class_name}")
else:
self.write_log(f"更新策略类{module_name}.{class_name}")
self.classes[class_name] = value
self.class_module_map[class_name] = module_name
return True
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_error(msg)
self.output(msg)
return False
def load_strategy(self, strategy_name: str, strategy_setting: dict = None):
"""
装载回测的策略
setting是参数设置,包括
class_name: str, 策略类名字
vt_symbol: str, 缺省合约
setting: {}, 策略的参数
auto_init: True/False, 策略是否自动初始化
auto_start: True/False, 策略是否自动启动
"""
# 获取策略的类名
class_name = strategy_setting.get('class_name', None)
if class_name is None or strategy_name is None:
self.write_error(u'setting中没有class_name')
return
# strategy_class => module.strategy_class
if '.' not in class_name:
module_name = self.class_module_map.get(class_name, None)
if module_name:
class_name = module_name + '.' + class_name
self.write_log(u'转换策略为全路径:{}'.format(class_name))
# 获取策略类的定义
strategy_class = import_module_by_str(class_name)
if strategy_class is None:
self.write_error(u'加载策略模块失败:{}'.format(class_name))
return
# 处理 vt_symbol
vt_symbol = strategy_setting.get('vt_symbol')
if '.' in vt_symbol:
symbol, exchange = extract_vt_symbol(vt_symbol)
elif self.contract_type == 'future':
symbol = vt_symbol
underly_symbol = get_underlying_symbol(symbol).upper()
exchange = self.get_exchange(f'{underly_symbol}99')
vt_symbol = '.'.join([symbol, exchange.value])
else:
symbol = vt_symbol
exchange = Exchange.LOCAL
vt_symbol = '.'.join([symbol, exchange.value])
# 在期货组合回测,中需要把一般配置的主力合约,更换为指数合约
if '99' not in symbol and exchange != Exchange.SPD and self.contract_type == 'future':
underly_symbol = get_underlying_symbol(symbol).upper()
self.write_log(u'更新vt_symbol为指数合约:{}=>{}'.format(vt_symbol, underly_symbol + '99.' + exchange.value))
vt_symbol = underly_symbol.upper() + '99.' + exchange.value
strategy_setting.update({'vt_symbol': vt_symbol})
# 属于自定义套利合约
if exchange == Exchange.SPD:
symbol_pairs = symbol.split('-')
active_symbol = get_underlying_symbol(symbol_pairs[0])
passive_symbol = get_underlying_symbol(symbol_pairs[2])
new_vt_symbol = '-'.join([active_symbol.upper() + '99',
symbol_pairs[1],
passive_symbol.upper() + '99',
symbol_pairs[3],
symbol_pairs[4]]) + '.SPD'
self.write_log(u'更新vt_symbol为指数合约:{}=>{}'.format(vt_symbol, new_vt_symbol))
vt_symbol = new_vt_symbol
strategy_setting.update({'vt_symbol': vt_symbol})
# 取消自动启动
if 'auto_start' in strategy_setting:
strategy_setting.update({'auto_start': False})
# 策略参数设置
setting = strategy_setting.get('setting', {})
# 强制更新回测为True
setting.update({'backtesting': True})
# 创建实例
strategy = strategy_class(self, strategy_name, vt_symbol, setting)
# 保存到策略实例映射表中
self.strategies.update({strategy_name: strategy})
# 更新vt_symbol合约与策略的订阅关系
self.subscribe_symbol(strategy_name=strategy_name, vt_symbol=vt_symbol)
if strategy_setting.get('auto_init', False):
self.write_log(u'自动初始化策略')
strategy.on_init()
if strategy_setting.get('auto_start', False):
self.write_log(u'自动启动策略')
strategy.on_start()
if self.active_fund_kline:
# 创建策略实例的资金K线
self.create_fund_kline(name=strategy_name, use_renko=False)
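# Hedged example (names invented) of the per-strategy setting dict expected by load_strategy() above.
_example_strategy_setting = {
    'class_name': 'ExampleCtaStrategy',  # resolved through class_module_map, or given as a full dotted path
    'vt_symbol': 'RB99.SHFE',            # futures symbols get switched to the index contract when needed
    'auto_init': True,
    'auto_start': False,                 # forced to False during backtesting anyway
    'setting': {},                       # strategy parameters; 'backtesting': True is injected automatically
}
# hypothetical usage: engine.load_strategy('demo_strategy_01', _example_strategy_setting)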
def subscribe_symbol(self, strategy_name: str, vt_symbol: str, gateway_name: str = '', is_bar: bool = False):
"""订阅合约"""
strategy = self.strategies.get(strategy_name, None)
if not strategy:
return False
# 添加 合约订阅 vt_symbol <=> 策略实例 strategy 映射.
strategies = self.symbol_strategy_map[vt_symbol]
strategies.append(strategy)
return True
# ---------------------------------------------------------------------
def save_strategy_data(self):
"""保存策略数据"""
for strategy in self.strategies.values():
self.write_log(u'save strategy data')
strategy.save_data()
def send_order(self,
strategy: CtaTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool,
order_type: OrderType = OrderType.LIMIT,
gateway_name: str = None):
"""发单"""
price_tick = self.get_price_tick(vt_symbol)
price = round_to(price, price_tick)
if stop:
return self.send_local_stop_order(
strategy=strategy,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
lock=lock,
gateway_name=gateway_name
)
else:
return self.send_limit_order(
strategy=strategy,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
lock=lock,
gateway_name=gateway_name
)
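# Sketch (numbers invented): send_order() above snaps prices to the contract's price tick
# with vnpy's round_to before routing to a limit order or a local stop order.
def _price_snap_sketch():
    return round_to(3601.3, 1), round_to(105.7, 0.5)  # -> (3601.0, 105.5)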
def send_limit_order(self,
strategy: CtaTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
order_type: OrderType = OrderType.LIMIT,
gateway_name: str = None
):
self.limit_order_count += 1
order_id = str(self.limit_order_count)
symbol, exchange = extract_vt_symbol(vt_symbol)
if gateway_name is None:
gateway_name = self.gateway_name
order = OrderData(
gateway_name=gateway_name,
symbol=symbol,
exchange=exchange,
orderid=order_id,
direction=direction,
offset=offset,
type=order_type,
price=round_to(value=price, target=self.get_price_tick(symbol)),
volume=volume,
status=Status.NOTTRADED,
time=str(self.last_dt)
)
# 保存到限价单字典中
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
self.order_strategy_dict.update({order.vt_orderid: strategy})
self.write_log(f'创建限价单:{order.__dict__}')
return [order.vt_orderid]
def send_local_stop_order(
self,
strategy: CtaTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
gateway_name: str = None):
""""""
self.stop_order_count += 1
stop_order = StopOrder(
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=f"{STOPORDER_PREFIX}.{self.stop_order_count}",
strategy_name=strategy.strategy_name,
)
self.write_log(f'创建本地停止单:{stop_order.__dict__}')
self.order_strategy_dict.update({stop_order.stop_orderid: strategy})
self.active_stop_orders[stop_order.stop_orderid] = stop_order
self.stop_orders[stop_order.stop_orderid] = stop_order
return [stop_order.stop_orderid]
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""撤单"""
if vt_orderid.startswith(STOPORDER_PREFIX):
return self.cancel_stop_order(strategy, vt_orderid)
else:
return self.cancel_limit_order(strategy, vt_orderid)
def cancel_limit_order(self, strategy: CtaTemplate, vt_orderid: str):
"""限价单撤单"""
if vt_orderid in self.active_limit_orders:
order = self.active_limit_orders[vt_orderid]
register_strategy = self.order_strategy_dict.get(vt_orderid, None)
if register_strategy.strategy_name != strategy.strategy_name:
return False
order.status = Status.CANCELLED
order.cancel_time = str(self.last_dt)
self.active_limit_orders.pop(vt_orderid, None)
strategy.on_order(order)
return True
return False
def cancel_stop_order(self, strategy: CtaTemplate, vt_orderid: str):
"""本地停止单撤单"""
if vt_orderid not in self.active_stop_orders:
return False
stop_order = self.active_stop_orders.pop(vt_orderid)
stop_order.status = StopOrderStatus.CANCELLED
strategy.on_stop_order(stop_order)
return True
def cancel_all(self, strategy):
"""撤销某个策略的所有委托单"""
self.cancel_orders(strategy=strategy)
def cancel_orders(self, vt_symbol: str = None, offset: Offset = None, strategy: CtaTemplate = None):
"""撤销所有单"""
# Symbol参数:指定合约的撤单;
# OFFSET参数:指定Offset的撤单,缺省不填写时,为所有
# strategy参数: 指定某个策略的单子
if len(self.active_limit_orders) > 0:
self.write_log(u'从所有订单中,撤销:开平:{},合约:{},策略:{}'
.format(offset,
vt_symbol if vt_symbol is not None else u'所有',
strategy.strategy_name if strategy else None))
for vt_orderid in list(self.active_limit_orders.keys()):
order = self.active_limit_orders.get(vt_orderid, None)
order_strategy = self.order_strategy_dict.get(vt_orderid, None)
if order is None or order_strategy is None:
continue
if offset is None:
offset_cond = True
else:
offset_cond = order.offset == offset
if vt_symbol is None:
symbol_cond = True
else:
symbol_cond = order.vt_symbol == vt_symbol
if strategy is None:
strategy_cond = True
else:
strategy_cond = strategy.strategy_name == order_strategy.strategy_name
if offset_cond and symbol_cond and strategy_cond:
self.write_log(u'撤销订单:{},{} {}@{}'
.format(vt_orderid, order.direction, order.price, order.volume))
order.status = Status.CANCELLED
order.cancel_time = str(self.last_dt)
del self.active_limit_orders[vt_orderid]
if strategy:
strategy.on_order(order)
for stop_orderid in list(self.active_stop_orders.keys()):
order = self.active_stop_orders.get(stop_orderid, None)
order_strategy = self.order_strategy_dict.get(stop_orderid, None)
if order is None or order_strategy is None:
continue
if offset is None:
offset_cond = True
else:
offset_cond = order.offset == offset
if vt_symbol is None:
symbol_cond = True
else:
symbol_cond = order.vt_symbol == vt_symbol
if strategy is None:
strategy_cond = True
else:
strategy_cond = strategy.strategy_name == order_strategy.strategy_name
if offset_cond and symbol_cond and strategy_cond:
self.write_log(u'撤销本地停止单:{},{} {}@{}'
.format(stop_orderid, order.direction, order.price, order.volume))
order.status = Status.CANCELLED
order.cancel_time = str(self.last_dt)
self.active_stop_orders.pop(stop_orderid, None)
if strategy:
strategy.on_stop_order(order)
def cross_stop_order(self, bar: BarData = None, tick: TickData = None):
"""
Cross stop order with last bar/tick data.
"""
vt_symbol = bar.vt_symbol if bar else tick.vt_symbol
for stop_orderid in list(self.active_stop_orders.keys()):
stop_order = self.active_stop_orders[stop_orderid]
strategy = self.order_strategy_dict.get(stop_orderid, None)
if stop_order.vt_symbol != vt_symbol or stop_order is None or strategy is None:
continue
# threshold price used to decide whether a buy-direction stop order is triggered
if bar:
long_cross_price = round_to(value=bar.low_price, target=self.get_price_tick(vt_symbol))
long_cross_price -= self.get_price_tick(vt_symbol)
# threshold price used to decide whether a sell-direction stop order is triggered
short_cross_price = round_to(value=bar.high_price, target=self.get_price_tick(vt_symbol))
short_cross_price += self.get_price_tick(vt_symbol)
# best possible fill price for a buy order submitted before this point in time
long_best_price = round_to(value=bar.open_price,
target=self.get_price_tick(vt_symbol)) + self.get_price_tick(vt_symbol)
# best possible fill price for a sell order submitted before this point in time
short_best_price = round_to(value=bar.open_price,
target=self.get_price_tick(vt_symbol)) - self.get_price_tick(vt_symbol)
else:
long_cross_price = tick.last_price
short_cross_price = tick.last_price
long_best_price = tick.last_price
short_best_price = tick.last_price
# Check whether stop order can be triggered.
long_cross = stop_order.direction == Direction.LONG and stop_order.price <= long_cross_price
short_cross = stop_order.direction == Direction.SHORT and stop_order.price >= short_cross_price
if not long_cross and not short_cross:
continue
# Create order data.
self.limit_order_count += 1
symbol, exchange = extract_vt_symbol(vt_symbol)
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(self.limit_order_count),
direction=stop_order.direction,
offset=stop_order.offset,
price=stop_order.price,
volume=stop_order.volume,
status=Status.ALLTRADED,
gateway_name=self.gateway_name,
)
order.datetime = self.last_dt
self.write_log(f'停止单被触发:\n{stop_order.__dict__}\n=>委托单{order.__dict__}')
self.limit_orders[order.vt_orderid] = order
# Create trade data.
if long_cross:
trade_price = max(stop_order.price, long_best_price)
else:
trade_price = min(stop_order.price, short_best_price)
self.trade_count += 1
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
time=self.last_dt.strftime("%Y-%m-%d %H:%M:%S"),
datetime=self.last_dt,
gateway_name=self.gateway_name,
)
trade.strategy_name = strategy.strategy_name
trade.datetime = self.last_dt
self.write_log(f'停止单触发成交:{trade.__dict__}')
self.trade_dict[trade.vt_tradeid] = trade
self.trades[trade.vt_tradeid] = copy.copy(trade)
# Update stop order.
stop_order.vt_orderids.append(order.vt_orderid)
stop_order.status = StopOrderStatus.TRIGGERED
self.active_stop_orders.pop(stop_order.stop_orderid)
# Push update to strategy.
strategy.on_stop_order(stop_order)
strategy.on_order(order)
self.append_trade(trade)
holding = self.get_position_holding(vt_symbol=trade.vt_symbol, gateway_name=self.gateway_name)
holding.update_trade(trade)
strategy.on_trade(trade)
def cross_limit_order(self, bar: BarData = None, tick: TickData = None):
"""基于最新数据撮合限价单"""
vt_symbol = bar.vt_symbol if bar else tick.vt_symbol
# 遍历限价单字典中的所有限价单
for vt_orderid in list(self.active_limit_orders.keys()):
order = self.active_limit_orders.get(vt_orderid, None)
if order.vt_symbol != vt_symbol:
continue
strategy = self.order_strategy_dict.get(order.vt_orderid, None)
if strategy is None:
self.write_error(u'找不到vt_orderid:{}对应的策略'.format(order.vt_orderid))
continue
if bar:
buy_cross_price = round_to(value=bar.low_price,
target=self.get_price_tick(vt_symbol)) + self.get_price_tick(
vt_symbol) # 若买入方向限价单价格高于该价格,则会成交
sell_cross_price = round_to(value=bar.high_price,
target=self.get_price_tick(vt_symbol)) - self.get_price_tick(
vt_symbol) # 若卖出方向限价单价格低于该价格,则会成交
buy_best_cross_price = round_to(value=bar.open_price,
target=self.get_price_tick(vt_symbol)) + self.get_price_tick(
vt_symbol) # 在当前时间点前发出的买入委托可能的最优成交价
sell_best_cross_price = round_to(value=bar.open_price,
target=self.get_price_tick(vt_symbol)) - self.get_price_tick(
vt_symbol) # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buy_cross_price = tick.last_price
sell_cross_price = tick.last_price
buy_best_cross_price = tick.last_price
sell_best_cross_price = tick.last_price
# 判断是否会成交
buy_cross = order.direction == Direction.LONG and order.price >= buy_cross_price
sell_cross = order.direction == Direction.SHORT and order.price <= sell_cross_price
# 如果发生了成交
if buy_cross or sell_cross:
# 推送成交数据
self.trade_count += 1 # 成交编号自增1
trade_id = str(self.trade_count)
symbol, exchange = extract_vt_symbol(vt_symbol)
trade = TradeData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
tradeid=trade_id,
orderid=order.orderid,
direction=order.direction,
offset=order.offset,
volume=order.volume,
time=self.last_dt.strftime("%Y-%m-%d %H:%M:%S"),
datetime=self.last_dt
)
# Taking a buy order as an example:
# 1. suppose the current bar's OHLC is 100, 125, 90, 110
# 2. suppose that at the end of the previous bar (the start of this bar) the strategy sent a limit buy at 105
# 3. the actual fill would then be at 100 rather than 105, because 100 was the best market price when the order went out
if buy_cross:
trade_price = min(order.price, buy_best_cross_price)
else:
trade_price = max(order.price, sell_best_cross_price)
trade.price = trade_price
# 记录该合约来自哪个策略实例
trade.strategy_name = strategy.strategy_name
strategy.on_trade(trade)
for cov_trade in self.convert_spd_trade(trade):
self.trade_dict[cov_trade.vt_tradeid] = cov_trade
self.trades[cov_trade.vt_tradeid] = copy.copy(cov_trade)
self.write_log(u'vt_trade_id:{0}'.format(cov_trade.vt_tradeid))
# 更新持仓缓存数据
holding = self.get_position_holding(cov_trade.vt_symbol, self.gateway_name)
holding.update_trade(cov_trade)
self.write_log(u'{} : crossLimitOrder: TradeId:{}, posBuffer = {}'.format(cov_trade.strategy_name,
cov_trade.tradeid,
holding.to_str()))
# 写入交易记录
self.append_trade(cov_trade)
# 更新资金曲线
if 'SPD' not in cov_trade.vt_symbol:
fund_kline = self.get_fund_kline(cov_trade.strategy_name)
if fund_kline:
fund_kline.update_trade(cov_trade)
# 推送委托数据
order.traded = order.volume
order.status = Status.ALLTRADED
strategy.on_order(order)
# 从字典中删除该限价单
self.active_limit_orders.pop(vt_orderid, None)
# 实时计算模式
self.realtime_calculate()
def convert_spd_trade(self, trade):
"""转换为品种对的交易记录"""
if trade.exchange != Exchange.SPD:
return [trade]
try:
active_symbol, active_rate, passive_symbol, passive_rate, spd_type = trade.symbol.split('-')
active_rate = int(active_rate)
passive_rate = int(passive_rate)
active_exchange = self.get_exchange(active_symbol)
active_vt_symbol = active_symbol + '.' + active_exchange.value
passive_exchange = self.get_exchange(passive_symbol)
# passive_vt_symbol = active_symbol + '.' + passive_exchange.value
# 主动腿成交记录
act_trade = TradeData(gateway_name=self.gateway_name,
symbol=active_symbol,
exchange=active_exchange,
orderid='spd_' + str(trade.orderid),
tradeid='spd_act_' + str(trade.tradeid),
direction=trade.direction,
offset=trade.offset,
strategy_name=trade.strategy_name,
price=self.get_price(active_vt_symbol),
volume=int(trade.volume * active_rate),
time=trade.time,
datetime=trade.datetime
)
# 被动腿成交记录
# 交易方向与spd合约方向相反
pas_trade = TradeData(gateway_name=self.gateway_name,
symbol=passive_symbol,
exchange=passive_exchange,
orderid='spd_' + str(trade.orderid),
tradeid='spd_pas_' + str(trade.tradeid),
direction=Direction.LONG if trade.direction == Direction.SHORT else Direction.SHORT,
offset=trade.offset,
strategy_name=trade.strategy_name,
time=trade.time,
datetime=trade.datetime
)
# Derive the passive-leg price from the spread type and the active-leg price
if spd_type == 'BJ':
pas_trade.price = (act_trade.price * active_rate * 100 / trade.price) / passive_rate
else:
pas_trade.price = (act_trade.price * active_rate - trade.price) / passive_rate
pas_trade.price = round_to(value=pas_trade.price, target=self.get_price_tick(pas_trade.vt_symbol))
pas_trade.volume = int(trade.volume * passive_rate)
pas_trade.time = trade.time
# Return the original trade plus the active-leg and passive-leg trade records
return [trade, act_trade, pas_trade]
except Exception as ex:
self.write_error(u'转换主动/被动腿异常:{}'.format(str(ex)))
return [trade]
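# Worked example for the leg-price derivation above (illustrative numbers only,
# not from any real contract): for a ratio spread ('BJ') the code implies
# spd_price = act_price * act_rate * 100 / (pas_price * pas_rate), so a spread fill
# at 120 with act_price=4000, act_rate=1, pas_rate=1 gives
# pas_price = 4000 * 1 * 100 / 120 / 1 ≈ 3333.33, then rounded to the price tick.
# For a difference spread, spd_price = act_price * act_rate - pas_price * pas_rate,
# so a spread fill at 50 with the same numbers gives pas_price = (4000 - 50) / 1 = 3950.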
def update_pos_buffer(self):
"""Roll position buffers: move today's positions into yesterday's."""
for k, v in self.pos_holding_dict.items():
if v.long_td > 0:
self.write_log(u'调整多单持仓:今仓{}=> 0 昨仓{} => 昨仓:{}'.format(v.long_td, v.long_yd, v.long_pos))
v.long_td = 0
v.long_yd = v.long_pos
if v.short_td > 0:
self.write_log(u'调整空单持仓:今仓{}=> 0 昨仓{} => 昨仓:{}'.format(v.short_td, v.short_yd, v.short_pos))
v.short_td = 0
v.short_yd = v.short_pos
def get_data_path(self):
"""
Return the directory where data files are saved.
:return:
"""
if self.data_path is not None:
data_folder = self.data_path
else:
data_folder = os.path.abspath(os.path.join(os.getcwd(), 'data'))
self.data_path = data_folder
if not os.path.exists(data_folder):
os.makedirs(data_folder)
return data_folder
def get_logs_path(self):
"""
Return the directory where log files are saved.
:return:
"""
if self.logs_path is not None:
logs_folder = self.logs_path
else:
logs_folder = os.path.abspath(os.path.join(os.getcwd(), 'log'))
self.logs_path = logs_folder
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
return logs_folder
def create_logger(self, strategy_name=None, debug=False):
"""
Create a logger.
:param strategy_name: name of the strategy instance (None creates the engine-level logger)
:param debug: if True, log verbosely at DEBUG level; otherwise only errors are recorded
:return:
"""
if strategy_name is None:
filename = os.path.abspath(os.path.join(self.get_logs_path(), '{}'.format(
self.test_name if len(self.test_name) > 0 else 'portfolio_test')))
print(u'create logger:{}'.format(filename))
self.logger = setup_logger(file_name=filename,
name=self.test_name,
log_level=logging.DEBUG if debug else logging.ERROR,
backtesing=True)
else:
filename = os.path.abspath(
os.path.join(self.get_logs_path(), '{}_{}'.format(self.test_name, str(strategy_name))))
print(u'create logger:{}'.format(filename))
self.strategy_loggers[strategy_name] = setup_logger(file_name=filename,
name=str(strategy_name),
log_level=logging.DEBUG if debug else logging.ERROR,
backtesing=True)
def write_log(self, msg: str, strategy_name: str = None, level: int = logging.DEBUG):
"""Write a log record, optionally through a per-strategy logger."""
if strategy_name is None:
# write to the engine-level log file
if not self.logger:
self.create_logger(debug=self.debug)
if self.logger:
self.logger.log(msg=msg, level=level)
else:
if strategy_name not in self.strategy_loggers:
self.create_logger(strategy_name=strategy_name, debug=self.debug)
if strategy_name in self.strategy_loggers:
self.strategy_loggers[strategy_name].log(msg=msg, level=level)
def write_error(self, msg, strategy_name=None):
"""记录异常"""
if strategy_name is None:
if self.logger:
self.logger.error(msg)
else:
self.create_logger(debug=self.debug)
else:
if strategy_name in self.strategy_loggers:
self.strategy_loggers[strategy_name].error(msg)
else:
self.create_logger(strategy_name=strategy_name, debug=self.debug)
try:
self.strategy_loggers[strategy_name].error(msg)
except Exception as ex:
print('{}'.format(datetime.now()), file=sys.stderr)
print('could not create cta logger for {},excption:{},trace:{}'.format(strategy_name, str(ex),
traceback.format_exc()))
print(msg, file=sys.stderr)
def output(self, content):
"""Print content to stdout, prefixed with the test name."""
print(self.test_name + "\t" + content)
def realtime_calculate(self):
"""Match closing trades against open positions and update results in real time.
Long and short positions may coexist."""
if len(self.trade_dict) < 1:
return
# Fetch all unprocessed trades
vt_tradeids = list(self.trade_dict.keys())
result_list = []  # closed-trade results
longid = ''
shortid = ''
# Process the trade records one by one
for vt_tradeid in vt_tradeids:
trade = self.trade_dict.pop(vt_tradeid, None)
if trade is None:
continue
if trade.volume == 0:
continue
# buy trade
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
self.write_log(f'{trade.vt_symbol} buy, price:{trade.price},volume:{trade.volume}')
# Push into the long-position queue
self.long_position_list.append(trade)
# cover trade,
elif trade.direction == Direction.LONG and trade.offset == Offset.CLOSE:
g_id = trade.vt_tradeid  # trade group id (one group per multi-lot close)
g_result = None  # combined result of the group
cover_volume = trade.volume
self.write_log(f'{trade.vt_symbol} cover:{cover_volume}')
while cover_volume > 0:
# No short position to close against: abnormal state
if len(self.short_position_list) == 0:
self.write_error(u'异常!没有空单持仓,不能cover')
raise Exception(u'异常!没有空单持仓,不能cover')
return
cur_short_pos_list = [s_pos.volume for s_pos in self.short_position_list]
self.write_log(u'{}当前空单:{}'.format(trade.vt_symbol, cur_short_pos_list))
# Only match positions from the same strategy and the same contract
pop_indexs = [i for i, val in enumerate(self.short_position_list) if
val.vt_symbol == trade.vt_symbol and val.strategy_name == trade.strategy_name]
if len(pop_indexs) < 1:
self.write_error(u'异常,{}没有对应symbol:{}的空单持仓'.format(trade.strategy_name, trade.vt_symbol))
raise Exception(u'realtimeCalculate2() Exception,没有对应symbol:{0}的空单持仓'.format(trade.vt_symbol))
return
pop_index = pop_indexs[0]
# Pop the still-open short trade
open_trade = self.short_position_list.pop(pop_index)
# Open short volume does not exceed the closing volume
if cover_volume >= open_trade.volume:
self.write_log(f'cover volume:{cover_volume}, 满足:{open_trade.volume}')
cover_volume = cover_volume - open_trade.volume
if cover_volume > 0:
self.write_log(u'剩余待平数量:{}'.format(cover_volume))
self.write_log(
f'{open_trade.vt_symbol} coverd, price: {trade.price},volume:{open_trade.volume}')
result = TradingResult(open_price=open_trade.price,
open_datetime=open_trade.datetime,
exit_price=trade.price,
close_datetime=trade.datetime,
volume=-open_trade.volume,
rate=self.get_commission_rate(trade.vt_symbol),
slippage=self.get_slippage(trade.vt_symbol),
size=self.get_size(trade.vt_symbol),
group_id=g_id,
fix_commission=self.get_fix_commission(trade.vt_symbol))
t = OrderedDict()
t['gid'] = g_id
t['strategy'] = open_trade.strategy_name
t['vt_symbol'] = open_trade.vt_symbol
t['open_time'] = open_trade.time
t['open_price'] = open_trade.price
t['direction'] = u'Short'
t['close_time'] = trade.time
t['close_price'] = trade.price
t['volume'] = open_trade.volume
t['profit'] = result.pnl
t['commission'] = result.commission
self.trade_pnl_list.append(t)
# Update strategy pnl only for non-spread (non-SPD) contracts
if not open_trade.vt_symbol.endswith('SPD'):
# 更新策略实例的累加盈亏
self.pnl_strategy_dict.update(
{open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name,
0) + result.pnl})
msg = u'gid:{} {}[{}:开空tid={}:{}]-[{}.平空tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \
.format(g_id, open_trade.vt_symbol, open_trade.time, shortid, open_trade.price,
trade.time, vt_tradeid, trade.price,
open_trade.volume, result.pnl, result.commission)
self.write_log(msg)
result_list.append(result)
if g_result is None:
if cover_volume > 0:
# 属于组合
g_result = copy.deepcopy(result)
else:
# 更新组合的数据
g_result.turnover = g_result.turnover + result.turnover
g_result.commission = g_result.commission + result.commission
g_result.slippage = g_result.slippage + result.slippage
g_result.pnl = g_result.pnl + result.pnl
# 所有仓位平完
if cover_volume == 0:
self.write_log(u'所有平空仓位撮合完毕')
g_result.volume = abs(trade.volume)
# Open short volume exceeds the closing volume: reduce the remaining open volume
else:
remain_volume = open_trade.volume - cover_volume
self.write_log(f'{open_trade.vt_symbol} short pos: {open_trade.volume} => {remain_volume}')
result = TradingResult(open_price=open_trade.price,
open_datetime=open_trade.datetime,
exit_price=trade.price,
close_datetime=trade.datetime,
volume=-cover_volume,
rate=self.get_commission_rate(trade.vt_symbol),
slippage=self.get_slippage(trade.vt_symbol),
size=self.get_size(trade.vt_symbol),
group_id=g_id,
fix_commission=self.get_fix_commission(trade.vt_symbol))
t = OrderedDict()
t['gid'] = g_id
t['strategy'] = open_trade.strategy_name
t['vt_symbol'] = open_trade.vt_symbol
t['open_time'] = open_trade.time
t['open_price'] = open_trade.price
t['direction'] = u'Short'
t['close_time'] = trade.time
t['close_price'] = trade.price
t['volume'] = cover_volume
t['profit'] = result.pnl
t['commission'] = result.commission
self.trade_pnl_list.append(t)
# 非自定义套利对,才更新盈亏
if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')):
# 更新策略实例的累加盈亏
self.pnl_strategy_dict.update(
{open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name,
0) + result.pnl})
msg = u'gid:{} {}[{}:开空tid={}:{}]-[{}.平空tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \
.format(g_id, open_trade.vt_symbol, open_trade.time, shortid, open_trade.price,
trade.time, vt_tradeid, trade.price,
cover_volume, result.pnl, result.commission)
self.write_log(msg)
# 更新(减少)开仓单的volume,重新推进开仓单列表中
open_trade.volume = remain_volume
self.write_log(u'更新(减少)开仓单的volume,重新推进开仓单列表中:{}'.format(open_trade.volume))
self.short_position_list.append(open_trade)
cur_short_pos_list = [s_pos.volume for s_pos in self.short_position_list]
self.write_log(u'当前空单:{}'.format(cur_short_pos_list))
cover_volume = 0
result_list.append(result)
if g_result is not None:
# 更新组合的数据
g_result.turnover = g_result.turnover + result.turnover
g_result.commission = g_result.commission + result.commission
g_result.slippage = g_result.slippage + result.slippage
g_result.pnl = g_result.pnl + result.pnl
g_result.volume = abs(trade.volume)
if g_result is not None:
self.write_log(u'组合净盈亏:{0}'.format(g_result.pnl))
# Short Trade
elif trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
self.write_log(f'{trade.vt_symbol}, short: price:{trade.price},volume{trade.volume}')
self.short_position_list.append(trade)
continue
# sell trade
elif trade.direction == Direction.SHORT and trade.offset == Offset.CLOSE:
g_id = trade.vt_tradeid # 交易组(多个平仓数为一组)
g_result = None # 组合的交易结果
sell_volume = trade.volume
while sell_volume > 0:
if len(self.long_position_list) == 0:
self.write_error(f'异常,没有{trade.vt_symbol}的多仓')
raise RuntimeError(u'realtimeCalculate2() Exception,没有开多单')
return
pop_indexs = [i for i, val in enumerate(self.long_position_list) if
val.vt_symbol == trade.vt_symbol and val.strategy_name == trade.strategy_name]
if len(pop_indexs) < 1:
self.write_error(f'没有{trade.strategy_name}对应的symbol{trade.vt_symbol}多单数据,')
raise RuntimeError(
f'realtimeCalculate2() Exception,没有对应的symbol{trade.vt_symbol}多单数据,')
return
cur_long_pos_list = [s_pos.volume for s_pos in self.long_position_list]
self.write_log(u'{}当前多单:{}'.format(trade.vt_symbol, cur_long_pos_list))
pop_index = pop_indexs[0]
open_trade = self.long_position_list.pop(pop_index)
# Open long volume does not exceed the closing volume
if sell_volume >= open_trade.volume:
self.write_log(f'{open_trade.vt_symbol},Sell Volume:{sell_volume} 满足:{open_trade.volume}')
sell_volume = sell_volume - open_trade.volume
self.write_log(f'{open_trade.vt_symbol},sell, price:{trade.price},volume:{open_trade.volume}')
result = TradingResult(open_price=open_trade.price,
open_datetime=open_trade.datetime,
exit_price=trade.price,
close_datetime=trade.datetime,
volume=open_trade.volume,
rate=self.get_commission_rate(trade.vt_symbol),
slippage=self.get_slippage(trade.vt_symbol),
size=self.get_size(trade.vt_symbol),
group_id=g_id,
fix_commission=self.get_fix_commission(trade.vt_symbol))
t = OrderedDict()
t['gid'] = g_id
t['strategy'] = open_trade.strategy_name
t['vt_symbol'] = open_trade.vt_symbol
t['open_time'] = open_trade.time
t['open_price'] = open_trade.price
t['direction'] = u'Long'
t['close_time'] = trade.time
t['close_price'] = trade.price
t['volume'] = open_trade.volume
t['profit'] = result.pnl
t['commission'] = result.commission
self.trade_pnl_list.append(t)
# 非自定义套利对,才更新盈亏
if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')):
# 更新策略实例的累加盈亏
self.pnl_strategy_dict.update(
{open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name,
0) + result.pnl})
msg = u'gid:{} {}[{}:开多tid={}:{}]-[{}.平多tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \
.format(g_id, open_trade.vt_symbol,
open_trade.time, longid, open_trade.price,
trade.time, vt_tradeid, trade.price,
open_trade.volume, result.pnl, result.commission)
self.write_log(msg)
result_list.append(result)
if g_result is None:
if sell_volume > 0:
# 属于组合
g_result = copy.deepcopy(result)
else:
# 更新组合的数据
g_result.turnover = g_result.turnover + result.turnover
g_result.commission = g_result.commission + result.commission
g_result.slippage = g_result.slippage + result.slippage
g_result.pnl = g_result.pnl + result.pnl
if sell_volume == 0:
g_result.volume = abs(trade.volume)
# Open long volume exceeds the closing volume: reduce the remaining open volume
else:
remain_volume = open_trade.volume - sell_volume
self.write_log(f'{open_trade.vt_symbol} long pos: {open_trade.volume} => {remain_volume}')
result = TradingResult(open_price=open_trade.price,
open_datetime=open_trade.datetime,
exit_price=trade.price,
close_datetime=trade.datetime,
volume=sell_volume,
rate=self.get_commission_rate(trade.vt_symbol),
slippage=self.get_slippage(trade.vt_symbol),
size=self.get_size(trade.vt_symbol),
group_id=g_id,
fix_commission=self.get_fix_commission(trade.vt_symbol))
t = OrderedDict()
t['gid'] = g_id
t['strategy'] = open_trade.strategy_name
t['vt_symbol'] = open_trade.vt_symbol
t['open_time'] = open_trade.time
t['open_price'] = open_trade.price
t['direction'] = u'Long'
t['close_time'] = trade.time
t['close_price'] = trade.price
t['volume'] = sell_volume
t['profit'] = result.pnl
t['commission'] = result.commission
self.trade_pnl_list.append(t)
# 非自定义套利对,才更新盈亏
if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')):
# 更新策略实例的累加盈亏
self.pnl_strategy_dict.update(
{open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name,
0) + result.pnl})
msg = u'Gid:{} {}[{}:开多tid={}:{}]-[{}.平多tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \
.format(g_id, open_trade.vt_symbol, open_trade.time, longid, open_trade.price,
trade.time, vt_tradeid, trade.price, sell_volume, result.pnl,
result.commission)
self.write_log(msg)
# Reduce the open long volume and push it back into the long-position list
open_trade.volume = remain_volume
self.long_position_list.append(open_trade)
sell_volume = 0
result_list.append(result)
if g_result is not None:
# 更新组合的数据
g_result.turnover = g_result.turnover + result.turnover
g_result.commission = g_result.commission + result.commission
g_result.slippage = g_result.slippage + result.slippage
g_result.pnl = g_result.pnl + result.pnl
g_result.volume = abs(trade.volume)
if g_result is not None:
self.write_log(u'组合净盈亏:{0}'.format(g_result.pnl))
# Compute margin usage
occupy_money = 0.0  # total margin in use
occupy_long_money_dict = {}  # long margin, keyed by underlying symbol
occupy_short_money_dict = {}  # short margin, keyed by underlying symbol
occupy_underly_symbol_set = set()  # all underlying symbols
long_pos_dict = {}
short_pos_dict = {}
if len(self.long_position_list) > 0:
for t in self.long_position_list:
# Skip margin for spread (SPD) contracts
if t.vt_symbol.endswith('SPD') or t.vt_symbol.endswith('SPD99'):
continue
# 当前持仓的保证金
if self.use_margin:
cur_occupy_money = t.price * abs(t.volume) * self.get_size(t.vt_symbol) * self.get_margin_rate(
t.vt_symbol)
else:
cur_occupy_money = self.get_price(t.vt_symbol) * abs(t.volume) * self.get_size(
t.vt_symbol) * self.get_margin_rate(t.vt_symbol)
# 更新该合约短号的累计保证金
underly_symbol = get_underlying_symbol(t.symbol)
occupy_underly_symbol_set.add(underly_symbol)
occupy_long_money_dict.update(
{underly_symbol: occupy_long_money_dict.get(underly_symbol, 0) + cur_occupy_money})
if t.vt_symbol in long_pos_dict:
long_pos_dict[t.vt_symbol] += abs(t.volume)
else:
long_pos_dict[t.vt_symbol] = abs(t.volume)
if len(self.short_position_list) > 0:
for t in self.short_position_list:
# Skip margin for spread (SPD) contracts
if t.vt_symbol.endswith('SPD') or t.vt_symbol.endswith('SPD99'):
continue
# 当前空单保证金
if self.use_margin:
cur_occupy_money = max(self.get_price(t.vt_symbol), t.price) * abs(t.volume) * self.get_size(
t.vt_symbol) * self.get_margin_rate(t.vt_symbol)
else:
cur_occupy_money = self.get_price(t.vt_symbol) * abs(t.volume) * self.get_size(
t.vt_symbol) * self.get_margin_rate(t.vt_symbol)
# 该合约短号的累计空单保证金
underly_symbol = get_underlying_symbol(t.symbol)
occupy_underly_symbol_set.add(underly_symbol)
occupy_short_money_dict.update(
{underly_symbol: occupy_short_money_dict.get(underly_symbol, 0) + cur_occupy_money})
if t.vt_symbol in short_pos_dict:
short_pos_dict[t.vt_symbol] += abs(t.volume)
else:
short_pos_dict[t.vt_symbol] = abs(t.volume)
# Sum margin across underlyings, taking max(long, short) for locked positions
for underly_symbol in occupy_underly_symbol_set:
occupy_money += max(occupy_long_money_dict.get(underly_symbol, 0),
occupy_short_money_dict.get(underly_symbol, 0))
# Available funds = current net value - occupied margin
self.avaliable = self.net_capital - occupy_money
# 当前保证金占比
self.percent = round(float(occupy_money * 100 / self.net_capital), 2)
# 更新最大保证金占比
self.max_occupy_rate = max(self.max_occupy_rate, self.percent)
# Check whether any closing trades occurred
if len(result_list) == 0:
msg = u''
if len(self.long_position_list) > 0:
msg += u'持多仓{0},'.format(str(long_pos_dict))
if len(self.short_position_list) > 0:
msg += u'持空仓{0},'.format(str(short_pos_dict))
msg += u'资金占用:{0},仓位:{1}%'.format(occupy_money, self.percent)
self.write_log(msg)
return
# Aggregate statistics over the closed-trade results
for result in result_list:
if result.pnl > 0:
self.winning_result += 1
self.total_winning += result.pnl
else:
self.losing_result += 1
self.total_losing += result.pnl
self.cur_capital += result.pnl
self.max_capital = max(self.cur_capital, self.max_capital)
self.net_capital = max(self.net_capital, self.cur_capital)
self.max_net_capital = max(self.net_capital, self.max_net_capital)
# self.maxVolume = max(self.maxVolume, result.volume)
drawdown = self.net_capital - self.max_net_capital
drawdown_rate = round(float(drawdown * 100 / self.max_net_capital), 4)
self.pnl_list.append(result.pnl)
self.time_list.append(result.close_datetime)
self.capital_list.append(self.cur_capital)
self.drawdown_list.append(drawdown)
self.drawdown_rate_list.append(drawdown_rate)
self.total_trade_count += 1
self.total_turnover += result.turnover
self.total_commission += result.commission
self.total_slippage += result.slippage
msg = u'[gid:{}] {} 交易盈亏:{},交易手续费:{}回撤:{}/{},账号平仓权益:{},持仓权益:{},累计手续费:{}' \
.format(result.group_id, result.close_datetime, result.pnl, result.commission, drawdown,
drawdown_rate, self.cur_capital, self.net_capital, self.total_commission)
self.write_log(msg)
# 重新计算一次avaliable
self.avaliable = self.net_capital - occupy_money
self.percent = round(float(occupy_money * 100 / self.net_capital), 2)
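# Worked example of the margin netting above (illustrative numbers only): if
# underlying 'rb' carries 80,000 of long margin and 50,000 of short margin (a
# partly locked position), only max(80000, 50000) = 80000 counts as occupied
# margin; with net_capital = 1,000,000 this gives avaliable = 920,000 and
# percent = 8.0.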
def saving_daily_data(self, d, c, m, commission, benchmark=0):
"""Save the daily results (net value, margin usage, per-strategy pnl)."""
data = {}
data['date'] = d.strftime('%Y/%m/%d')  # date
data['capital'] = c  # current realized (closed-trade) net value
data['max_capital'] = m  # previous maximum net value
today_holding_profit = 0  # unrealized pnl of open positions
long_pos_occupy_money = 0
short_pos_occupy_money = 0
strategy_pnl = {}
for strategy in self.strategies.keys():
strategy_pnl.update({strategy: self.pnl_strategy_dict.get(strategy, 0)})
positionMsg = ""
for longpos in self.long_position_list:
# 不计算套利合约的持仓盈亏
if longpos.vt_symbol.endswith('SPD') or longpos.vt_symbol.endswith('SPD99'):
continue
symbol = longpos.vt_symbol
# 计算持仓浮盈浮亏/占用保证金
holding_profit = 0
last_price = self.get_price(symbol)
if last_price is not None:
holding_profit = (last_price - longpos.price) * longpos.volume * self.get_size(symbol)
long_pos_occupy_money += last_price * abs(longpos.volume) * self.get_size(
symbol) * self.get_margin_rate(symbol)
# 账号的持仓盈亏
today_holding_profit += holding_profit
# 计算每个策略实例的持仓盈亏
strategy_pnl.update({longpos.strategy_name: strategy_pnl.get(longpos.strategy_name, 0) + holding_profit})
positionMsg += "{},long,p={},v={},m={};".format(symbol, longpos.price, longpos.volume, holding_profit)
for shortpos in self.short_position_list:
# 不计算套利合约的持仓盈亏
if shortpos.vt_symbol.endswith('SPD') or shortpos.vt_symbol.endswith('SPD99'):
continue
symbol = shortpos.vt_symbol
# 计算持仓浮盈浮亏/占用保证金
holding_profit = 0
last_price = self.get_price(symbol)
if last_price is not None:
holding_profit = (shortpos.price - last_price) * shortpos.volume * self.get_size(symbol)
short_pos_occupy_money += last_price * abs(shortpos.volume) * self.get_size(
symbol) * self.get_margin_rate(symbol)
# 账号的持仓盈亏
today_holding_profit += holding_profit
# 计算每个策略实例的持仓盈亏
strategy_pnl.update({shortpos.strategy_name: strategy_pnl.get(shortpos.strategy_name, 0) + holding_profit})
positionMsg += "{},short,p={},v={},m={};".format(symbol, shortpos.price, shortpos.volume, holding_profit)
data['net'] = c + today_holding_profit  # today's net value (including unrealized pnl)
data['rate'] = (c + today_holding_profit) / self.init_capital
data['occupy_money'] = max(long_pos_occupy_money, short_pos_occupy_money)
data['occupy_rate'] = data['occupy_money'] / data['capital']
data['commission'] = commission
data.update(self.price_dict)
data.update(strategy_pnl)
self.daily_list.append(data)
# 更新每日浮动净值
self.net_capital = data['net']
# Update the maximum net value (including unrealized pnl)
if data['net'] > self.max_net_capital:
self.max_net_capital = data['net']
self.max_net_capital_time = data['date']
drawdown_rate = round((float(self.max_net_capital - data['net']) * 100) / self.max_net_capital, 4)
if drawdown_rate > self.daily_max_drawdown_rate:
self.daily_max_drawdown_rate = drawdown_rate
self.max_drawdown_rate_time = data['date']
msg = u'{}: net={}, capital={} max={} margin={} commission={}, pos: {}' \
.format(data['date'],
data['net'], c, m,
today_holding_profit,
commission,
positionMsg)
if not self.debug:
self.output(msg)
else:
self.write_log(msg)
# Roll today's positions into yesterday's positions
for holding in self.holdings.values():
if holding.long_td > 0:
self.write_log(
f'{holding.vt_symbol} 多单今仓{holding.long_td},昨仓:{holding.long_yd}=> 昨仓:{holding.long_pos}')
holding.long_td = 0
holding.long_yd = holding.long_pos
if holding.short_td > 0:
self.write_log(
f'{holding.vt_symbol} 空单今仓{holding.short_td},昨仓:{holding.short_yd}=> 昨仓:{holding.short_pos}')
holding.short_td = 0
holding.short_yd = holding.short_pos
# ---------------------------------------------------------------------
def export_trade_result(self):
"""
Export the trade results (open -> close pairs with realized pnl)
and the daily net-value table.
:return:
"""
if len(self.trade_pnl_list) == 0:
self.write_log('no traded records')
return
s = self.test_name.replace('&', '')
s = s.replace(' ', '')
trade_list_csv_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_trade_list.csv'.format(s)))
self.write_log(u'save trade records to:{}'.format(trade_list_csv_file))
import csv
csv_write_file = open(trade_list_csv_file, 'w', encoding='utf8', newline='')
fieldnames = ['gid', 'strategy', 'vt_symbol', 'open_time', 'open_price', 'direction', 'close_time',
'close_price',
'volume', 'profit', 'commission']
writer = csv.DictWriter(f=csv_write_file, fieldnames=fieldnames, dialect='excel')
writer.writeheader()
for row in self.trade_pnl_list:
writer.writerow(row)
# 导出每日净值记录表
if not self.daily_list:
return
if self.daily_report_name == '':
daily_csv_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_daily_list.csv'.format(s)))
else:
daily_csv_file = self.daily_report_name
self.write_log(u'save daily records to:{}'.format(daily_csv_file))
csv_write_file2 = open(daily_csv_file, 'w', encoding='utf8', newline='')
fieldnames = ['date', 'capital', 'net', 'max_capital', 'rate', 'commission', 'long_money', 'short_money',
'occupy_money', 'occupy_rate', 'today_margin_long', 'today_margin_short']
# 添加合约的每日close价
fieldnames.extend(sorted(self.price_dict.keys()))
# 添加策略列表
fieldnames.extend(sorted(self.strategies.keys()))
writer2 = csv.DictWriter(f=csv_write_file2, fieldnames=fieldnames, dialect='excel')
writer2.writeheader()
for row in self.daily_list:
writer2.writerow(row)
if self.is_plot_daily:
# 生成净值曲线图片
df = pd.DataFrame(self.daily_list)
df = df.set_index('date')
from vnpy.trader.utility import display_dual_axis
plot_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_plot.png'.format(s)))
# 双坐标输出,左侧坐标是净值(比率),右侧是各策略的实际资金收益曲线
display_dual_axis(df=df, columns1=['rate'], columns2=list(self.strategies.keys()), image_name=plot_file)
return
def get_result(self):
# Return the aggregated backtest results
d = {}
d['init_capital'] = self.init_capital
d['profit'] = self.cur_capital - self.init_capital
d['max_capital'] = self.max_net_capital # 取消原 maxCapital
if len(self.pnl_list) == 0:
return {}, [], []
d['max_pnl'] = max(self.pnl_list)
d['min_pnl'] = min(self.pnl_list)
d['max_occupy_rate'] = self.max_occupy_rate
d['total_trade_count'] = self.total_trade_count
d['total_turnover'] = self.total_turnover
d['total_commission'] = self.total_commission
d['total_slippage'] = self.total_slippage
d['time_list'] = self.time_list
d['pnl_list'] = self.pnl_list
d['capital_list'] = self.capital_list
d['drawdown_list'] = self.drawdown_list
d['drawdown_rate_list'] = self.drawdown_rate_list # 净值最大回撤率列表
d['winning_rate'] = round(100 * self.winning_result / len(self.pnl_list), 4)
average_winning = 0 # 这里把数据都初始化为0
average_losing = 0
profit_loss_ratio = 0
if self.winning_result:
average_winning = self.total_winning / self.winning_result # 平均每笔盈利
if self.losing_result:
average_losing = self.total_losing / self.losing_result # 平均每笔亏损
if average_losing:
profit_loss_ratio = -average_winning / average_losing # 盈亏比
d['average_winning'] = average_winning
d['average_losing'] = average_losing
d['profit_loss_ratio'] = profit_loss_ratio
# Compute the Sharpe ratio
if not self.daily_list:
return {}, [], []
capital_net_list = []
capital_list = []
for row in self.daily_list:
capital_net_list.append(row['net'])
capital_list.append(row['capital'])
capital = pd.Series(capital_net_list)
log_returns = np.log(capital).diff().fillna(0)
sharpe = (log_returns.mean() * 252) / (log_returns.std() * np.sqrt(252))
d['sharpe'] = sharpe
return d, capital_net_list, capital_list
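# Sketch of the Sharpe calculation above with made-up numbers (comment-only):
# for daily net values [100.0, 100.2, 100.1, 100.5] the log returns are
# ln(100.2/100.0), ln(100.1/100.2), ln(100.5/100.1), and
# sharpe = mean(log_returns) * 252 / (std(log_returns) * sqrt(252)),
# i.e. an annualised mean/std ratio assuming 252 trading days.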
def show_backtesting_result(self):
"""Display the backtest results."""
d, daily_net_capital, daily_capital = self.get_result()
if len(d) == 0:
self.output(u'无交易结果')
return {}, ''
# 导出交易清单
self.export_trade_result()
result_info = OrderedDict()
# 输出
self.output('-' * 30)
result_info.update({u'第一笔交易': str(d['time_list'][0])})
self.output(u'第一笔交易:\t%s' % d['time_list'][0])
result_info.update({u'最后一笔交易': str(d['time_list'][-1])})
self.output(u'最后一笔交易:\t%s' % d['time_list'][-1])
result_info.update({u'总交易次数': d['total_trade_count']})
self.output(u'总交易次数:\t%s' % format_number(d['total_trade_count']))
result_info.update({u'期初资金': d['init_capital']})
self.output(u'期初资金:\t%s' % format_number(d['init_capital']))
result_info.update({u'总盈亏': d['profit']})
self.output(u'总盈亏:\t%s' % format_number(d['profit']))
result_info.update({u'资金最高净值': d['max_capital']})
self.output(u'资金最高净值:\t%s' % format_number(d['max_capital']))
result_info.update({u'资金最高净值时间': str(self.max_net_capital_time)})
self.output(u'资金最高净值时间:\t%s' % self.max_net_capital_time)
result_info.update({u'每笔最大盈利': d['max_pnl']})
self.output(u'每笔最大盈利:\t%s' % format_number(d['max_pnl']))
result_info.update({u'每笔最大亏损': d['min_pnl']})
self.output(u'每笔最大亏损:\t%s' % format_number(d['min_pnl']))
result_info.update({u'净值最大回撤': min(d['drawdown_list'])})
self.output(u'净值最大回撤: \t%s' % format_number(min(d['drawdown_list'])))
result_info.update({u'净值最大回撤率': self.daily_max_drawdown_rate})
self.output(u'净值最大回撤率: \t%s' % format_number(self.daily_max_drawdown_rate))
result_info.update({u'净值最大回撤时间': str(self.max_drawdown_rate_time)})
self.output(u'净值最大回撤时间:\t%s' % self.max_drawdown_rate_time)
result_info.update({u'胜率': d['winning_rate']})
self.output(u'胜率:\t%s' % format_number(d['winning_rate']))
result_info.update({u'盈利交易平均值': d['average_winning']})
self.output(u'盈利交易平均值\t%s' % format_number(d['average_winning']))
result_info.update({u'亏损交易平均值': d['average_losing']})
self.output(u'亏损交易平均值\t%s' % format_number(d['average_losing']))
result_info.update({u'盈亏比': d['profit_loss_ratio']})
self.output(u'盈亏比:\t%s' % format_number(d['profit_loss_ratio']))
result_info.update({u'最大资金占比': d['max_occupy_rate']})
self.output(u'最大资金占比:\t%s' % format_number(d['max_occupy_rate']))
result_info.update({u'平均每笔盈利': d['profit'] / d['total_trade_count']})
self.output(u'平均每笔盈利:\t%s' % format_number(d['profit'] / d['total_trade_count']))
result_info.update({u'平均每笔滑点成本': d['total_slippage'] / d['total_trade_count']})
self.output(u'平均每笔滑点成本:\t%s' % format_number(d['total_slippage'] / d['total_trade_count']))
result_info.update({u'平均每笔佣金': d['total_commission'] / d['total_trade_count']})
self.output(u'平均每笔佣金:\t%s' % format_number(d['total_commission'] / d['total_trade_count']))
result_info.update({u'Sharpe Ratio': d['sharpe']})
self.output(u'Sharpe Ratio:\t%s' % format_number(d['sharpe']))
# Save backtest results / trade records / daily statistics to the database
self.save_result_to_mongo(result_info)
return result_info
def save_setting_to_mongo(self):
"""Save the backtest settings to MongoDB."""
self.task_id = self.test_setting.get('task_id', str(uuid1()))
# 保存到mongo得配置
save_mongo = self.test_setting.get('save_mongo', {})
if len(save_mongo) == 0:
return
if not self.mongo_api:
self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017))
d = {
'task_id': self.task_id,  # id of this single backtest task
'name': self.test_name,  # backtest instance name: strategy + parameters + time
'group_id': self.test_setting.get('group_id', datetime.now().strftime('%y-%m-%d')),  # backtest group id
'status': 'start',
'task_start_time': datetime.now(),  # task start time
'run_host': socket.gethostname(),  # host running the task
'test_setting': self.test_setting,  # backtest settings
'strategy_setting': self.strategy_setting,  # strategy settings
}
# 保存入数据库
self.mongo_api.db_insert(
db_name=self.gateway_name,
col_name='tasks',
d=d)
def save_fail_to_mongo(self, fail_msg):
# 保存到mongo得配置
save_mongo = self.test_setting.get('save_mongo', {})
if len(save_mongo) == 0:
return
if not self.mongo_api:
self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017))
# 更新数据到数据库回测记录中
flt = {'task_id': self.task_id}
d = self.mongo_api.db_query_one(
db_name=self.gateway_name,
col_name='tasks',
flt=flt)
if d:
d.update({'status': 'fail'})  # mark the task as failed
d.update({'fail_msg': fail_msg})
self.write_log(u'更新回测结果至数据库')
self.mongo_api.db_update(
db_name=self.gateway_name,
col_name='tasks',
filter_dict=flt,
data_dict=d,
replace=False)
def save_result_to_mongo(self, result_info):
# 保存到mongo得配置
save_mongo = self.test_setting.get('save_mongo', {})
if len(save_mongo) == 0:
return
if not self.mongo_api:
self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017))
# 更新数据到数据库回测记录中
flt = {'task_id': self.task_id}
d = self.mongo_api.db_query_one(
db_name=self.gateway_name,
col_name='tasks',
flt=flt)
if d:
d.update({'status': 'finish'})  # mark the task as finished
d.update(result_info)  # attach the backtest results
d.update({'task_finish_time': datetime.now()})  # record the finish time
d.update({'trade_list': binary.Binary(zlib.compress(pickle.dumps(self.trade_pnl_list)))})  # trade records
d.update({'daily_list': binary.Binary(zlib.compress(pickle.dumps(self.daily_list)))})  # daily net-value records
self.write_log(u'更新回测结果至数据库')
self.mongo_api.db_update(
db_name=self.gateway_name,
col_name='tasks',
filter_dict=flt,
data_dict=d,
replace=False)
def put_strategy_event(self, strategy: CtaTemplate):
"""Push a strategy update event; ignored in backtesting."""
pass
def clear_backtesting_result(self):
"""Clear the results of any previous backtest."""
# 清空限价单相关
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
# 清空成交相关
self.trade_count = 0
self.trade_dict.clear()
self.trades.clear()
self.trade_pnl_list = []
def append_trade(self, trade: TradeData):
"""
Append the trade, grouped by strategy name, to the logs\{strategy_name}_trade.csv file.
:param trade:
:return:
"""
strategy_name = getattr(trade, 'strategy', self.test_name)
trade_fields = ['symbol', 'exchange', 'vt_symbol', 'tradeid',
'vt_tradeid', 'orderid', 'vt_orderid',
'direction',
'offset', 'price', 'volume', 'time']
d = OrderedDict()
try:
for k in trade_fields:
if k in ['exchange', 'direction', 'offset']:
d[k] = getattr(trade, k).value
else:
d[k] = getattr(trade, k, '')
trade_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_trade.csv'.format(strategy_name)))
self.append_data(file_name=trade_file, dict_data=d)
except Exception as ex:
self.write_error(u'写入交易记录csv出错:{},{}'.format(str(ex), traceback.format_exc()))
# 保存记录相关
def append_data(self, file_name: str, dict_data: OrderedDict, field_names: list = None):
"""
Append a row of data to a csv file.
:param file_name: full path of the csv file
:param dict_data: OrderedDict
:return:
"""
if field_names is None or field_names == []:
dict_fieldnames = list(dict_data.keys())
else:
dict_fieldnames = field_names
try:
if not os.path.exists(file_name):
self.write_log(u'create csv file:{}'.format(file_name))
with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel')
self.write_log(u'write csv header:{}'.format(dict_fieldnames))
writer.writeheader()
writer.writerow(dict_data)
else:
with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel',
extrasaction='ignore')
writer.writerow(dict_data)
except Exception as ex:
self.write_error(u'append_data exception:{}'.format(str(ex)))
########################################################################
class TradingResult(object):
"""Result of a single round-trip trade."""
def __init__(self, open_price, open_datetime, exit_price, close_datetime, volume, rate, slippage, size, group_id,
fix_commission=0.0):
"""Constructor"""
self.open_price = open_price  # open price
self.exit_price = exit_price  # close price
self.open_datetime = open_datetime  # open datetime
self.close_datetime = close_datetime  # close datetime
self.volume = volume  # traded volume (sign indicates direction)
self.group_id = group_id  # master trade id (for multi-lot closes)
self.turnover = (self.open_price + self.exit_price) * size * abs(volume)  # turnover
if fix_commission > 0:
self.commission = fix_commission * abs(self.volume)
else:
self.commission = abs(self.turnover * rate)  # commission cost
self.slippage = slippage * 2 * size * abs(volume)  # slippage cost (both sides)
self.pnl = ((self.exit_price - self.open_price) * volume * size
- self.commission - self.slippage)  # net pnl
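# Minimal usage sketch for TradingResult (illustrative numbers only): a 2-lot long
# round trip opened at 3500 and closed at 3550, contract size 10, commission rate
# 0.0001 and slippage of 1 price unit per side.
if __name__ == '__main__':
    from datetime import datetime
    demo = TradingResult(open_price=3500, open_datetime=datetime(2020, 1, 2, 9, 30),
                         exit_price=3550, close_datetime=datetime(2020, 1, 2, 14, 30),
                         volume=2, rate=0.0001, slippage=1, size=10, group_id='demo')
    # turnover = (3500 + 3550) * 10 * 2 = 141000
    # commission = 141000 * 0.0001 = 14.1, slippage = 1 * 2 * 10 * 2 = 40
    # pnl = (3550 - 3500) * 2 * 10 - 14.1 - 40 = 945.9
    print(demo.turnover, demo.commission, demo.slippage, demo.pnl)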
|
the-stack_106_14038
|
import importlib.util
import os
def vyLoadModuleFromFilePath(filePath, moduleName=None):
if moduleName == None:
replacements = [
('/', '.'),
('\\', '.'),
('-', '_'),
(' ', '_'),
]
filePathSansExt = os.path.splitext(filePath)[0]
for issue, replacement in replacements:
filePathSansExt = filePathSansExt.replace(issue, replacement)
moduleName = filePathSansExt
spec = importlib.util.spec_from_file_location(moduleName, filePath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
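# Usage sketch (the plugin path below is hypothetical, purely for illustration):
# load a module from an arbitrary .py file and inspect the derived module name.
if __name__ == '__main__':
    mod = vyLoadModuleFromFilePath('plugins/my-plugin.py')
    # The derived name drops the extension, replaces '/' and '\\' with '.' and
    # '-' and ' ' with '_', e.g. 'plugins/my-plugin.py' -> 'plugins.my_plugin'.
    print(mod.__name__)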
|
the-stack_106_14040
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum information utility functions for states.
"""
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.states.statevector import Statevector
from qiskit.quantum_info.states.densitymatrix import DensityMatrix
from qiskit.quantum_info.operators.channel import SuperOp
def partial_trace(state, qargs):
"""Return reduced density matrix by tracing out part of quantum state.
If all subsystems are traced over this returns the
:meth:`~qiskit.quantum_info.DensityMatrix.trace` of the
input state.
Args:
state (Statevector or DensityMatrix): the input state.
qargs (list): The subsystems to trace over.
Returns:
DensityMatrix: The reduced density matrix.
Raises:
QiskitError: if input state is invalid.
"""
state = _format_state(state, validate=False)
# Compute traced shape
traced_shape = state._op_shape.remove(qargs=qargs)
# Convert vector shape to matrix shape
traced_shape._dims_r = traced_shape._dims_l
traced_shape._num_qargs_r = traced_shape._num_qargs_l
# If we are tracing over all subsystems we return the trace
if traced_shape.size == 0:
return state.trace()
# Statevector case
if isinstance(state, Statevector):
trace_systems = len(state._op_shape.dims_l()) - 1 - np.array(qargs)
arr = state._data.reshape(state._op_shape.tensor_shape)
rho = np.tensordot(arr, arr.conj(), axes=(trace_systems, trace_systems))
rho = np.reshape(rho, traced_shape.shape)
return DensityMatrix(rho, dims=traced_shape._dims_l)
# Density matrix case
# Empty partial trace case.
if not qargs:
return state.copy()
# Trace out the first subsystem to avoid copying the whole density matrix
dims = state.dims(qargs)
tr_op = SuperOp(np.eye(dims[0]).reshape(1, dims[0] ** 2), input_dims=[dims[0]], output_dims=[1])
ret = state.evolve(tr_op, [qargs[0]])
# Trace over remaining subsystems
for qarg, dim in zip(qargs[1:], dims[1:]):
tr_op = SuperOp(np.eye(dim).reshape(1, dim**2), input_dims=[dim], output_dims=[1])
ret = ret.evolve(tr_op, [qarg])
# Remove traced over subsystems which are listed as dimension 1
ret._op_shape = traced_shape
return ret
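# Usage sketch (comment-only example): tracing out one qubit of a Bell state
# leaves the maximally mixed single-qubit state.
#
# bell = Statevector(np.array([1, 0, 0, 1]) / np.sqrt(2))
# reduced = partial_trace(bell, [0])  # DensityMatrix ~ [[0.5, 0], [0, 0.5]]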
def shannon_entropy(pvec, base=2):
r"""Compute the Shannon entropy of a probability vector.
The Shannon entropy of a probability vector
:math:`\vec{p} = [p_0, ..., p_{n-1}]` is defined as
.. math::
H(\vec{p}) = -\sum_{i=0}^{n-1} p_i \log_b(p_i)
where :math:`b` is the log base (default 2), and
:math:`0 \log_b(0) \equiv 0`.
Args:
pvec (array_like): a probability vector.
base (int): the base of the logarithm [Default: 2].
Returns:
float: The Shannon entropy H(pvec).
"""
if base == 2:
def logfn(x):
return -x * np.log2(x)
elif base == np.e:
def logfn(x):
return -x * np.log(x)
else:
def logfn(x):
return -x * np.log(x) / np.log(base)
h_val = 0.0
for x in pvec:
if 0 < x < 1:
h_val += logfn(x)
return h_val
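# Quick check (comment-only example): a fair coin carries one bit of entropy,
# shannon_entropy([0.5, 0.5]) == 1.0, while shannon_entropy([1.0, 0.0]) == 0.0
# because 0*log(0) is treated as 0.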
def _format_state(state, validate=True):
"""Format input state into class object"""
if isinstance(state, list):
state = np.array(state, dtype=complex)
if isinstance(state, np.ndarray):
ndim = state.ndim
if ndim == 1:
state = Statevector(state)
elif ndim == 2:
dim1, dim2 = state.shape
if dim2 == 1:
state = Statevector(state)
elif dim1 == dim2:
state = DensityMatrix(state)
if not isinstance(state, (Statevector, DensityMatrix)):
raise QiskitError("Input is not a quantum state")
if validate and not state.is_valid():
raise QiskitError("Input quantum state is not valid")
return state
def _funm_svd(matrix, func):
"""Apply real scalar function to singular values of a matrix.
Args:
matrix (array_like): (N, N) Matrix at which to evaluate the function.
func (callable): Callable object that evaluates a scalar function f.
Returns:
ndarray: (N, N) value of the matrix function specified by func,
evaluated at ``matrix``.
"""
import scipy.linalg as la
unitary1, singular_values, unitary2 = la.svd(matrix)
diag_func_singular = np.diag(func(singular_values))
return unitary1.dot(diag_func_singular).dot(unitary2)
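# Comment-only note: because _funm_svd applies a scalar function to the singular
# values, calling _funm_svd(mat, np.sqrt) on a positive semidefinite matrix is one
# way to obtain a matrix square root (useful for fidelity-style calculations).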
|
the-stack_106_14042
|
# The default ``config_Global_File.py``
# flake8: noqa
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git', '.tox']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
# prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
# prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
# prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
# If `True`, rope will insert new module imports as
# `from <package> import <module>` by default.
prefs['prefer_module_from_imports'] = False
# If `True`, rope will transform a comma list of imports into
# multiple separate import statements when organizing
# imports.
prefs['split_imports'] = False
# If `True`, rope will remove all top-level import statements and
# reinsert them at the top of the module when making changes.
prefs['pull_imports_to_top'] = True
# If `True`, rope will sort imports alphabetically by module name instead
# of alphabetically by import statement, with from imports after normal
# imports.
prefs['sort_imports_alphabetically'] = False
# Location of implementation of
# rope.base.oi.type_hinting.interfaces.ITypeHintingFactory In general
# case, you don't have to change this value, unless you're an rope expert.
# Change this value to inject you own implementations of interfaces
# listed in module rope.base.oi.type_hinting.providers.interfaces
# For example, you can add you own providers for Django Models, or disable
# the search type-hinting in a class hierarchy, etc.
prefs['type_hinting_factory'] = (
'rope.base.oi.type_hinting.factory.default_type_hinting_factory')
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
|
the-stack_106_14043
|
#!/usr/bin/env python
import os
from setuptools import setup
module_dir = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
setup(
name='mp_workshop',
version='2018.07.27',
install_requires=["pymatgen", "jupyter", "atomate", "graphviz", "maggma"],
description='Repository for workshop code',
package_data={"mp_workshop.data.data_files": ["*.json"]},
python_requires='>=3.6',
)
|
the-stack_106_14045
|
# -*- coding: utf-8 -*-
"""This is the decoder module.
This module contains the MetadataDecoder class, which decodes metadata information
provided by shairport-sync. It assumes the metadata was encoded by an
AirPlay-style server (such as iTunes) or shairport-sync itself.
"""
from datetime import datetime, timedelta
import logging
from shairport_sync_metadata import VERSION
from shairport_sync_metadata.CoverArt import CoverArt
logger = logging.getLogger(__name__)
# Code adapted from
# https://github.com/brookstalley/live-mir/blob/master/metadataparser.py
# (MIT License)
class MetadataDecoder(object):
__instance = None
def __new__(cls):
if MetadataDecoder.__instance is None:
MetadataDecoder.__instance = object.__new__(cls)
MetadataDecoder.__instance.fieldList = {
# ssnc values
"PICT": ["picture", cls.pictHandler],
"pcen": ["pictureend", cls.rtptime_handler],
"pcst": ["picturestart", cls.rtptime_handler],
"mdst": ["metadatastart", cls.rtptime_handler],
"stal": ["metadatastall", cls.string_handler],
"mden": ["metadataend", cls.rtptime_handler],
"snua": ["useragent", cls.string_handler],
"snam": ["appleclientname", cls.string_handler],
"pbeg": ["playbegin", cls.zero_byte_handler],
"pend": ["playend", cls.zero_byte_handler],
"pfls": ["playflush", cls.zero_byte_handler],
"prsm": ["playresume", cls.zero_byte_handler],
"pffr": ["playfirstframereceived", cls.zero_byte_handler],
"pvol": ["playvolume", cls.play_volume_handler],
"daid": ["dacpid", cls.intHandler],
"acre": ["activeremotetoken", cls.string_handler],
"prgr": ["playprogress", cls.progress_handler],
"caps": ["playstate", cls.one_byte_handler],
"flsr": ["flushtime", cls.rtptime_handler],
# need better handlers
"clip": ["clientip", cls.string_handler], # (value b'10.0.1.144')
"svip": ["serverip", cls.string_handler], # (value b'10.0.1.62')
"dapo": ["remoteportnumber",
cls.string_handler], # (value b'3689')
# Core values
"mikd": ["itemkind", cls.one_byte_handler],
"minm": ["itemname", cls.string_handler],
"mper": ["persistentid", cls.eight_byte_handler],
"miid": ["itemid", cls.four_byte_handler],
"asal": ["songalbum", cls.string_handler],
"asar": ["songartist", cls.string_handler],
"ascm": ["songcomment", cls.string_handler],
"asco": ["songcompilation", cls.bool_handler],
"asbr": ["songbitrate", cls.two_byte_handler],
"ascp": ["songcomposer", cls.string_handler],
"asda": ["songdateadded",
cls.date_handler], # often datetime.now()
"aspl": ["songdateplayed", cls.date_handler],
"asdm": ["songdatemodified", cls.date_handler],
"asdc": ["songdisccount", cls.two_byte_handler],
"asdn": ["songdiscnumber", cls.two_byte_handler],
"aseq": ["songeqpreset", cls.string_handler],
"asgn": ["songgenre", cls.string_handler],
"asdt": ["songdescription", cls.string_handler],
"asrv": ["songrelativevolume", cls.one_byte_handler],
"assr": ["songsamplerate", cls.four_byte_handler],
"assz": ["songsize", cls.four_byte_handler],
"asst": ["songstarttime", cls.four_byte_handler],
"assp": ["songstoptime", cls.four_byte_handler],
"astm": ["songtime", cls.four_byte_handler],
"astc": ["songtrackcount", cls.two_byte_handler],
"astn": ["songtracknumber", cls.two_byte_handler],
"asur": ["songuserrating", cls.one_byte_handler],
"asyr": ["songyear", cls.two_byte_handler],
"asfm": ["songformat", cls.string_handler],
"asdb": ["songdisabled", cls.bool_handler],
"asdk": ["songdatakind", cls.one_byte_handler],
"asbt": ["songbeatsperminute", cls.two_byte_handler],
"agrp": ["songgrouping", cls.string_handler],
"ascd": ["songcodectype", cls.string_handler],
"ascs": ["songcodecsubtype", cls.intHandler],
"asct": ["songcategory", cls.string_handler],
"ascn": ["songcontentdescription", cls.string_handler],
"ascr": ["songcontentrating", cls.intHandler],
"asri": ["songartistid", cls.eight_byte_handler],
"asai": ["songalbumid", cls.intHandler],
"askd": ["songlastskipdate", cls.date_handler],
"assn": ["songsortname", cls.string_handler],
"assu": ["songsortalbum", cls.string_handler],
"asaa": ["songalbumartist", cls.string_handler],
"asbk": ["bookmarkable", cls.bool_handler],
"asbo": ["songbookmark", cls.four_byte_handler],
"asdr": ["songdatereleased", cls.date_handler],
"ased": ["songextradata", cls.two_byte_handler],
"asgp": ["songgapless", cls.bool_handler],
"ashp": ["songhasbeenplayed", cls.bool_handler],
"asls": ["songlongsize", cls.eight_byte_handler],
"aspu": ["songpodcasturl", cls.string_handler],
"assa": ["sortartist", cls.string_handler],
"assc": ["sortcomposer", cls.string_handler],
"assl": ["sortalbumartist", cls.string_handler],
"asss": ["sortseriesname", cls.string_handler],
"aeNV": ["itunesnormvolume", cls.intHandler],
"aePC": ["itunesispodcast", cls.bool_handler],
"aeHV": ["ituneshasvideo", cls.bool_handler],
"aeMK": ["itunesmediakind", cls.intHandler],
"aeSN": ["itunesseriesname", cls.string_handler],
"aeEN": ["itunesepisodenumberstring", cls.string_handler],
"aeSU": ["itunesseasonnumber", cls.four_byte_handler],
"aeES": ["itunesepisodesort", cls.four_byte_handler],
"aeMk": ["itunesextendedmediakind", cls.four_byte_handler],
"aeGD": ["itunesgaplessencdr", cls.four_byte_handler],
"aeGE": ["itunesgaplessencdel", cls.four_byte_handler],
"aeGH": ["itunesgaplessheur", cls.four_byte_handler],
"aeGR": ["itunesgaplessresy", cls.eight_byte_handler],
"aeGU": ["itunesgaplessdur", cls.eight_byte_handler],
"aeHD": ["itunesishdvideo", cls.bool_handler],
"aeSE": ["itunesstorepersid", cls.eight_byte_handler],
"aeXD": ["itunesxid", cls.string_handler],
"aeDR": ["itunesdrmuserid", cls.eight_byte_handler],
"aeND": ["itunesnondrmuserid", cls.eight_byte_handler],
"aeK1": ["itunesdrmkey1id", cls.eight_byte_handler],
"aeK2": ["itunesdrmkey2id", cls.eight_byte_handler],
"aeDV": ["itunesdrmversions", cls.four_byte_handler],
"aeDP": ["itunesdrmplatformid", cls.four_byte_handler],
"aeAI": ["itunesitmsartistid", cls.four_byte_handler],
"aePI": ["itunesitmsplaylistid", cls.four_byte_handler],
"aeCI": ["itunesitmscomposerid", cls.four_byte_handler],
"aeGI": ["itunesitmsgenreid", cls.four_byte_handler],
# found more unknowns during testing
"aeCM": ["unknownaeCM", cls.default_string_handler],
"aeCR": ["unknownaeCR", cls.default_string_handler],
"aeCS": ["unknownaeCS", cls.default_string_handler],
"aeDL": ["unknownaeDL", cls.default_string_handler],
"aeFA": ["unknownaeFA", cls.default_string_handler],
"aeGs": ["unknownaeGs", cls.default_string_handler],
"aeMX": ["unknownaeMX", cls.default_string_handler],
"aeSI": ["unknownaeSI", cls.eight_byte_handler],
"aels": ["unknownaels", cls.default_string_handler],
"ajAE": ["unknownajAE", cls.default_string_handler],
"ajAS": ["unknownajAS", cls.default_string_handler],
"ajAT": ["unknownajAT", cls.default_string_handler],
"ajAV": ["unknownajAV", cls.default_string_handler],
"ajal": ["unknownajal", cls.default_string_handler],
"ajcA": ["unknownajcA", cls.default_string_handler],
"ajuw": ["unknownajuw", cls.default_string_handler],
"amvc": ["unknownamvc", cls.default_string_handler],
"amvm": ["unknownamvm", cls.default_string_handler],
"amvn": ["unknownamvn", cls.default_string_handler],
"asac": ["unknownasac", cls.two_byte_handler],
"asas": ["unknownasas", cls.default_string_handler],
"ases": ["unknownases", cls.default_string_handler],
"askp": ["unknownaskp", cls.default_string_handler],
"aslr": ["unknownaslr", cls.default_string_handler],
"aspc": ["unknownaspc", cls.default_string_handler],
"asrs": ["unknownasrs", cls.default_string_handler],
"awrk": ["unknownawrk", cls.default_string_handler],
"mext": ["unknownmext", cls.two_byte_handler],
"meia": ["unknownmeia", cls.four_byte_handler],
"meip": ["unknownmeip", cls.four_byte_handler]
}
return MetadataDecoder.__instance
def ParseItem(self, typ, code, rawItem):
assert isinstance(rawItem, (bytes, bytearray))
rawData = rawItem
# logger.debug("Looking up {}:{} {}".format(typ, code, rawData))
try:
fieldInfo = self.fieldList[code]
except KeyError:
logger.warning('Key not found: {} (value {})'.format(
code, rawData))
return
# override handler on mdst for 'core'
if (typ == 'core' and code == 'mdst'):
data = self.one_byte_handler(rawData)
else:
data = fieldInfo[1](self, rawData)
fieldName = fieldInfo[0]
logger.debug("Setting %s : %s to %s" % (code, fieldName, data))
item = {"type": typ, "code": code, "name": fieldName, "value": data}
return item
def default_string_handler(self, rawData):
if rawData == b'\x00':
return 0
elif rawData == b'\x00\x00':
return 0
elif rawData == b'\x00\x00\x00\x00':
return 0
elif rawData == b'\x00\x00\x00\x00\x00\x00\x00\x00':
return 0
return self.string_handler(rawData)
def string_handler(self, rawData):
try:
return rawData.decode("utf-8")
except UnicodeDecodeError:
logger.warning('Unable to decode binary data {}'.format(rawData))
return rawData
def bool_handler(self, rawData):
if (rawData[0] > 0):
return True
else:
return False
def intHandler(self, rawData):
return 0
def pictHandler(self, rawData):
cover_art = CoverArt(binary=rawData)
if cover_art.binary is not None:
size = len(cover_art.binary)
else:
size = 0
logger.debug('PICT {} size={}'.format(
cover_art.as_dict(base64=False), size))
return cover_art
def zero_byte_handler(self, rawData):
"""Used for fields whose presence is the message"""
return True
def one_byte_handler(self, rawData):
return int(rawData[0])
def two_byte_handler(self, rawData):
#stringed = rawData.decode("utf-8")
return (rawData[0] << 8) + rawData[1]
def four_byte_handler(self, rawData):
return (rawData[0] << 24) + (rawData[1] << 16) + (
rawData[2] << 8) + rawData[3]
def eight_byte_handler(self, rawData):
return (rawData[0] << 56) + (rawData[1] << 48) + (rawData[2] << 40) + (
rawData[3] << 32) + (rawData[4] << 24) + (rawData[5] << 16) + (
rawData[6] << 8) + rawData[7]
# http://www.neotitans.com/resources/python/python-unsigned-32bit-value.html
def to_int32_signed(self, x):
if x > 0xFFFFFFFF:
raise OverflowError
if x > 0x7FFFFFFF:
x = int(0x100000000 - x)
if x < 2147483648:
return -x
else:
return -2147483648
return x
def date_handler(self, rawData):
intTime = self.four_byte_handler(rawData)
intTime_signed = self.to_int32_signed(intTime)
# an uninitialized value seems to be represented by
# decimal intTime : 2212144096 intTime 32-bit signed : -2082823200
# logger.debug('intTime : {} intTime 32-bit signed : {}'.format(intTime, intTime_signed))
if (intTime_signed < 0):
# intTime_31bit = int(bin(intTime_signed & 0x7fffffff), 2)
timestamp = datetime(1970, 1,
1) + timedelta(seconds=intTime_signed)
else:
timestamp = datetime(1970, 1,
1) + timedelta(seconds=intTime_signed)
# logger.debug(timestamp)
return timestamp
# def time_handler(self, rawData):
# stringTime = rawData.decode("utf-8")
# logger.debug('time_handler: {}'.format(stringTime))
# try:
# # need this approach since .fromtimestamp is ValueError: timestamp out of range for platform time # https://stackoverflow.com/questions/36179914/timestamp-out-of-range-for-platform-localtime-gmtime-function
# OverflowError: timestamp out of range for platform time_t
# timestamp = datetime(1970, 1, 1) + timedelta(seconds=int(stringTime)/100)
# logger.debug(timestamp)
# return timestamp
# except ValueError:
# logger.warning('ValueError for value {}'.format(rawData))
# return rawData
def rtptime_handler(self, rawData):
stringTime = rawData.decode("utf-8")
logger.debug('rtptime_handler: {}'.format(stringTime))
try:
rtptime = int(stringTime)
return rtptime
except ValueError:
logger.warning('ValueError for value {}'.format(rawData))
return rawData
def progress_handler(self, rawData):
stringTimes = rawData.decode("utf-8")
timeList = stringTimes.split("/")
progress = {
"start": int(timeList[0]),
"current": int(timeList[1]),
"end": int(timeList[2])
}
logger.debug('progress: {}'.format(progress))
return progress
def play_volume_handler(self, rawData):
volumesString = rawData.decode("utf-8")
volumesList = volumesString.split(",")
volumes = {
'airplay_volume': float(volumesList[0]),
'volume': float(volumesList[1]),
'lowest_volume': float(volumesList[2]),
'highest_volume': float(volumesList[3]),
}
# logger.debug('volumes: {}'.format(volumes))
return volumes
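# Usage sketch (the payloads below are made up, purely for illustration):
# shairport-sync emits (type, code, raw-data) triples and ParseItem maps them
# to named fields.
if __name__ == '__main__':
    decoder = MetadataDecoder()
    print(decoder.ParseItem('core', 'minm', b'Some Track Title'))
    # -> {'type': 'core', 'code': 'minm', 'name': 'itemname', 'value': 'Some Track Title'}
    print(decoder.ParseItem('ssnc', 'prgr', b'88200/132300/1764000'))
    # -> item whose 'value' is a dict with 'start', 'current' and 'end' RTP timestamps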
|
the-stack_106_14046
|
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1ProjectEntityResourceRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {"owner": "str", "project": "str", "uuid": "str"}
attribute_map = {"owner": "owner", "project": "project", "uuid": "uuid"}
def __init__(self, owner=None, project=None, uuid=None): # noqa: E501
"""V1ProjectEntityResourceRequest - a model defined in Swagger""" # noqa: E501
self._owner = None
self._project = None
self._uuid = None
self.discriminator = None
if owner is not None:
self.owner = owner
if project is not None:
self.project = project
if uuid is not None:
self.uuid = uuid
@property
def owner(self):
"""Gets the owner of this V1ProjectEntityResourceRequest. # noqa: E501
:return: The owner of this V1ProjectEntityResourceRequest. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this V1ProjectEntityResourceRequest.
:param owner: The owner of this V1ProjectEntityResourceRequest. # noqa: E501
:type: str
"""
self._owner = owner
@property
def project(self):
"""Gets the project of this V1ProjectEntityResourceRequest. # noqa: E501
:return: The project of this V1ProjectEntityResourceRequest. # noqa: E501
:rtype: str
"""
return self._project
@project.setter
def project(self, project):
"""Sets the project of this V1ProjectEntityResourceRequest.
:param project: The project of this V1ProjectEntityResourceRequest. # noqa: E501
:type: str
"""
self._project = project
@property
def uuid(self):
"""Gets the uuid of this V1ProjectEntityResourceRequest. # noqa: E501
:return: The uuid of this V1ProjectEntityResourceRequest. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1ProjectEntityResourceRequest.
:param uuid: The uuid of this V1ProjectEntityResourceRequest. # noqa: E501
:type: str
"""
self._uuid = uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(V1ProjectEntityResourceRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ProjectEntityResourceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
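# Hedged usage sketch (illustrative, not part of the generated module): the
# model is a plain data holder, so construction and serialization look like
#
#     req = V1ProjectEntityResourceRequest(owner="acme", project="demo",
#                                          uuid="1234")
#     req.to_dict()
#     # -> {'owner': 'acme', 'project': 'demo', 'uuid': '1234'}
#     req == V1ProjectEntityResourceRequest(owner="acme", project="demo",
#                                           uuid="1234")
#     # -> True
#
# The owner/project/uuid values here are placeholders.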
|
the-stack_106_14047
|
#!python3.6
import gettext
import pathlib
import sub
import mypackage.mymodule
langPath = str(pathlib.Path('../res/i18n/languages').resolve())
gettext.install('hello', langPath)
#print(_('Hello World !!'))
print(_('MSG000'))
langs = ['ja', 'en', 'de']
lang = 'ja'
while lang:
    # Prompt (Japanese): "Enter a language code (press Enter on empty input to quit)"
    print(f'言語コードを入力してください(未入力+Enterで終了) {langs}: ', end='')
lang = input()
if lang not in langs: continue
l = gettext.translation('hello', langPath, languages=[lang])
l.install()
print(sub.get_message_goodbye())
# print(_('Welcome i18n !!'))
# print(mypackage.mymodule.get_message_goodluck())
#print(sub.get_message_goodbye())
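# Minimal layout sketch (assumption -- this is the standard gettext directory
# convention, not something shown in this script): for
# gettext.translation('hello', langPath, languages=[lang]) to find a catalogue,
# the resource directory is expected to look like
#
#     ../res/i18n/languages/
#         ja/LC_MESSAGES/hello.mo
#         en/LC_MESSAGES/hello.mo
#         de/LC_MESSAGES/hello.mo
#
# where each hello.mo is compiled from the corresponding hello.po (e.g. with
# msgfmt).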
|
the-stack_106_14051
|
"""
Data structures for Streaming, in-memory datasets
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import time
import weakref
import numpy as np
import uuid
from itertools import \
chain, \
product, \
repeat
from collections import defaultdict
from numbers import Number as numeric_type
from yt.funcs import \
iterable, \
ensure_list, \
issue_deprecation_warning
from yt.utilities.io_handler import io_registry
from yt.data_objects.field_data import \
YTFieldData
from yt.data_objects.particle_unions import \
ParticleUnion
from yt.data_objects.grid_patch import \
AMRGridPatch
from yt.data_objects.static_output import \
ParticleFile
from yt.geometry.geometry_handler import \
YTDataChunk
from yt.geometry.grid_geometry_handler import \
GridIndex
from yt.data_objects.octree_subset import \
OctreeSubset
from yt.geometry.oct_geometry_handler import \
OctreeIndex
from yt.geometry.particle_geometry_handler import \
ParticleIndex
from yt.geometry.oct_container import \
OctreeContainer
from yt.geometry.unstructured_mesh_handler import \
UnstructuredIndex
from yt.data_objects.static_output import \
Dataset
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.lib.misc_utilities import \
get_box_grids_level
from yt.geometry.grid_container import \
GridTree, \
MatchPointsToGrids
from yt.utilities.decompose import \
decompose_array, get_psize
from yt.utilities.exceptions import \
YTIllDefinedAMR, \
YTInconsistentGridFieldShape, \
YTInconsistentParticleFieldShape, \
YTInconsistentGridFieldShapeGridDims
from yt.units.yt_array import \
YTQuantity, \
uconcatenate
from yt.utilities.flagging_methods import \
FlaggingGrid
from yt.data_objects.unstructured_mesh import \
SemiStructuredMesh, \
UnstructuredMesh
from yt.extern.six import string_types
from .fields import \
StreamFieldInfo
from yt.frontends.exodus_ii.util import \
get_num_pseudo_dims
from yt.data_objects.unions import MeshUnion
class StreamGrid(AMRGridPatch):
"""
Class representing a single In-memory Grid instance.
"""
__slots__ = ['proc_num']
_id_offset = 0
def __init__(self, id, index):
"""
Returns an instance of StreamGrid with *id*, associated with *filename*
and *index*.
"""
#All of the field parameters will be passed to us as needed.
AMRGridPatch.__init__(self, id, filename = None, index = index)
self._children_ids = []
self._parent_id = -1
self.Level = -1
def set_filename(self, filename):
pass
def __repr__(self):
return "StreamGrid_%04i" % (self.id)
@property
def Parent(self):
if self._parent_id == -1: return None
return self.index.grids[self._parent_id - self._id_offset]
@property
def Children(self):
return [self.index.grids[cid - self._id_offset]
for cid in self._children_ids]
class StreamHandler(object):
def __init__(self, left_edges, right_edges, dimensions,
levels, parent_ids, particle_count, processor_ids,
fields, field_units, code_units, io = None,
particle_types = None, periodicity = (True, True, True)):
if particle_types is None: particle_types = {}
self.left_edges = np.array(left_edges)
self.right_edges = np.array(right_edges)
self.dimensions = dimensions
self.levels = levels
self.parent_ids = parent_ids
self.particle_count = particle_count
self.processor_ids = processor_ids
self.num_grids = self.levels.size
self.fields = fields
self.field_units = field_units
self.code_units = code_units
self.io = io
self.particle_types = particle_types
self.periodicity = periodicity
def get_fields(self):
return self.fields.all_fields
def get_particle_type(self, field):
if field in self.particle_types:
return self.particle_types[field]
else:
return False
class StreamHierarchy(GridIndex):
grid = StreamGrid
def __init__(self, ds, dataset_type = None):
self.dataset_type = dataset_type
self.float_type = 'float64'
self.dataset = weakref.proxy(ds) # for _obtain_enzo
self.stream_handler = ds.stream_handler
self.float_type = "float64"
self.directory = os.getcwd()
GridIndex.__init__(self, ds, dataset_type)
def _count_grids(self):
self.num_grids = self.stream_handler.num_grids
def _parse_index(self):
self.grid_dimensions = self.stream_handler.dimensions
self.grid_left_edge[:] = self.stream_handler.left_edges
self.grid_right_edge[:] = self.stream_handler.right_edges
self.grid_levels[:] = self.stream_handler.levels
self.grid_procs = self.stream_handler.processor_ids
self.grid_particle_count[:] = self.stream_handler.particle_count
mylog.debug("Copying reverse tree")
self.grids = []
        # Grid ids (and parent ids) are 0-indexed; a parent id of -1 means the
        # grid has no parent.
for id in range(self.num_grids):
self.grids.append(self.grid(id, self))
self.grids[id].Level = self.grid_levels[id, 0]
parent_ids = self.stream_handler.parent_ids
if parent_ids is not None:
reverse_tree = self.stream_handler.parent_ids.tolist()
# Initial setup:
for gid, pid in enumerate(reverse_tree):
if pid >= 0:
self.grids[gid]._parent_id = pid
self.grids[pid]._children_ids.append(self.grids[gid].id)
else:
mylog.debug("Reconstructing parent-child relationships")
self._reconstruct_parent_child()
self.max_level = self.grid_levels.max()
mylog.debug("Preparing grids")
temp_grids = np.empty(self.num_grids, dtype='object')
for i, grid in enumerate(self.grids):
if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = None
grid._prepare_grid()
grid._setup_dx()
grid.proc_num = self.grid_procs[i]
temp_grids[i] = grid
self.grids = temp_grids
mylog.debug("Prepared")
def _reconstruct_parent_child(self):
mask = np.empty(len(self.grids), dtype='int32')
mylog.debug("First pass; identifying child grids")
for i, grid in enumerate(self.grids):
get_box_grids_level(self.grid_left_edge[i,:],
self.grid_right_edge[i,:],
self.grid_levels[i] + 1,
self.grid_left_edge, self.grid_right_edge,
self.grid_levels, mask)
ids = np.where(mask.astype("bool"))
grid._children_ids = ids[0] # where is a tuple
mylog.debug("Second pass; identifying parents")
self.stream_handler.parent_ids = np.zeros(
self.stream_handler.num_grids, "int64") - 1
for i, grid in enumerate(self.grids): # Second pass
for child in grid.Children:
child._parent_id = i
# _id_offset = 0
self.stream_handler.parent_ids[child.id] = i
def _initialize_grid_arrays(self):
GridIndex._initialize_grid_arrays(self)
self.grid_procs = np.zeros((self.num_grids,1),'int32')
def _detect_output_fields(self):
# NOTE: Because particle unions add to the actual field list, without
# having the keys in the field list itself, we need to double check
# here.
fl = set(self.stream_handler.get_fields())
fl.update(set(getattr(self, "field_list", [])))
self.field_list = list(fl)
def _populate_grid_objects(self):
for g in self.grids:
g._setup_dx()
self.max_level = self.grid_levels.max()
def _setup_data_io(self):
if self.stream_handler.io is not None:
self.io = self.stream_handler.io
else:
self.io = io_registry[self.dataset_type](self.ds)
def _reset_particle_count(self):
self.grid_particle_count[:] = self.stream_handler.particle_count
for i, grid in enumerate(self.grids):
grid.NumberOfParticles = self.grid_particle_count[i, 0]
def update_data(self, data):
"""
Update the stream data with a new data dict. If fields already exist,
they will be replaced, but if they do not, they will be added. Fields
already in the stream but not part of the data dict will be left
alone.
"""
particle_types = set_particle_types(data[0])
self.stream_handler.particle_types.update(particle_types)
self.ds._find_particle_types()
for i, grid in enumerate(self.grids):
field_units, gdata, number_of_particles = process_data(data[i])
self.stream_handler.particle_count[i] = number_of_particles
self.stream_handler.field_units.update(field_units)
for field in gdata:
if field in grid.field_data:
grid.field_data.pop(field, None)
self.stream_handler.fields[grid.id][field] = gdata[field]
self._reset_particle_count()
# We only want to create a superset of fields here.
for field in self.ds.field_list:
if field[0] == "all":
self.ds.field_list.remove(field)
self._detect_output_fields()
self.ds.create_field_info()
mylog.debug("Creating Particle Union 'all'")
pu = ParticleUnion("all", list(self.ds.particle_types_raw))
self.ds.add_particle_union(pu)
self.ds.particle_types = tuple(set(self.ds.particle_types))
class StreamDataset(Dataset):
_index_class = StreamHierarchy
_field_info_class = StreamFieldInfo
_dataset_type = 'stream'
def __init__(self, stream_handler, storage_filename=None,
geometry="cartesian", unit_system="cgs"):
self.fluid_types += ("stream",)
self.geometry = geometry
self.stream_handler = stream_handler
self._find_particle_types()
name = "InMemoryParameterFile_%s" % uuid.uuid4().hex
from yt.data_objects.static_output import _cached_datasets
_cached_datasets[name] = self
Dataset.__init__(self, name, self._dataset_type,
unit_system=unit_system)
def _parse_parameter_file(self):
self.basename = self.stream_handler.name
self.parameters['CurrentTimeIdentifier'] = time.time()
self.unique_identifier = self.parameters["CurrentTimeIdentifier"]
self.domain_left_edge = self.stream_handler.domain_left_edge.copy()
self.domain_right_edge = self.stream_handler.domain_right_edge.copy()
self.refine_by = self.stream_handler.refine_by
self.dimensionality = self.stream_handler.dimensionality
self.periodicity = self.stream_handler.periodicity
self.domain_dimensions = self.stream_handler.domain_dimensions
self.current_time = self.stream_handler.simulation_time
self.gamma = 5./3.
self.parameters['EOSType'] = -1
self.parameters['CosmologyHubbleConstantNow'] = 1.0
self.parameters['CosmologyCurrentRedshift'] = 1.0
self.parameters['HydroMethod'] = -1
if self.stream_handler.cosmology_simulation:
self.cosmological_simulation = 1
self.current_redshift = self.stream_handler.current_redshift
self.omega_lambda = self.stream_handler.omega_lambda
self.omega_matter = self.stream_handler.omega_matter
self.hubble_constant = self.stream_handler.hubble_constant
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
def _set_units(self):
self.field_units = self.stream_handler.field_units
def _set_code_unit_attributes(self):
base_units = self.stream_handler.code_units
attrs = ('length_unit', 'mass_unit', 'time_unit', 'velocity_unit', 'magnetic_unit')
cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
if isinstance(unit, string_types):
uq = self.quan(1.0, unit)
elif isinstance(unit, numeric_type):
uq = self.quan(unit, cgs_unit)
elif isinstance(unit, YTQuantity):
uq = unit
elif isinstance(unit, tuple):
uq = self.quan(unit[0], unit[1])
else:
raise RuntimeError("%s (%s) is invalid." % (attr, unit))
setattr(self, attr, uq)
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
@property
def _skip_cache(self):
return True
def _find_particle_types(self):
particle_types = set([])
for k, v in self.stream_handler.particle_types.items():
if v:
particle_types.add(k[0])
self.particle_types = tuple(particle_types)
self.particle_types_raw = self.particle_types
class StreamDictFieldHandler(dict):
_additional_fields = ()
@property
def all_fields(self):
self_fields = chain.from_iterable(s.keys() for s in self.values())
self_fields = list(set(self_fields))
fields = list(self._additional_fields) + self_fields
fields = list(set(fields))
return fields
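# Illustrative sketch (not part of the original module): StreamDictFieldHandler
# maps grid ids to per-grid field dicts, and all_fields is the de-duplicated
# union of those field names plus any _additional_fields, e.g.
#
#     sfh = StreamDictFieldHandler()
#     sfh.update({0: {("gas", "density"): arr0},
#                 1: {("gas", "density"): arr1}})
#     sfh._additional_fields = (("io", "particle_mass"),)
#     sorted(sfh.all_fields)
#     # -> [("gas", "density"), ("io", "particle_mass")]
#
# arr0/arr1 stand in for numpy arrays.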
def set_particle_types(data):
particle_types = {}
for key in data.keys():
if key == "number_of_particles":
continue
if len(data[key].shape) == 1:
particle_types[key] = True
else:
particle_types[key] = False
return particle_types
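# Illustrative sketch (not part of the original module): set_particle_types
# flags one-dimensional arrays as particle fields and everything else as grid
# fields, e.g.
#
#     set_particle_types({"particle_mass": np.ones(16),
#                         "density": np.ones((8, 8, 8))})
#     # -> {"particle_mass": True, "density": False}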
def assign_particle_data(ds, pdata, bbox):
"""
Assign particle data to the grids using MatchPointsToGrids. This
will overwrite any existing particle data, so be careful!
"""
for ptype in ds.particle_types_raw:
check_fields = [(ptype, "particle_position_x"),
(ptype, "particle_position")]
if all(f not in pdata for f in check_fields):
pdata_ftype = {}
for f in [k for k in sorted(pdata)]:
if not hasattr(pdata[f], "shape"):
continue
if f == 'number_of_particles':
continue
mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
pdata_ftype[ptype, f] = pdata.pop(f)
pdata_ftype.update(pdata)
pdata = pdata_ftype
# Note: what we need to do here is a bit tricky. Because occasionally this
    # gets called before we properly handle the field detection, we cannot use
# any information about the index. Fortunately for us, we can generate
# most of the GridTree utilizing information we already have from the
# stream handler.
if len(ds.stream_handler.fields) > 1:
pdata.pop("number_of_particles", None)
num_grids = len(ds.stream_handler.fields)
parent_ids = ds.stream_handler.parent_ids
num_children = np.zeros(num_grids, dtype='int64')
# We're going to do this the slow way
mask = np.empty(num_grids, dtype="bool")
for i in range(num_grids):
np.equal(parent_ids, i, mask)
num_children[i] = mask.sum()
levels = ds.stream_handler.levels.astype("int64").ravel()
grid_tree = GridTree(num_grids,
ds.stream_handler.left_edges,
ds.stream_handler.right_edges,
ds.stream_handler.dimensions,
ds.stream_handler.parent_ids,
levels, num_children)
grid_pdata = []
for i in range(num_grids):
grid = {"number_of_particles": 0}
grid_pdata.append(grid)
for ptype in ds.particle_types_raw:
if (ptype, "particle_position_x") in pdata:
x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in 'xyz')
elif (ptype, "particle_position") in pdata:
x, y, z = pdata[ptype, "particle_position"].T
else:
raise KeyError(
"Cannot decompose particle data without position fields!")
pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
particle_grid_inds = pts.find_points_in_tree()
assigned_particles, = (particle_grid_inds >= 0).nonzero()
num_particles = particle_grid_inds.size
num_unassigned = num_particles - assigned_particles.size
if num_unassigned > 0:
m = ("Discarding %s particles (out of %s) that are outside "
"bounding box. ")
eps = np.finfo(x.dtype).eps
s = np.array([[x.min() - eps, x.max() + eps],
[y.min() - eps, y.max() + eps],
[z.min() - eps, z.max() + eps]])
sug_bbox = [
[min(bbox[0, 0], s[0, 0]), max(bbox[0, 1], s[0, 1])],
[min(bbox[1, 0], s[1, 0]), max(bbox[1, 1], s[1, 1])],
[min(bbox[2, 0], s[2, 0]), max(bbox[2, 1], s[2, 1])]]
m += ("Set bbox=%s to avoid this in the future.")
mylog.warning(m % (num_unassigned, num_particles, sug_bbox))
particle_grid_inds = particle_grid_inds[assigned_particles]
x = x[assigned_particles]
y = y[assigned_particles]
z = z[assigned_particles]
idxs = np.argsort(particle_grid_inds)
particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
minlength=num_grids)
particle_indices = np.zeros(num_grids + 1, dtype='int64')
if num_grids > 1:
np.add.accumulate(particle_grid_count.squeeze(),
out=particle_indices[1:])
else:
particle_indices[1] = particle_grid_count.squeeze()
for i, pcount in enumerate(particle_grid_count):
grid_pdata[i]["number_of_particles"] += pcount
start = particle_indices[i]
end = particle_indices[i+1]
for key in pdata.keys():
if key[0] == ptype:
grid_pdata[i][key] = pdata[key][idxs][start:end]
else:
grid_pdata = [pdata]
for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
ds.stream_handler.fields[gi].update(pd)
ds.stream_handler.particle_types.update(set_particle_types(pd))
npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
ds.stream_handler.particle_count[gi] = npart
def process_data(data, grid_dims=None):
new_data, field_units = {}, {}
for field, val in data.items():
# val is a data array
if isinstance(val, np.ndarray):
# val is a YTArray
if hasattr(val, "units"):
field_units[field] = val.units
new_data[field] = val.copy().d
# val is a numpy array
else:
field_units[field] = ""
new_data[field] = val.copy()
# val is a tuple of (data, units)
elif isinstance(val, tuple) and len(val) == 2:
try:
assert isinstance(field, (string_types, tuple)), \
"Field name is not a string!"
assert isinstance(val[0], np.ndarray), \
"Field data is not an ndarray!"
assert isinstance(val[1], string_types), \
"Unit specification is not a string!"
field_units[field] = val[1]
new_data[field] = val[0]
except AssertionError as e:
raise RuntimeError(
"The data dict appears to be invalid.\n" + str(e))
# val is a list of data to be turned into an array
elif iterable(val):
field_units[field] = ""
new_data[field] = np.asarray(val)
else:
raise RuntimeError("The data dict appears to be invalid. "
"The data dictionary must map from field "
"names to (numpy array, unit spec) tuples. ")
data = new_data
# At this point, we have arrays for all our fields
new_data = {}
for field in data:
n_shape = len(data[field].shape)
if isinstance(field, tuple):
new_field = field
elif n_shape in (1, 2):
new_field = ("io", field)
elif n_shape == 3:
new_field = ("stream", field)
else:
raise RuntimeError
new_data[new_field] = data[field]
field_units[new_field] = field_units.pop(field)
known_fields = StreamFieldInfo.known_particle_fields \
+ StreamFieldInfo.known_other_fields
# We do not want to override any of the known ones, if it's not
# overridden here.
if any(f[0] == new_field[1] for f in known_fields) and \
field_units[new_field] == "":
field_units.pop(new_field)
data = new_data
# Sanity checking that all fields have the same dimensions.
g_shapes = []
p_shapes = defaultdict(list)
for field in data:
f_shape = data[field].shape
n_shape = len(f_shape)
if n_shape in (1, 2):
p_shapes[field[0]].append((field[1], f_shape[0]))
elif n_shape == 3:
g_shapes.append((field, f_shape))
if len(g_shapes) > 0:
g_s = np.array([s[1] for s in g_shapes])
if not np.all(g_s == g_s[0]):
raise YTInconsistentGridFieldShape(g_shapes)
if grid_dims is not None:
if not np.all(g_s == grid_dims):
raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims)
if len(p_shapes) > 0:
for ptype, p_shape in p_shapes.items():
p_s = np.array([s[1] for s in p_shape])
if not np.all(p_s == p_s[0]):
raise YTInconsistentParticleFieldShape(ptype, p_shape)
# Now that we know the particle fields are consistent, determine the number
# of particles.
if len(p_shapes) > 0:
number_of_particles = np.sum([s[0][1] for s in p_shapes.values()])
else:
number_of_particles = 0
return field_units, data, number_of_particles
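# Illustrative sketch (not part of the original module): process_data accepts
# plain ndarrays, (ndarray, "unit") tuples, or unit-carrying arrays as field
# values and returns (field_units, data, number_of_particles) with normalized
# (ftype, fname) keys, e.g.
#
#     field_units, data, npart = process_data(
#         {"density": (np.ones((8, 8, 8)), "g/cm**3"),   # 3D -> ("stream", ...)
#          "particle_mass": np.ones(16)})                # 1D -> ("io", ...)
#     # field_units[("stream", "density")] == "g/cm**3"
#     # npart == 16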
def load_uniform_grid(data, domain_dimensions, length_unit=None, bbox=None,
nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
geometry="cartesian", unit_system="cgs"):
r"""Load a uniform grid of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a uniform grid of data to be loaded directly into yt and
analyzed as would any others. This comes with several caveats:
* Units will be incorrect unless the unit system is explicitly
specified.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
Particle fields are detected as one-dimensional fields.
Parameters
----------
data : dict
This is a dict of numpy arrays or (numpy array, unit spec) tuples.
The keys are the field names.
domain_dimensions : array_like
This is the domain dimensions of the grid
length_unit : string
Unit to use for lengths. Defaults to unitless.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units specified by length_unit.
Defaults to a cubic unit-length domain.
nprocs: integer, optional
If greater than 1, will create this number of subarrays out of data
sim_time : float, optional
The simulation time in seconds
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
Examples
--------
>>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
>>> arr = np.random.random((128, 128, 128))
>>> data = dict(density=arr)
>>> ds = load_uniform_grid(data, arr.shape, length_unit='cm',
... bbox=bbox, nprocs=12)
>>> dd = ds.all_data()
>>> dd['density']
YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916,
0.97506269, 0.03047113]) g/cm**3
"""
domain_dimensions = np.array(domain_dimensions)
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
    # If someone included this, throw it away -- old API
if "number_of_particles" in data:
issue_deprecation_warning("It is no longer necessary to include "
"the number of particles in the data "
"dict. The number of particles is "
"determined from the sizes of the "
"particle fields.")
data.pop("number_of_particles")
# First we fix our field names, apply units to data
# and check for consistency of field shapes
field_units, data, number_of_particles = process_data(
data, grid_dims=tuple(domain_dimensions))
sfh = StreamDictFieldHandler()
if number_of_particles > 0:
particle_types = set_particle_types(data)
# Used much further below.
pdata = {"number_of_particles": number_of_particles}
for key in list(data.keys()):
if len(data[key].shape) == 1 or key[0] == 'io':
if not isinstance(key, tuple):
field = ("io", key)
mylog.debug("Reassigning '%s' to '%s'", key, field)
else:
field = key
sfh._additional_fields += (field,)
pdata[field] = data.pop(key)
else:
particle_types = {}
if nprocs > 1:
temp = {}
new_data = {}
for key in data.keys():
psize = get_psize(np.array(data[key].shape), nprocs)
grid_left_edges, grid_right_edges, shapes, slices = \
decompose_array(data[key].shape, psize, bbox)
grid_dimensions = np.array([shape for shape in shapes],
dtype="int32")
temp[key] = [data[key][slice] for slice in slices]
for gid in range(nprocs):
new_data[gid] = {}
for key in temp.keys():
new_data[gid].update({key:temp[key][gid]})
sfh.update(new_data)
del new_data, temp
else:
sfh.update({0:data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype='int64'),
np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # particle count
np.zeros(nprocs).reshape((nprocs,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "UniformGridData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
if np.all(domain_dimensions[1:] == 1):
dimensionality = 1
elif domain_dimensions[2] == 1:
dimensionality = 2
else:
dimensionality = 3
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
# Now figure out where the particles go
if number_of_particles > 0:
# This will update the stream handler too
assign_particle_data(sds, pdata, bbox)
return sds
def load_amr_grids(grid_data, domain_dimensions,
bbox=None, sim_time=0.0, length_unit=None,
mass_unit=None, time_unit=None, velocity_unit=None,
magnetic_unit=None, periodicity=(True, True, True),
geometry="cartesian", refine_by=2, unit_system="cgs"):
r"""Load a set of grids of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a sequence of grids of varying resolution of data to be
loaded directly into yt and analyzed as would any others. This comes with
several caveats:
* Units will be incorrect unless the unit system is explicitly specified.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
* No consistency checks are performed on the index
Parameters
----------
grid_data : list of dicts
This is a list of dicts. Each dict must have entries "left_edge",
"right_edge", "dimensions", "level", and then any remaining entries are
assumed to be fields. Field entries must map to an NDArray. The grid_data
may also include a particle count. If no particle count is supplied, the
dataset is understood to contain no particles. The grid_data will be
modified in place and can't be assumed to be static.
domain_dimensions : array_like
This is the domain dimensions of the grid
length_unit : string or float
Unit to use for lengths. Defaults to unitless. If set to be a string, the bbox
dimensions are assumed to be in the corresponding units. If set to a float, the
        value is assumed to be the conversion from bbox dimensions to centimeters.
mass_unit : string or float
Unit to use for masses. Defaults to unitless.
time_unit : string or float
Unit to use for times. Defaults to unitless.
velocity_unit : string or float
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string or float
Unit to use for magnetic fields. Defaults to unitless.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units specified by length_unit.
Defaults to a cubic unit-length domain.
sim_time : float, optional
The simulation time in seconds
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
refine_by : integer or list/array of integers.
Specifies the refinement ratio between levels. Defaults to 2. This
can be an array, in which case it specifies for each dimension. For
instance, this can be used to say that some datasets have refinement of
1 in one dimension, indicating that they span the full range in that
dimension.
Examples
--------
>>> grid_data = [
... dict(left_edge = [0.0, 0.0, 0.0],
... right_edge = [1.0, 1.0, 1.],
... level = 0,
... dimensions = [32, 32, 32],
... number_of_particles = 0),
... dict(left_edge = [0.25, 0.25, 0.25],
... right_edge = [0.75, 0.75, 0.75],
... level = 1,
... dimensions = [32, 32, 32],
... number_of_particles = 0)
... ]
...
>>> for g in grid_data:
... g["density"] = (np.random.random(g["dimensions"])*2**g["level"], "g/cm**3")
...
>>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0)
"""
domain_dimensions = np.array(domain_dimensions)
ngrids = len(grid_data)
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros((ngrids, 1), dtype='int32')
grid_left_edges = np.zeros((ngrids, 3), dtype="float64")
grid_right_edges = np.zeros((ngrids, 3), dtype="float64")
grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
number_of_particles = np.zeros((ngrids,1), dtype='int64')
parent_ids = np.zeros(ngrids, dtype="int64") - 1
sfh = StreamDictFieldHandler()
for i, g in enumerate(grid_data):
grid_left_edges[i,:] = g.pop("left_edge")
grid_right_edges[i,:] = g.pop("right_edge")
grid_dimensions[i,:] = g.pop("dimensions")
grid_levels[i,:] = g.pop("level")
        # If someone included this, throw it away -- old API
if "number_of_particles" in g:
issue_deprecation_warning("It is no longer necessary to include "
"the number of particles in the data "
"dict. The number of particles is "
"determined from the sizes of the "
"particle fields.")
g.pop("number_of_particles")
field_units, data, n_particles = process_data(
g, grid_dims=tuple(grid_dimensions[i,:]))
number_of_particles[i, :] = n_particles
sfh[i] = data
# We now reconstruct our parent ids, so that our particle assignment can
# proceed.
mask = np.empty(ngrids, dtype='int32')
for gi in range(ngrids):
get_box_grids_level(grid_left_edges[gi,:],
grid_right_edges[gi,:],
grid_levels[gi] + 1,
grid_left_edges, grid_right_edges,
grid_levels, mask)
ids = np.where(mask.astype("bool"))
for ci in ids:
parent_ids[ci] = gi
# Check if the grid structure is properly aligned (bug #1295)
for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1):
idx = grid_levels.flatten() == lvl
dims = domain_dimensions * refine_by ** (lvl - 1)
for iax, ax in enumerate('xyz'):
cell_edges = np.linspace(domain_left_edge[iax],
domain_right_edge[iax],
dims[iax], endpoint=False)
if set(grid_left_edges[idx, iax]) - set(cell_edges):
raise YTIllDefinedAMR(lvl, ax)
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
particle_types = {}
for grid in sfh.values():
particle_types.update(set_particle_types(grid))
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
parent_ids,
number_of_particles,
np.zeros(ngrids).reshape((ngrids,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "AMRGridData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = refine_by
if np.all(domain_dimensions[1:] == 1):
dimensionality = 1
elif domain_dimensions[2] == 1:
dimensionality = 2
else:
dimensionality = 3
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
return sds
def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level,
callback=None):
r"""Given a base dataset, repeatedly apply refinement criteria and
fluid operators until a maximum level is reached.
Parameters
----------
base_ds : ~yt.data_objects.static_output.Dataset
This is any static output. It can also be a stream static output, for
        instance as returned by load_uniform_grid.
    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
These criteria will be applied in sequence to identify cells that need
to be refined.
fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
These fluid operators will be applied in sequence to all resulting
grids.
max_level : int
The maximum level to which the data will be refined
callback : function, optional
A function that will be called at the beginning of each refinement
cycle, with the current dataset.
Examples
--------
>>> domain_dims = (32, 32, 32)
>>> data = np.zeros(domain_dims) + 0.25
>>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
>>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
>>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
>>> ds = refine_amr(ug, rc, fo, 5)
"""
# If we have particle data, set it aside for now
number_of_particles = np.sum([grid.NumberOfParticles
for grid in base_ds.index.grids])
if number_of_particles > 0:
pdata = {}
for field in base_ds.field_list:
if not isinstance(field, tuple):
field = ("unknown", field)
fi = base_ds._get_field_info(*field)
if fi.particle_type and field[0] in base_ds.particle_types_raw:
pdata[field] = uconcatenate([grid[field]
for grid in base_ds.index.grids])
pdata["number_of_particles"] = number_of_particles
last_gc = base_ds.index.num_grids
cur_gc = -1
ds = base_ds
bbox = np.array([(ds.domain_left_edge[i], ds.domain_right_edge[i])
for i in range(3)])
while ds.index.max_level < max_level and last_gc != cur_gc:
mylog.info("Refining another level. Current max level: %s",
ds.index.max_level)
last_gc = ds.index.grids.size
for m in fluid_operators: m.apply(ds)
if callback is not None: callback(ds)
grid_data = []
for g in ds.index.grids:
gd = dict(left_edge=g.LeftEdge,
right_edge=g.RightEdge,
level=g.Level,
dimensions=g.ActiveDimensions)
for field in ds.field_list:
if not isinstance(field, tuple):
field = ("unknown", field)
fi = ds._get_field_info(*field)
if not fi.particle_type:
gd[field] = g[field]
grid_data.append(gd)
if g.Level < ds.index.max_level: continue
fg = FlaggingGrid(g, refinement_criteria)
nsg = fg.find_subgrids()
for sg in nsg:
LE = sg.left_index * g.dds + ds.domain_left_edge
dims = sg.dimensions * ds.refine_by
grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims)
gd = dict(left_edge=LE, right_edge=grid.right_edge,
level=g.Level + 1, dimensions=dims)
for field in ds.field_list:
if not isinstance(field, tuple):
field = ("unknown", field)
fi = ds._get_field_info(*field)
if not fi.particle_type:
gd[field] = grid[field]
grid_data.append(gd)
ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
ds.particle_types_raw = base_ds.particle_types_raw
ds.particle_types = ds.particle_types_raw
# Now figure out where the particles go
if number_of_particles > 0:
# This will update the stream handler too
assign_particle_data(ds, pdata, bbox)
cur_gc = ds.index.num_grids
return ds
class StreamParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
super(StreamParticleIndex, self).__init__(ds, dataset_type)
def _setup_data_io(self):
if self.stream_handler.io is not None:
self.io = self.stream_handler.io
else:
self.io = io_registry[self.dataset_type](self.ds)
class StreamParticleFile(ParticleFile):
pass
class StreamParticlesDataset(StreamDataset):
_index_class = StreamParticleIndex
_file_class = StreamParticleFile
_field_info_class = StreamFieldInfo
_dataset_type = "stream_particles"
file_count = 1
filename_template = "stream_file"
n_ref = 64
over_refine_factor = 1
def load_particles(data, length_unit = None, bbox=None,
sim_time=0.0, mass_unit = None, time_unit = None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
n_ref = 64, over_refine_factor = 1, geometry = "cartesian",
unit_system="cgs"):
r"""Load a set of particles into yt as a
    :class:`~yt.frontends.stream.data_structures.StreamParticlesDataset`.
This should allow a collection of particle data to be loaded directly into
yt and analyzed as would any others. This comes with several caveats:
* There must be sufficient space in memory to contain both the particle
data and the octree used to index the particles.
* Parallelism will be disappointing or non-existent in most cases.
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
Parameters
----------
data : dict
This is a dict of numpy arrays or (numpy array, unit name) tuples,
        where the keys are the field names. Particle positions must be named
"particle_position_x", "particle_position_y", and "particle_position_z".
length_unit : float
Conversion factor from simulation length units to centimeters
mass_unit : float
Conversion factor from simulation mass units to grams
time_unit : float
Conversion factor from simulation time units to seconds
velocity_unit : float
Conversion factor from simulation velocity units to cm/s
magnetic_unit : float
Conversion factor from simulation magnetic units to gauss
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length_unit
sim_time : float, optional
The simulation time in seconds
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
n_ref : int
The number of particles that result in refining an oct used for
indexing the particles.
Examples
--------
>>> pos = [np.random.random(128*128*128) for i in range(3)]
>>> data = dict(particle_position_x = pos[0],
... particle_position_y = pos[1],
... particle_position_z = pos[2])
>>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
>>> ds = load_particles(data, 3.08e24, bbox=bbox)
"""
domain_dimensions = np.ones(3, "int32") * (1 << over_refine_factor)
nprocs = 1
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
else:
bbox = np.array(bbox)
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
pdata = {}
for key in data.keys() :
if not isinstance(key, tuple):
field = ("io", key)
mylog.debug("Reassigning '%s' to '%s'", key, field)
else:
field = key
pdata[field] = data[key]
sfh._additional_fields += (field,)
data = pdata # Drop reference count
particle_types = set_particle_types(data)
sfh.update({'stream_file':data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype='int64'),
np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
np.zeros(nprocs).reshape((nprocs,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "ParticleData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system)
sds.n_ref = n_ref
sds.over_refine_factor = over_refine_factor
return sds
_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
dtype=np.int64, count = 8*3)
_cis.shape = (8, 3)
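# _cis enumerates the (i, j, k) offsets of the 8 corners of a unit cell
# (0 or 1 along each axis); hexahedral_connectivity below adds it to every
# cell index to pick out that cell's 8 vertices.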
def hexahedral_connectivity(xgrid, ygrid, zgrid):
r"""Define the cell coordinates and cell neighbors of a hexahedral mesh
for a semistructured grid. Used to specify the connectivity and
coordinates parameters used in
:func:`~yt.frontends.stream.data_structures.load_hexahedral_mesh`.
Parameters
----------
xgrid : array_like
x-coordinates of boundaries of the hexahedral cells. Should be a
one-dimensional array.
ygrid : array_like
y-coordinates of boundaries of the hexahedral cells. Should be a
one-dimensional array.
zgrid : array_like
z-coordinates of boundaries of the hexahedral cells. Should be a
one-dimensional array.
Returns
-------
coords : array_like
The list of (x,y,z) coordinates of the vertices of the mesh.
Is of size (M,3) where M is the number of vertices.
connectivity : array_like
For each hexahedron h in the mesh, gives the index of each of h's
neighbors. Is of size (N,8), where N is the number of hexahedra.
Examples
--------
>>> xgrid = np.array([-1,-0.25,0,0.25,1])
>>> coords, conn = hexahedral_connectivity(xgrid,xgrid,xgrid)
>>> coords
array([[-1. , -1. , -1. ],
[-1. , -1. , -0.25],
[-1. , -1. , 0. ],
...,
[ 1. , 1. , 0. ],
[ 1. , 1. , 0.25],
[ 1. , 1. , 1. ]])
>>> conn
array([[ 0, 1, 5, 6, 25, 26, 30, 31],
[ 1, 2, 6, 7, 26, 27, 31, 32],
[ 2, 3, 7, 8, 27, 28, 32, 33],
...,
[ 91, 92, 96, 97, 116, 117, 121, 122],
[ 92, 93, 97, 98, 117, 118, 122, 123],
[ 93, 94, 98, 99, 118, 119, 123, 124]])
"""
nx = len(xgrid)
ny = len(ygrid)
nz = len(zgrid)
coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
coords[:,:,:,0] = xgrid[:,None,None]
coords[:,:,:,1] = ygrid[None,:,None]
coords[:,:,:,2] = zgrid[None,None,:]
coords.shape = (nx * ny * nz, 3)
cycle = np.rollaxis(np.indices((nx-1,ny-1,nz-1)), 0, 4)
cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
off = _cis + cycle[:, np.newaxis]
connectivity = np.array(((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2], order='C')
return coords, connectivity
class StreamHexahedralMesh(SemiStructuredMesh):
_connectivity_length = 8
_index_offset = 0
class StreamHexahedralHierarchy(UnstructuredIndex):
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
super(StreamHexahedralHierarchy, self).__init__(ds, dataset_type)
def _initialize_mesh(self):
coords = self.stream_handler.fields.pop('coordinates')
connect = self.stream_handler.fields.pop('connectivity')
self.meshes = [StreamHexahedralMesh(0,
self.index_filename, connect, coords, self)]
def _setup_data_io(self):
if self.stream_handler.io is not None:
self.io = self.stream_handler.io
else:
self.io = io_registry[self.dataset_type](self.ds)
def _detect_output_fields(self):
self.field_list = list(set(self.stream_handler.get_fields()))
class StreamHexahedralDataset(StreamDataset):
_index_class = StreamHexahedralHierarchy
_field_info_class = StreamFieldInfo
_dataset_type = "stream_hexahedral"
def load_hexahedral_mesh(data, connectivity, coordinates,
length_unit = None, bbox=None, sim_time=0.0,
mass_unit = None, time_unit = None,
velocity_unit = None, magnetic_unit = None,
periodicity=(True, True, True),
geometry = "cartesian", unit_system="cgs"):
r"""Load a hexahedral mesh of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a semistructured grid of data to be loaded directly into
yt and analyzed as would any others. This comes with several caveats:
* Units will be incorrect unless the data has already been converted to
cgs.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
Particle fields are detected as one-dimensional fields. The number of particles
is set by the "number_of_particles" key in data.
Parameters
----------
data : dict
This is a dict of numpy arrays, where the keys are the field names.
        There must only be one mesh. Note that the data in the numpy arrays
        should define the cell-averaged value of the quantity in the
        hexahedral cell.
connectivity : array_like
This should be of size (N,8) where N is the number of zones.
coordinates : array_like
This should be of size (M,3) where M is the number of vertices
indicated in the connectivity matrix.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length unit.
sim_time : float, optional
The simulation time in seconds
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
"""
domain_dimensions = np.ones(3, "int32") * 2
nprocs = 1
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
particle_types = set_particle_types(data)
sfh.update({'connectivity': connectivity,
'coordinates': coordinates,
0: data})
# Simple check for axis length correctness
if len(data) > 0:
fn = list(sorted(data))[0]
array_values = data[fn]
if array_values.size != connectivity.shape[0]:
mylog.error("Dimensions of array must be one fewer than the" +
" coordinate set.")
raise RuntimeError
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype='int64'),
np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
np.zeros(nprocs).reshape((nprocs,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "HexahedralMeshData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system)
return sds
class StreamOctreeSubset(OctreeSubset):
domain_id = 1
_domain_offset = 1
def __init__(self, base_region, ds, oct_handler, over_refine_factor = 1):
self._num_zones = 1 << (over_refine_factor)
self.field_data = YTFieldData()
self.field_parameters = {}
self.ds = ds
self.oct_handler = oct_handler
self._last_mask = None
self._last_selector_id = None
self._current_particle_type = 'io'
self._current_fluid_type = self.ds.default_fluid_type
self.base_region = base_region
self.base_selector = base_region.selector
def fill(self, content, dest, selector, offset):
# Here we get a copy of the file, which we skip through and read the
# bits we want.
oct_handler = self.oct_handler
cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
selector, self.domain_id, cell_count)
levels[:] = 0
dest.update((field, np.empty(cell_count, dtype="float64"))
for field in content)
# Make references ...
count = oct_handler.fill_level(0, levels, cell_inds, file_inds,
dest, content, offset)
return count
class StreamOctreeHandler(OctreeIndex):
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
self.dataset_type = dataset_type
super(StreamOctreeHandler, self).__init__(ds, dataset_type)
def _setup_data_io(self):
if self.stream_handler.io is not None:
self.io = self.stream_handler.io
else:
self.io = io_registry[self.dataset_type](self.ds)
def _initialize_oct_handler(self):
header = dict(dims = [1, 1, 1],
left_edge = self.ds.domain_left_edge,
right_edge = self.ds.domain_right_edge,
octree = self.ds.octree_mask,
over_refine = self.ds.over_refine_factor,
partial_coverage = self.ds.partial_coverage)
self.oct_handler = OctreeContainer.load_octree(header)
def _identify_base_chunk(self, dobj):
if getattr(dobj, "_chunk_info", None) is None:
base_region = getattr(dobj, "base_region", dobj)
subset = [StreamOctreeSubset(base_region, self.dataset,
self.oct_handler,
self.ds.over_refine_factor)]
dobj._chunk_info = subset
dobj._current_chunk = list(self._chunk_all(dobj))[0]
def _chunk_all(self, dobj):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
yield YTDataChunk(dobj, "all", oobjs, None)
def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
# We actually do not really use the data files except as input to the
# ParticleOctreeSubset.
# This is where we will perform cutting of the Octree and
# load-balancing. That may require a specialized selector object to
# cut based on some space-filling curve index.
for i,og in enumerate(sobjs):
if ngz > 0:
g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
else:
g = og
yield YTDataChunk(dobj, "spatial", [g])
def _chunk_io(self, dobj, cache = True, local_only = False):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
def _setup_classes(self):
dd = self._get_data_reader_dict()
super(StreamOctreeHandler, self)._setup_classes(dd)
def _detect_output_fields(self):
# NOTE: Because particle unions add to the actual field list, without
# having the keys in the field list itself, we need to double check
# here.
fl = set(self.stream_handler.get_fields())
fl.update(set(getattr(self, "field_list", [])))
self.field_list = list(fl)
class StreamOctreeDataset(StreamDataset):
_index_class = StreamOctreeHandler
_field_info_class = StreamFieldInfo
_dataset_type = "stream_octree"
def load_octree(octree_mask, data,
bbox=None, sim_time=0.0, length_unit=None,
mass_unit=None, time_unit=None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
over_refine_factor = 1, partial_coverage = 1,
unit_system="cgs"):
r"""Load an octree mask into yt.
Octrees can be saved out by calling save_octree on an OctreeContainer.
This enables them to be loaded back in.
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
Parameters
----------
octree_mask : np.ndarray[uint8_t]
This is a depth-first refinement mask for an Octree. It should be
of size n_octs * 8 (but see note about the root oct below), where
each item is 1 for an oct-cell being refined and 0 for it not being
refined. For over_refine_factors != 1, the children count will
        still be 8, so there will still be n_octs * 8 entries. Note that if
the root oct is not refined, there will be only one entry
for the root, so the size of the mask will be (n_octs - 1)*8 + 1.
data : dict
        A dictionary of 1D arrays. Note that these must be of the size of the
number of "False" values in the ``octree_mask``.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of length
sim_time : float, optional
The simulation time in seconds
length_unit : string
Unit to use for lengths. Defaults to unitless.
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
partial_coverage : boolean
Whether or not an oct can be refined cell-by-cell, or whether all
8 get refined.
Example
-------
>>> import yt
>>> import numpy as np
>>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8,
... 0, 0, 0, 0, 0, 0, 0, 0,
... 8, 0, 0, 0, 0, 0, 0, 0,
... 0]
>>>
>>> octree_mask = np.array(oct_mask, dtype=np.uint8)
>>> quantities = {}
    >>> quantities['gas', 'density'] = np.random.random((22, 1))
>>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]])
>>>
>>> ds = yt.load_octree(octree_mask=octree_mask,
... data=quantities,
... bbox=bbox,
... over_refine_factor=0,
... partial_coverage=0)
"""
if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8:
raise TypeError("octree_mask should be a Numpy array with type uint8")
nz = (1 << (over_refine_factor))
domain_dimensions = np.array([nz, nz, nz])
nprocs = 1
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
particle_types = set_particle_types(data)
sfh.update({0:data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype='int64'),
np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
np.zeros(nprocs).reshape((nprocs,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "OctreeData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamOctreeDataset(handler, unit_system=unit_system)
sds.octree_mask = octree_mask
sds.partial_coverage = partial_coverage
sds.over_refine_factor = over_refine_factor
return sds
class StreamUnstructuredMesh(UnstructuredMesh):
_index_offset = 0
def __init__(self, *args, **kwargs):
super(StreamUnstructuredMesh, self).__init__(*args, **kwargs)
self._connectivity_length = self.connectivity_indices.shape[1]
class StreamUnstructuredIndex(UnstructuredIndex):
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
super(StreamUnstructuredIndex, self).__init__(ds, dataset_type)
def _initialize_mesh(self):
coords = ensure_list(self.stream_handler.fields.pop("coordinates"))
connect = ensure_list(self.stream_handler.fields.pop("connectivity"))
self.meshes = [StreamUnstructuredMesh(
i, self.index_filename, c1, c2, self)
for i, (c1, c2) in enumerate(zip(connect, repeat(coords[0])))]
self.mesh_union = MeshUnion("mesh_union", self.meshes)
def _setup_data_io(self):
if self.stream_handler.io is not None:
self.io = self.stream_handler.io
else:
self.io = io_registry[self.dataset_type](self.ds)
def _detect_output_fields(self):
self.field_list = list(set(self.stream_handler.get_fields()))
fnames = list(set([fn for ft, fn in self.field_list]))
self.field_list += [('all', fname) for fname in fnames]
class StreamUnstructuredMeshDataset(StreamDataset):
_index_class = StreamUnstructuredIndex
_field_info_class = StreamFieldInfo
_dataset_type = "stream_unstructured"
def _find_particle_types(self):
pass
def load_unstructured_mesh(connectivity, coordinates, node_data=None,
elem_data=None, length_unit=None, bbox=None,
sim_time=0.0, mass_unit=None, time_unit=None,
velocity_unit=None, magnetic_unit=None,
periodicity=(False, False, False),
geometry = "cartesian", unit_system="cgs"):
r"""Load an unstructured mesh of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
    This should allow unstructured mesh data to be loaded directly into
yt and analyzed as would any others. Not all functionality for
visualization will be present, and some analysis functions may not yet have
been implemented.
Particle fields are detected as one-dimensional fields. The number of
particles is set by the "number_of_particles" key in data.
In the parameter descriptions below, a "vertex" is a 3D point in space, an
"element" is a single polyhedron whose location is defined by a set of
vertices, and a "mesh" is a set of polyhedral elements, each with the same
number of vertices.
Parameters
----------
connectivity : list of array_like or array_like
This should either be a single 2D array or list of 2D arrays. If this
is a list, each element in the list corresponds to the connectivity
information for a distinct mesh. Each array can have different
connectivity length and should be of shape (N,M) where N is the number
of elements and M is the number of vertices per element.
coordinates : array_like
The 3D coordinates of mesh vertices. This should be of size (L, D) where
L is the number of vertices and D is the number of coordinates per vertex
(the spatial dimensions of the dataset). Currently this must be either 2 or 3.
When loading more than one mesh, the data for each mesh should be concatenated
into a single coordinates array.
node_data : dict or list of dicts
For a single mesh, a dict mapping field names to 2D numpy arrays,
representing data defined at element vertices. For multiple meshes,
this must be a list of dicts. Note that these are not the values as a
function of the coordinates, but of the connectivity. Their shape
should be the same as the connectivity. This means that if the data is
in the shape of the coordinates, you may need to reshape them using the
`connectivity` array as an index.
elem_data : dict or list of dicts
For a single mesh, a dict mapping field names to 1D numpy arrays, where
each array has a length equal to the number of elements. The data
must be defined at the center of each mesh element and there must be
only one data value for each element. For multiple meshes, this must be
a list of dicts, with one dict for each mesh.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length unit.
sim_time : float, optional
The simulation time in seconds
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
Examples
--------
Load a simple mesh consisting of two tets.
>>> # Coordinates for vertices of two tetrahedra
>>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5],
... [0.5, 1, 0.5], [0.5, 0.5, 0.0],
... [0.5, 0.5, 1.0]])
>>> # The indices in the coordinates array of mesh vertices.
>>> # This mesh has two elements.
>>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]])
>>>
>>> # Field data defined at the centers of the two mesh elements.
>>> elem_data = {
... ('connect1', 'elem_field'): np.array([1, 2])
... }
>>>
>>> # Field data defined at node vertices
>>> node_data = {
... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0],
... [0.0, 1.0, 2.0, 3.0]])
... }
>>>
>>> ds = yt.load_unstructured_mesh(connectivity, coordinates,
... elem_data=elem_data,
... node_data=node_data)
"""
dimensionality = coordinates.shape[1]
domain_dimensions = np.ones(3, "int32") * 2
nprocs = 1
if elem_data is None and node_data is None:
raise RuntimeError("No data supplied in load_unstructured_mesh.")
if isinstance(connectivity, list):
num_meshes = len(connectivity)
else:
num_meshes = 1
connectivity = ensure_list(connectivity)
if elem_data is None:
elem_data = [{} for i in range(num_meshes)]
elem_data = ensure_list(elem_data)
if node_data is None:
node_data = [{} for i in range(num_meshes)]
node_data = ensure_list(node_data)
data = [{} for i in range(num_meshes)]
for elem_dict, data_dict in zip(elem_data, data):
for field, values in elem_dict.items():
data_dict[field] = values
for node_dict, data_dict in zip(node_data, data):
for field, values in node_dict.items():
data_dict[field] = values
data = ensure_list(data)
if bbox is None:
bbox = [[coordinates[:,i].min() - 0.1 * abs(coordinates[:,i].min()),
coordinates[:,i].max() + 0.1 * abs(coordinates[:,i].max())]
for i in range(dimensionality)]
if dimensionality < 3:
bbox.append([0.0, 1.0])
if dimensionality < 2:
bbox.append([0.0, 1.0])
# handle pseudo-dims here
num_pseudo_dims = get_num_pseudo_dims(coordinates)
dimensionality -= num_pseudo_dims
for i in range(dimensionality, 3):
bbox[i][0] = 0.0
bbox[i][1] = 1.0
bbox = np.array(bbox, dtype=np.float64)
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
field_units = {}
particle_types = {}
sfh = StreamDictFieldHandler()
sfh.update({'connectivity': connectivity,
'coordinates': coordinates})
for i, d in enumerate(data):
_f_unit, _data, _ = process_data(d)
field_units.update(_f_unit)
sfh[i] = _data
particle_types.update(set_particle_types(d))
# Simple check for axis length correctness
if 0 and len(data) > 0:
fn = list(sorted(data))[0]
array_values = data[fn]
if array_values.size != connectivity.shape[0]:
mylog.error("Dimensions of array must be one fewer than the" +
" coordinate set.")
raise RuntimeError
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = 'code_length'
if mass_unit is None:
mass_unit = 'code_mass'
if time_unit is None:
time_unit = 'code_time'
if velocity_unit is None:
velocity_unit = 'code_velocity'
if magnetic_unit is None:
magnetic_unit = 'code_magnetic'
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype='int64'),
np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
np.zeros(nprocs).reshape((nprocs,1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity
)
handler.name = "UnstructuredMeshData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamUnstructuredMeshDataset(handler, geometry=geometry,
unit_system=unit_system)
fluid_types = ['all']
for i in range(1, num_meshes + 1):
fluid_types += ['connect%d' % i]
sds.fluid_types = tuple(fluid_types)
def flatten(l):
return [item for sublist in l for item in sublist]
sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m])
sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m])
sds.default_field = [f for f in sds.field_list
if f[0] == 'connect1'][-1]
return sds
|
the-stack_106_14053
|
from sys import setrecursionlimit
setrecursionlimit(10000)
britcoins = [200,100,50,20,10,5,2] # 1p coins are left out; they are covered by the final +1
uscoins = [25,10,5]
def num_possibilities(amount, coins, memo = {}, firstTime = True):
if firstTime == True:
memo = {}
total = 0
for i in range(len(coins)):
if amount - coins[i] >= 0:
try:
extra = memo[amount - coins[i], coins[i]]
            except KeyError:
extra = num_possibilities(amount-coins[i], coins[i:],memo,False)
memo[amount - coins[i], coins[i]] = extra
total += extra
#Only allow coins equal to or smaller than last denom
#This prevents redundant solutions & inf. recursion
return total + 1 #Account for using all remaining 1p coins and none larger
if __name__ == "__main__":
print(num_possibilities(200,britcoins))
#Just so you know, this does way more than Euler asks
|
the-stack_106_14054
|
from setuptools import setup, find_packages
VERSION = '0.0.0'
with open('VERSION', 'r') as version:
VERSION = version.read().replace("\n", "")
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name="skelebot",
version=VERSION,
description="ML Build Tool",
author="Sean Shookman",
author_email="[email protected]",
packages=find_packages(),
zip_safe=False,
setup_requires=["pytest-runner"],
tests_require=requirements,
install_requires=requirements,
entry_points={
'console_scripts': [
'skelebot = skelebot:main',
],
}
)
|
the-stack_106_14055
|
"""COMMAND : .lovestory"""
import random, re
#from uniborg.util import admin_cmd
import asyncio
from telethon import events
from userbot.events import register
from asyncio import sleep
import time
@register(pattern=".lovestory")
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 14)
#input_str = event.pattern_match.group(1)
#if input_str == "lovestory":
await event.edit("Starting asf")
animation_chars = [
"1 ❤️ love story",
" 😐 😕 \n/👕\ <👗\ \n 👖 /|",
" 😉 😳 \n/👕\ /👗\ \n 👖 /|",
" 😚 😒 \n/👕\ <👗> \n 👖 /|",
" 😍 ☺️ \n/👕\ /👗\ \n 👖 /|",
" 😍 😍 \n/👕\ /👗\ \n 👖 /|",
" 😘 😊 \n /👕\/👗\ \n 👖 /|",
" 😳 😁 \n /|\ /👙\ \n / / |",
"😈 /😰\ \n<|\ 👙 \n /🍆 / |",
"😅 \n/(),✊😮 \n /\ _/\\/|",
"😎 \n/\\_,__😫 \n // // \\",
"😖 \n/\\_,💦_😋 \n // // \\",
" 😭 ☺️ \n /|\ /(👶)\ \n /!\ / \ ",
"The End 😂..."
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 14])
|
the-stack_106_14056
|
import json
def write_jsonl(records, path):
with open(path, "w") as w:
for r in records:
w.write(json.dumps(r, ensure_ascii=False) + "\n")
def read_jsonl(file_name):
records = []
with open(file_name, "r") as r:
for line in r:
record = json.loads(line)
records.append(record)
return records
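

if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the original helpers): write two
    # records to a temporary file and read them back; the non-ASCII text shows
    # why ensure_ascii=False is used above.
    import os
    import tempfile

    records = [{"id": 1, "text": "привет"}, {"id": 2, "text": "hello"}]
    path = os.path.join(tempfile.gettempdir(), "example.jsonl")
    write_jsonl(records, path)
    assert read_jsonl(path) == records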
|
the-stack_106_14057
|
'''
Author: He,Yifan
Date: 2022-02-18 16:06:00
LastEditors: He,Yifan
LastEditTime: 2022-02-18 16:34:39
'''
from typing import Sequence, Set, Dict
import re
from pgsyn.push.type_library import PushTypeLibrary, RESERVED_PSEUDO_STACKS
from pgsyn.push.instruction import Instruction
from pgsyn.push.instructions import core_instructions
class InstructionSet(Dict[str, Instruction]):
"""A collection of Instruction objects stored by name.
Attributes
----------
type_library : PushTypeLibrary, optional
The PushTypeLibrary which denote the PushTypes (and thus stacks)
are supported. Default is None, which corresponds to the core set of types.
register_core : bool, optional
If True, all instructions in the core instruction set will be registered
upon instantiation. Default is False.
strip_docstrings : bool, optional
If True, the docstring attribute of registered instructions will be
removed to reduce memory footprint. Default is True.
"""
def __init__(self,
type_library: PushTypeLibrary = None,
register_core: bool = False,
strip_docstrings: bool = True):
super().__init__()
self.strip_docstrings = strip_docstrings
if type_library is None:
type_library = PushTypeLibrary()
self.type_library = type_library
if register_core:
self.register_core()
def set_type_library(self, type_library: PushTypeLibrary):
"""Set the type library attribute and return self.
Parameters
----------
type_library
PushTypeLibrary to set.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
self.type_library = type_library
return self
def register(self, instr: Instruction):
"""Register an Instruction object.
Parameters
----------
instr
Instruction to register.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
if self.strip_docstrings and hasattr(instr, "docstring"):
del instr.docstring
if instr.required_stacks() <= self.type_library.supported_stacks():
self[instr.name] = instr
return self
def register_list(self, instrs: Sequence[Instruction]):
"""Register a list of Instruction objects.
Parameters
----------
instrs
List of Instruction objects to register.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
for i in instrs:
self.register(i)
return self
def register_core_by_stack(self, include_stacks: Set[str], *, exclude_stacks: Set[str] = None):
"""Register all instructions that make use of the given type name.
Parameters
----------
include_stacks
List of PushType names.
exclude_stacks
List of PushType names.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
for i in core_instructions(self.type_library):
req_stacks = i.required_stacks()
if req_stacks <= include_stacks:
if exclude_stacks is None or len(req_stacks & exclude_stacks) == 0:
self.register(i)
return self
def register_core_by_name(self, name_pattern: str):
"""Register all instructions whose name match the given pattern.
Parameters
----------
name_pattern
A regex string.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
re_pat = re.compile(name_pattern)
for i in core_instructions(self.type_library):
if re.match(re_pat, i.name) is not None:
self.register(i)
return self
def register_core(self):
"""Register all core instructions defined in pyshgp.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
self.register_list(core_instructions(self.type_library))
return self
def unregister(self, instruction_name: str):
"""Unregister an instruction by name.
Parameters
----------
instruction_name
The name of the instruction to unregister.
Returns
-------
InstructionSet
A reference to the InstructionSet.
"""
self.pop(instruction_name, None)
return self
def required_stacks(self) -> Set[str]:
"""Return all stack names used throughout the registered instructions.
Returns
-------
Set[str]
The set of stack names that are used by the registered instructions.
"""
all_types = set()
for instr in self.values():
all_types.update(instr.required_stacks())
return all_types - RESERVED_PSEUDO_STACKS
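

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module. It assumes the
    # pgsyn core instruction set imports cleanly and that instruction names
    # follow the usual "<stack>_<op>" convention (e.g. "bool_and").
    iset = InstructionSet(register_core=True)
    print(len(iset), "core instructions registered")
    print("stacks used:", sorted(iset.required_stacks()))

    # Registration can also be filtered by a regex over instruction names.
    bool_only = InstructionSet().register_core_by_name(r"^bool_.*")
    print(len(bool_only), "instructions matching '^bool_.*'")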
|
the-stack_106_14058
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Titancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running titancoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import TitancoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(TitancoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
the-stack_106_14061
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='stream', parent_name='scattercarpet', **kwargs
):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Stream',
data_docs="""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to *50*, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.""",
**kwargs
)
|
the-stack_106_14062
|
from sklearn.feature_extraction.text import TfidfVectorizer as CV
class TfidfVectorizer(CV):
def inverse_transform(self, X):
"""
Note: this method overrides the original one to retain the order of the features passed as argument.
Return a list of words for each document, keeping the order of the transformed words indexes.
"""
self._check_vocabulary()
all_undid = [] # Let's undo that.
for doc in X:
undid_doc = [self.get_feature_names()[i] for i in doc]
all_undid.append(undid_doc)
return all_undid
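

if __name__ == "__main__":
    # Usage sketch (not part of the original module): map per-document feature
    # indexes back to words while preserving their order. The corpus below is
    # made up for illustration.
    corpus = ["the cat sat on the mat", "the dog chased the cat"]
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(corpus)
    # Build one list of feature indexes per document (here: the nonzero columns).
    index_docs = [row.nonzero()[1] for row in X]
    print(vectorizer.inverse_transform(index_docs))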
|
the-stack_106_14063
|
"""
GwList is a class that holds a list of GwSeries objects and
provides methods for iterating.

======
GwList
======
Object for maintaining a groundwater series iterator
"""
import os
import os.path
import warnings
from pandas import Series, DataFrame
import pandas as pd
import time
from datetime import datetime
import acequia as aq
def headsfiles(srcdir=None,srctype=None,loclist=None):
"""Return list of sourcefiles in directory
Parameters
----------
srcdir : str
directory with sourcefiles
srctype : {'dinocsv','json'}, optional
sourcefiletype
loclist : list, optional
list of strings with location names
Return
------
pd.DataFrame with series sourcefilelist
"""
gws = aq.GwList(srcdir=srcdir,srctype=srctype,loclist=loclist)
return gws.filetable()
class GwList():
"""Holds a list of GwSeries objects
Parameters
----------
srcdir : str
directory with groundwater head sourcefiles
srctype : {'dinocsv','json','hymon'}, optional
sourcefiletype
loclist : list, optional
list of location names
srcfile : str, optional
path to file with paths to sourcefiles
(if srcfile is given, srcdir is ignored)
Examples
--------
Create GwList object and load multiple sourcefiles:
>>>gwl = GwList(srcdir=<sourcedir>,srctype='dinocsv',loclist=<mylist>)
>>>gwl = GwList(srcdir=<sourcedir>,srctype='json',loclist=<mylist>)
>>>gwl = GwList(srcfile=<filepath>,srctype=<'json' or ' dinocsv'>)
    >>>gwl = GwList(srcfile=<hydromonitor csv export>,srctype='hymon')
Return table with location properties:
>>>locp = gwl.locprops()
    Return list of sourcefiles in <srcdir> of type 'dinocsv':
>>>gwl.filelist()
Notes
-----
When only srcdir is given, result will be a list of
GwSeries objects for all sourcefiles in srcdir.
When both srcdir and srcfile are given, all files from srcdir will
be selected and srcfile will be ignored.
When loclist is given, names in this list will be used for
    selecting files in srcdir. All series that belong to a location
    will be selected as separate series. For managing series that
    belong to one location, use the GwLocs object.
"""
_valid_srctype = ['dinocsv','json','hymon','waterweb']
def __repr__(self):
mylen = len(self)
return (f'{self.__class__.__name__}({mylen})')
def __init__(self,srcdir=None,srctype='dinocsv',loclist=None,
srcfile=None):
"""Return GwList object"""
self._srcdir = srcdir
self._srctype = srctype
self._loclist = loclist
self._srcfile = srcfile
self._itercount = 0
        if self._srctype not in self._valid_srctype:
            raise ValueError(
                f'{self._srctype} is not a valid sourcefile type. '
                f'Valid sourcefile types are {self._valid_srctype}.')
        if (self._srcdir is None) and (self._srcfile is None):
            raise ValueError(
                'At least one of parameters srcdir or srcfile must be given.')
        if (self._srcdir is not None) and (self._srcfile is not None):
            warnings.warn(
                f'Ambiguous combination of parameter values: srcdir is '
                f'{self._srcdir} (not None) and srcfile is {self._srcfile} '
                f'(not None). Given value for srcfile will be ignored.')
            self._srcfile = None # given value for srcfile is ignored!
##logger.warning(msg)
if self._srcdir is not None:
if not os.path.isdir(self._srcdir):
                raise ValueError(f'Directory {self._srcdir} does not exist')
self._flist = self.filetable() #_sourcefiles()
if (self._srcfile is not None) and (
self._srctype in ['dinocsv','json']):
if not os.path.exists(self._srcfile):
raise ValueError(f'Filepath {self._srcfile} does not exist.')
self._flist = self.filetable()
if (self._srcfile is not None) and (self._srctype=='hymon'):
self.hm = aq.HydroMonitor.from_csv(filepath=srcfile)
if (self._srcfile is not None) and (self._srctype=='waterweb'):
self._wwn = aq.WaterWeb.read_csv(srcfile,network=None)
def filetable(self):
""" Return list of sourcefile names """
if (self._srcdir is not None) and (self._srctype
in ['dinocsv','json']):
return self._sourcefiles()
if (self._srcfile is not None) and (self._srctype in ['dinocsv','json']):
ftime = datetime.fromtimestamp(os.path.getmtime(self._srcfile))
fileage = datetime.now()-ftime
if fileage.days > 1:
msg = f'Age of {self._srcfile} is {fileage.days} days.'
warnings.warn(msg)
#TODO: check if flist contains valid sourcefilenames
## flist must be a list of sourcefilesnames
flist = pd.read_csv(self._srcfile,index_col=False)
return flist
if (self._srcfile is not None) and (self._srctype=='hymon'):
            msg = ("Method filetable() is not supported for "
                "sourcetype 'hymon'.")
warnings.warn(msg)
##logger.warning(msg)
return None
        warnings.warn(
            'Unexpected combination of given parameters. '
            'No list of GwSeries objects is returned.')
##logger.warning(msg)
return None
def __iter__(self):
""" return iterator """
return self
def __next__(self):
""" return next gwseries object in list """
if self._itercount >= self.__len__():
self._itercount = 0
raise StopIteration
if self._srctype == 'dinocsv':
idx = self._flist.index[self._itercount]
            filename = self._flist.at[idx,'path']
self.gw = aq.GwSeries.from_dinogws(filename)
if self._srctype == 'json':
idx = self._flist.index[self._itercount]
filename = self._flist.at[idx,'path']
self.gw = aq.GwSeries.from_json(filename)
if self._srctype == 'hymon':
self.gw = next(self.hm)
if self._srctype == 'waterweb':
srname = self._wwn.srnames()[self._itercount]
self.gw = self._wwn.gwseries(srname)
self._itercount += 1
return self.gw
def __len__(self):
if self._srctype in ['dinocsv','json']:
return len(self._flist)
if self._srctype in ['hymon']:
return len(self.hm)
if self._srctype=='waterweb':
return len(self._wwn)
    def is_callable(self):
        """Return True if object is waiting for a call"""
        return self._itercount == 0
def _sourcefiles(self):
""" return list of sourcefiles in directory dir"""
if self._srctype=='dinocsv':
pathlist = aq.listdir(self._srcdir, filetype='csv')
filelist = [os.path.split(path)[-1] for path in pathlist if path.split('_')[-1].endswith('1.csv')]
dnfiles = pd.DataFrame({"file":filelist})
dnfiles["loc"] = dnfiles["file"].apply(lambda x:x[0:8])
dnfiles["fil"] = dnfiles["file"].apply(
lambda x:x[8:11].lstrip("0"))
dnfiles["kaartblad"] = dnfiles["loc"].apply(lambda x:x[1:4])
dnfiles["series"]= dnfiles["loc"]+"_"+dnfiles["fil"]
dnfiles["path"]= dnfiles["file"].apply(lambda x:self._srcdir+x)
if self._loclist is not None:
mask = dnfiles['loc'].isin(self._loclist)
dnfiles = dnfiles[mask].reset_index(drop=True)
return dnfiles
if self._srctype=='json':
files = aq.listdir(self._srcdir, filetype='json')
jsf = pd.DataFrame({"file":files})
jsf["series"]= jsf["file"].apply(lambda x:x.split('.')[0])
jsf["loc"] = jsf["series"].apply(lambda x:x.split('_')[0])
jsf["fil"] = jsf["series"].apply(
lambda x:x.split('_')[-1].lstrip("0"))
jsf["path"]= jsf['file'].apply(lambda x:os.path.join(
self._srcdir,x))
if self._loclist is not None:
mask = jsf['loc'].isin(self._loclist)
jsf = jsf[mask].reset_index(drop=True)
return jsf
def gwseries(self,srname):
""" Return single named GwSeries object from list
Parameters
----------
srname : str
series name of requested GwSeries object
Returns
-------
acequia.GwSeries object
"""
if self._srctype in ['dinocsv','json']:
row = self._flist[self._flist['series']==srname]
indexval = row.index.values[0]
filepath = self._flist.loc[indexval,'path']
if self._srctype=='json':
gw = aq.GwSeries.from_json(filepath)
if self._srctype=='dinocsv':
gw = aq.GwSeries.from_dinogws(filepath)
if self._srctype=='hymon':
for gw in self.hm:
if gw.name==srname:
break
if self._srctype=='waterweb':
gw = self._wwn.gwseries(srname)
return gw
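

if __name__ == "__main__":
    # Illustrative sketch only; the directory path below is hypothetical and
    # must point at a folder of dinoloket csv exports.
    gwl = GwList(srcdir='./dinocsv/', srctype='dinocsv')
    print(f'{len(gwl)} series found')
    for gw in gwl:
        print(gw)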
|
the-stack_106_14064
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading the file
data=pd.read_csv(path)
#Code starts here
# Step 1
#Reading the file
#Creating a new variable to store the value counts
loan_status = data['Loan_Status'].value_counts()
#Plotting bar plot
plt.bar(loan_status.index, loan_status)
plt.show()
# Step 2
#Plotting an unstacked bar plot
property_and_loan = data.groupby(['Property_Area','Loan_Status'])
property_and_loan = property_and_loan.size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
#Changing the x-axis label
plt.xlabel('Property_Area')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
# Step 3
#Plotting a stacked bar plot
education_and_loan = data.groupby(['Education','Loan_Status'])
education_and_loan = education_and_loan.size().unstack()
education_and_loan.plot(kind='bar',stacked=True, figsize=(15,10))
#Changing the x-axis label
plt.xlabel('Education Status')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
# Step 4
#Subsetting the dataframe based on 'Education' column
graduate = data[data['Education'] == 'Graduate']
#Subsetting the dataframe based on 'Education' column
not_graduate = data[data['Education'] == 'Not Graduate']
#Plotting density plot for 'Graduate'
graduate['LoanAmount'].plot(kind='density', label='Graduate')
#Plotting density plot for 'Not Graduate'
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
#For automatic legend display
plt.legend()
# Step 5
#Setting up the subplots
fig ,(ax_1,ax_2,ax_3) = plt.subplots(1,3, figsize=(20,8))
#Plotting scatter plot
ax_1.scatter(data['ApplicantIncome'],data["LoanAmount"])
#Setting the subplot axis title
ax_1.set(title='Applicant Income')
#Plotting scatter plot
ax_2.scatter(data['CoapplicantIncome'],data["LoanAmount"])
#Setting the subplot axis title
ax_2.set(title='Coapplicant Income')
#Creating a new column 'TotalIncome'
data['TotalIncome']= data['ApplicantIncome']+ data['CoapplicantIncome']
#Plotting scatter plot
ax_3.scatter(data['TotalIncome'],data["LoanAmount"])
#Setting the subplot axis title
ax_3.set(title='Total Income')
|
the-stack_106_14065
|
"""SCons.Tool.pdflatex
Tool-specific initialization for pdflatex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/branch.96/baseline/src/engine/SCons/Tool/pdflatex.py 0.96.93.D001 2006/11/06 08:31:54 knight"
import SCons.Action
import SCons.Util
import SCons.Tool.pdf
import SCons.Tool.tex
PDFLaTeXAction = None
def PDFLaTeXAuxFunction(target = None, source= None, env=None):
SCons.Tool.tex.InternalLaTeXAuxAction( PDFLaTeXAction, target, source, env )
PDFLaTeXAuxAction = None
def generate(env):
"""Add Builders and construction variables for pdflatex to an Environment."""
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action('$PDFLATEXCOM', '$PDFLATEXCOMSTR')
global PDFLaTeXAuxAction
if PDFLaTeXAuxAction is None:
PDFLaTeXAuxAction = SCons.Action.Action(PDFLaTeXAuxFunction,
strfunction=None)
import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.ltx', PDFLaTeXAuxAction)
bld.add_action('.latex', PDFLaTeXAuxAction)
bld.add_emitter('.ltx', SCons.Tool.tex.tex_emitter)
bld.add_emitter('.latex', SCons.Tool.tex.tex_emitter)
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('')
env['PDFLATEXCOM'] = '$PDFLATEX $PDFLATEXFLAGS $SOURCE'
env['LATEXRETRIES'] = 3
def exists(env):
return env.Detect('pdflatex')
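
# Illustrative SConstruct sketch (not part of this tool module); it assumes
# pdflatex is installed and that a 'paper.ltx' source file exists:
#
#     env = Environment(tools=['default', 'pdflatex'])
#     env.PDF(target='paper.pdf', source='paper.ltx')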
|
the-stack_106_14066
|
# USAGE
# python encode_faces.py  (dataset directory and output pickle path are hardcoded below)
# import the necessary packages
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images("dataset"))
# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
# load the input image and convert it from RGB (OpenCV ordering)
# to dlib ordering (RGB)
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input image
boxes = face_recognition.face_locations(rgb,
model="hog")
# compute the facial embedding for the face
encodings = face_recognition.face_encodings(rgb, boxes)
# loop over the encodings
for encoding in encodings:
# add each encoding + name to our set of known names and
# encodings
knownEncodings.append(encoding)
knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open("encoding.pickle", "wb")
f.write(pickle.dumps(data))
f.close()
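
# Illustrative follow-up (not part of the original script): the pickle written
# above can be reloaded later for recognition, e.g.
#
#   data = pickle.loads(open("encoding.pickle", "rb").read())
#   knownEncodings, knownNames = data["encodings"], data["names"]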
|
the-stack_106_14068
|
# client.py -- Implementation of the client side git protocols
# Copyright (C) 2008-2013 Jelmer Vernooij <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Client side support for the Git protocol.
The Dulwich client supports the following capabilities:
* thin-pack
* multi_ack_detailed
* multi_ack
* side-band-64k
* ofs-delta
* quiet
* report-status
* delete-refs
Known capabilities that are not supported:
* shallow
* no-progress
* include-tag
"""
from contextlib import closing
from io import BytesIO, BufferedReader
import select
import socket
import subprocess
import sys
try:
from urllib import quote as urlquote
from urllib import unquote as urlunquote
except ImportError:
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import dulwich
from dulwich.errors import (
GitProtocolError,
NotGitRepository,
SendPackError,
UpdateRefsError,
)
from dulwich.protocol import (
HangupException,
_RBUFSIZE,
agent_string,
capability_agent,
extract_capability_names,
CAPABILITY_AGENT,
CAPABILITY_DELETE_REFS,
CAPABILITY_MULTI_ACK,
CAPABILITY_MULTI_ACK_DETAILED,
CAPABILITY_OFS_DELTA,
CAPABILITY_QUIET,
CAPABILITY_REPORT_STATUS,
CAPABILITY_SHALLOW,
CAPABILITY_SYMREF,
CAPABILITY_SIDE_BAND_64K,
CAPABILITY_THIN_PACK,
CAPABILITIES_REF,
KNOWN_RECEIVE_CAPABILITIES,
KNOWN_UPLOAD_CAPABILITIES,
COMMAND_DEEPEN,
COMMAND_SHALLOW,
COMMAND_UNSHALLOW,
COMMAND_DONE,
COMMAND_HAVE,
COMMAND_WANT,
SIDE_BAND_CHANNEL_DATA,
SIDE_BAND_CHANNEL_PROGRESS,
SIDE_BAND_CHANNEL_FATAL,
PktLineParser,
Protocol,
ProtocolFile,
TCP_GIT_PORT,
ZERO_SHA,
extract_capabilities,
parse_capability,
)
from dulwich.pack import (
write_pack_data,
write_pack_objects,
)
from dulwich.refs import (
read_info_refs,
ANNOTATED_TAG_SUFFIX,
)
class InvalidWants(Exception):
"""Invalid wants."""
def __init__(self, wants):
Exception.__init__(
self,
"requested wants not in server provided refs: %r" % wants)
def _fileno_can_read(fileno):
"""Check if a file descriptor is readable.
"""
return len(select.select([fileno], [], [], 0)[0]) > 0
def _win32_peek_avail(handle):
"""Wrapper around PeekNamedPipe to check how many bytes are available.
"""
from ctypes import byref, wintypes, windll
c_avail = wintypes.DWORD()
c_message = wintypes.DWORD()
success = windll.kernel32.PeekNamedPipe(
handle, None, 0, None, byref(c_avail),
byref(c_message))
if not success:
raise OSError(wintypes.GetLastError())
return c_avail.value
COMMON_CAPABILITIES = [CAPABILITY_OFS_DELTA, CAPABILITY_SIDE_BAND_64K]
UPLOAD_CAPABILITIES = ([CAPABILITY_THIN_PACK, CAPABILITY_MULTI_ACK,
CAPABILITY_MULTI_ACK_DETAILED, CAPABILITY_SHALLOW]
+ COMMON_CAPABILITIES)
RECEIVE_CAPABILITIES = (
[CAPABILITY_REPORT_STATUS, CAPABILITY_DELETE_REFS]
+ COMMON_CAPABILITIES)
class ReportStatusParser(object):
"""Handle status as reported by servers with 'report-status' capability."""
def __init__(self):
self._done = False
self._pack_status = None
self._ref_status_ok = True
self._ref_statuses = []
def check(self):
"""Check if there were any errors and, if so, raise exceptions.
Raises:
SendPackError: Raised when the server could not unpack
UpdateRefsError: Raised when refs could not be updated
"""
if self._pack_status not in (b'unpack ok', None):
raise SendPackError(self._pack_status)
if not self._ref_status_ok:
ref_status = {}
ok = set()
for status in self._ref_statuses:
if b' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(b' ', 1)
if status == b'ng':
if b' ' in ref:
ref, status = ref.split(b' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
# TODO(jelmer): don't assume encoding of refs is ascii.
raise UpdateRefsError(', '.join([
refname.decode('ascii') for refname in ref_status
if refname not in ok]) +
' failed to update', ref_status=ref_status)
def handle_packet(self, pkt):
"""Handle a packet.
Raises:
GitProtocolError: Raised when packets are received after a flush
packet.
"""
if self._done:
raise GitProtocolError("received more data after status report")
if pkt is None:
self._done = True
return
if self._pack_status is None:
self._pack_status = pkt.strip()
else:
ref_status = pkt.strip()
self._ref_statuses.append(ref_status)
if not ref_status.startswith(b'ok '):
self._ref_status_ok = False
def read_pkt_refs(proto):
server_capabilities = None
refs = {}
# Receive refs from server
for pkt in proto.read_pkt_seq():
(sha, ref) = pkt.rstrip(b'\n').split(None, 1)
if sha == b'ERR':
raise GitProtocolError(ref.decode('utf-8', 'replace'))
if server_capabilities is None:
(ref, server_capabilities) = extract_capabilities(ref)
refs[ref] = sha
if len(refs) == 0:
return {}, set([])
if refs == {CAPABILITIES_REF: ZERO_SHA}:
refs = {}
return refs, set(server_capabilities)
class FetchPackResult(object):
"""Result of a fetch-pack operation.
Attributes:
refs: Dictionary with all remote refs
symrefs: Dictionary with remote symrefs
agent: User agent string
"""
_FORWARDED_ATTRS = [
'clear', 'copy', 'fromkeys', 'get', 'has_key', 'items',
'iteritems', 'iterkeys', 'itervalues', 'keys', 'pop', 'popitem',
'setdefault', 'update', 'values', 'viewitems', 'viewkeys',
'viewvalues']
def __init__(self, refs, symrefs, agent, new_shallow=None,
new_unshallow=None):
self.refs = refs
self.symrefs = symrefs
self.agent = agent
self.new_shallow = new_shallow
self.new_unshallow = new_unshallow
def _warn_deprecated(self):
import warnings
warnings.warn(
"Use FetchPackResult.refs instead.",
DeprecationWarning, stacklevel=3)
def __eq__(self, other):
if isinstance(other, dict):
self._warn_deprecated()
return (self.refs == other)
return (self.refs == other.refs and
self.symrefs == other.symrefs and
self.agent == other.agent)
def __contains__(self, name):
self._warn_deprecated()
return name in self.refs
def __getitem__(self, name):
self._warn_deprecated()
return self.refs[name]
def __len__(self):
self._warn_deprecated()
return len(self.refs)
def __iter__(self):
self._warn_deprecated()
return iter(self.refs)
def __getattribute__(self, name):
if name in type(self)._FORWARDED_ATTRS:
self._warn_deprecated()
return getattr(self.refs, name)
return super(FetchPackResult, self).__getattribute__(name)
def __repr__(self):
return "%s(%r, %r, %r)" % (
self.__class__.__name__, self.refs, self.symrefs, self.agent)
def _read_shallow_updates(proto):
new_shallow = set()
new_unshallow = set()
for pkt in proto.read_pkt_seq():
cmd, sha = pkt.split(b' ', 1)
if cmd == COMMAND_SHALLOW:
new_shallow.add(sha.strip())
elif cmd == COMMAND_UNSHALLOW:
new_unshallow.add(sha.strip())
else:
raise GitProtocolError('unknown command %s' % pkt)
return (new_shallow, new_unshallow)
# TODO(durin42): this doesn't correctly degrade if the server doesn't
# support some capabilities. This should work properly with servers
# that don't support multi_ack.
class GitClient(object):
"""Git smart server client."""
def __init__(self, thin_packs=True, report_activity=None, quiet=False):
"""Create a new GitClient instance.
Args:
thin_packs: Whether or not thin packs should be retrieved
report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
self._report_status_parser = None
self._fetch_capabilities = set(UPLOAD_CAPABILITIES)
self._fetch_capabilities.add(capability_agent())
self._send_capabilities = set(RECEIVE_CAPABILITIES)
self._send_capabilities.add(capability_agent())
if quiet:
self._send_capabilities.add(CAPABILITY_QUIET)
if not thin_packs:
self._fetch_capabilities.remove(CAPABILITY_THIN_PACK)
def get_url(self, path):
"""Retrieves full url to given path.
Args:
path: Repository path (as string)
Returns:
Url to path (as string)
"""
raise NotImplementedError(self.get_url)
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
"""Create an instance of this client from a urlparse.parsed object.
Args:
parsedurl: Result of urlparse.urlparse()
Returns:
A `GitClient` object
"""
raise NotImplementedError(cls.from_parsedurl)
def send_pack(self, path, update_refs, generate_pack_data,
progress=None):
"""Upload a pack to a remote repository.
Args:
path: Repository path (as bytestring)
update_refs: Function to determine changes to remote refs. Receive
dict with existing remote refs, returns dict with
changed refs (name -> sha, where sha=ZERO_SHA for deletions)
generate_pack_data: Function that can return a tuple
with number of objects and list of pack data to include
progress: Optional progress function
Returns:
new_refs dictionary containing the changes that were made
{refname: new_ref}, including deleted refs.
Raises:
SendPackError: if server rejects the pack data
UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
raise NotImplementedError(self.send_pack)
def fetch(self, path, target, determine_wants=None, progress=None,
depth=None):
"""Fetch into a target repository.
Args:
path: Path to fetch from (as bytestring)
target: Target repository to fetch into
determine_wants: Optional function to determine what refs to fetch.
Receives dictionary of name->sha, should return
list of shas to fetch. Defaults to all shas.
progress: Optional progress function
depth: Depth to fetch at
Returns:
Dictionary with all remote refs (not just those fetched)
"""
if determine_wants is None:
determine_wants = target.object_store.determine_wants_all
if CAPABILITY_THIN_PACK in self._fetch_capabilities:
# TODO(jelmer): Avoid reading entire file into memory and
# only processing it after the whole file has been fetched.
f = BytesIO()
def commit():
if f.tell():
f.seek(0)
target.object_store.add_thin_pack(f.read, None)
def abort():
pass
else:
f, commit, abort = target.object_store.add_pack()
try:
result = self.fetch_pack(
path, determine_wants, target.get_graph_walker(), f.write,
progress=progress, depth=depth)
except BaseException:
abort()
raise
else:
commit()
target.update_shallow(result.new_shallow, result.new_unshallow)
return result
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None, depth=None):
"""Retrieve a pack from a git smart server.
Args:
path: Remote path to fetch from
determine_wants: Function determine what refs
to fetch. Receives dictionary of name->sha, should return
list of shas to fetch.
graph_walker: Object with next() and ack().
pack_data: Callback called for each bit of data in the pack
progress: Callback for progress reports (strings)
depth: Shallow fetch depth
Returns:
FetchPackResult object
"""
raise NotImplementedError(self.fetch_pack)
def get_refs(self, path):
"""Retrieve the current refs from a git smart server.
Args:
path: Path to the repo to fetch from. (as bytestring)
Returns:
"""
raise NotImplementedError(self.get_refs)
def _parse_status_report(self, proto):
unpack = proto.read_pkt_line().strip()
if unpack != b'unpack ok':
st = True
# flush remaining error data
while st is not None:
st = proto.read_pkt_line()
raise SendPackError(unpack)
statuses = []
errs = False
ref_status = proto.read_pkt_line()
while ref_status:
ref_status = ref_status.strip()
statuses.append(ref_status)
if not ref_status.startswith(b'ok '):
errs = True
ref_status = proto.read_pkt_line()
if errs:
ref_status = {}
ok = set()
for status in statuses:
if b' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(b' ', 1)
if status == b'ng':
if b' ' in ref:
ref, status = ref.split(b' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
raise UpdateRefsError(', '.join([
refname for refname in ref_status if refname not in ok]) +
b' failed to update', ref_status=ref_status)
def _read_side_band64k_data(self, proto, channel_callbacks):
"""Read per-channel data.
This requires the side-band-64k capability.
Args:
proto: Protocol object to read from
channel_callbacks: Dictionary mapping channels to packet
handlers to use. None for a callback discards channel data.
"""
for pkt in proto.read_pkt_seq():
channel = ord(pkt[:1])
pkt = pkt[1:]
try:
cb = channel_callbacks[channel]
except KeyError:
raise AssertionError('Invalid sideband channel %d' % channel)
else:
if cb is not None:
cb(pkt)
def _handle_receive_pack_head(self, proto, capabilities, old_refs,
new_refs):
"""Handle the head of a 'git-receive-pack' request.
Args:
proto: Protocol object to read from
capabilities: List of negotiated capabilities
old_refs: Old refs, as received from the server
new_refs: Refs to change
Returns:
have, want) tuple
"""
want = []
have = [x for x in old_refs.values() if not x == ZERO_SHA]
sent_capabilities = False
for refname in new_refs:
if not isinstance(refname, bytes):
raise TypeError('refname is not a bytestring: %r' % refname)
old_sha1 = old_refs.get(refname, ZERO_SHA)
if not isinstance(old_sha1, bytes):
raise TypeError('old sha1 for %s is not a bytestring: %r' %
(refname, old_sha1))
new_sha1 = new_refs.get(refname, ZERO_SHA)
if not isinstance(new_sha1, bytes):
                raise TypeError('new sha1 for %s is not a bytestring %r' %
(refname, new_sha1))
if old_sha1 != new_sha1:
if sent_capabilities:
proto.write_pkt_line(old_sha1 + b' ' + new_sha1 + b' ' +
refname)
else:
proto.write_pkt_line(
old_sha1 + b' ' + new_sha1 + b' ' + refname + b'\0' +
b' '.join(sorted(capabilities)))
sent_capabilities = True
if new_sha1 not in have and new_sha1 != ZERO_SHA:
want.append(new_sha1)
proto.write_pkt_line(None)
return (have, want)
def _negotiate_receive_pack_capabilities(self, server_capabilities):
negotiated_capabilities = (
self._send_capabilities & server_capabilities)
unknown_capabilities = ( # noqa: F841
extract_capability_names(server_capabilities) -
KNOWN_RECEIVE_CAPABILITIES)
# TODO(jelmer): warn about unknown capabilities
return negotiated_capabilities
def _handle_receive_pack_tail(self, proto, capabilities, progress=None):
"""Handle the tail of a 'git-receive-pack' request.
Args:
proto: Protocol object to read from
capabilities: List of negotiated capabilities
progress: Optional progress reporting function
Returns:
"""
if CAPABILITY_SIDE_BAND_64K in capabilities:
if progress is None:
def progress(x):
pass
channel_callbacks = {2: progress}
if CAPABILITY_REPORT_STATUS in capabilities:
channel_callbacks[1] = PktLineParser(
self._report_status_parser.handle_packet).parse
self._read_side_band64k_data(proto, channel_callbacks)
else:
if CAPABILITY_REPORT_STATUS in capabilities:
for pkt in proto.read_pkt_seq():
self._report_status_parser.handle_packet(pkt)
if self._report_status_parser is not None:
self._report_status_parser.check()
def _negotiate_upload_pack_capabilities(self, server_capabilities):
unknown_capabilities = ( # noqa: F841
extract_capability_names(server_capabilities) -
KNOWN_UPLOAD_CAPABILITIES)
# TODO(jelmer): warn about unknown capabilities
symrefs = {}
agent = None
for capability in server_capabilities:
k, v = parse_capability(capability)
if k == CAPABILITY_SYMREF:
(src, dst) = v.split(b':', 1)
symrefs[src] = dst
if k == CAPABILITY_AGENT:
agent = v
negotiated_capabilities = (
self._fetch_capabilities & server_capabilities)
return (negotiated_capabilities, symrefs, agent)
def _handle_upload_pack_head(self, proto, capabilities, graph_walker,
wants, can_read, depth):
"""Handle the head of a 'git-upload-pack' request.
Args:
proto: Protocol object to read from
capabilities: List of negotiated capabilities
graph_walker: GraphWalker instance to call .ack() on
wants: List of commits to fetch
can_read: function that returns a boolean that indicates
whether there is extra graph data to read on proto
depth: Depth for request
Returns:
"""
assert isinstance(wants, list) and isinstance(wants[0], bytes)
proto.write_pkt_line(COMMAND_WANT + b' ' + wants[0] + b' ' +
b' '.join(sorted(capabilities)) + b'\n')
for want in wants[1:]:
proto.write_pkt_line(COMMAND_WANT + b' ' + want + b'\n')
if depth not in (0, None) or getattr(graph_walker, 'shallow', None):
if CAPABILITY_SHALLOW not in capabilities:
raise GitProtocolError(
"server does not support shallow capability required for "
"depth")
for sha in graph_walker.shallow:
proto.write_pkt_line(COMMAND_SHALLOW + b' ' + sha + b'\n')
if depth is not None:
proto.write_pkt_line(COMMAND_DEEPEN + b' ' +
str(depth).encode('ascii') + b'\n')
proto.write_pkt_line(None)
if can_read is not None:
(new_shallow, new_unshallow) = _read_shallow_updates(proto)
else:
new_shallow = new_unshallow = None
else:
new_shallow = new_unshallow = set()
proto.write_pkt_line(None)
have = next(graph_walker)
while have:
proto.write_pkt_line(COMMAND_HAVE + b' ' + have + b'\n')
if can_read is not None and can_read():
pkt = proto.read_pkt_line()
parts = pkt.rstrip(b'\n').split(b' ')
if parts[0] == b'ACK':
graph_walker.ack(parts[1])
if parts[2] in (b'continue', b'common'):
pass
elif parts[2] == b'ready':
break
else:
raise AssertionError(
"%s not in ('continue', 'ready', 'common)" %
parts[2])
have = next(graph_walker)
proto.write_pkt_line(COMMAND_DONE + b'\n')
return (new_shallow, new_unshallow)
def _handle_upload_pack_tail(self, proto, capabilities, graph_walker,
pack_data, progress=None, rbufsize=_RBUFSIZE):
"""Handle the tail of a 'git-upload-pack' request.
Args:
proto: Protocol object to read from
capabilities: List of negotiated capabilities
graph_walker: GraphWalker instance to call .ack() on
pack_data: Function to call with pack data
progress: Optional progress reporting function
rbufsize: Read buffer size
Returns:
"""
pkt = proto.read_pkt_line()
while pkt:
parts = pkt.rstrip(b'\n').split(b' ')
if parts[0] == b'ACK':
graph_walker.ack(parts[1])
if len(parts) < 3 or parts[2] not in (
b'ready', b'continue', b'common'):
break
pkt = proto.read_pkt_line()
if CAPABILITY_SIDE_BAND_64K in capabilities:
if progress is None:
# Just ignore progress data
def progress(x):
pass
self._read_side_band64k_data(proto, {
SIDE_BAND_CHANNEL_DATA: pack_data,
SIDE_BAND_CHANNEL_PROGRESS: progress}
)
else:
while True:
data = proto.read(rbufsize)
if data == b"":
break
pack_data(data)
def check_wants(wants, refs):
"""Check that a set of wants is valid.
Args:
wants: Set of object SHAs to fetch
refs: Refs dictionary to check against
Returns:
"""
missing = set(wants) - {
v for (k, v) in refs.items()
if not k.endswith(ANNOTATED_TAG_SUFFIX)}
if missing:
raise InvalidWants(missing)
def remote_error_from_stderr(stderr):
if stderr is None:
return HangupException()
for l in stderr.readlines():
if l.startswith(b'ERROR: '):
return GitProtocolError(
l[len(b'ERROR: '):].decode('utf-8', 'replace'))
return GitProtocolError(l.decode('utf-8', 'replace'))
return HangupException()
class TraditionalGitClient(GitClient):
"""Traditional Git client."""
DEFAULT_ENCODING = 'utf-8'
def __init__(self, path_encoding=DEFAULT_ENCODING, **kwargs):
self._remote_path_encoding = path_encoding
super(TraditionalGitClient, self).__init__(**kwargs)
def _connect(self, cmd, path):
"""Create a connection to the server.
This method is abstract - concrete implementations should
implement their own variant which connects to the server and
returns an initialized Protocol object with the service ready
for use and a can_read function which may be used to see if
reads would block.
Args:
cmd: The git service name to which we should connect.
path: The path we should pass to the service. (as bytestirng)
"""
raise NotImplementedError()
def send_pack(self, path, update_refs, generate_pack_data,
progress=None):
"""Upload a pack to a remote repository.
Args:
path: Repository path (as bytestring)
update_refs: Function to determine changes to remote refs.
Receive dict with existing remote refs, returns dict with
changed refs (name -> sha, where sha=ZERO_SHA for deletions)
generate_pack_data: Function that can return a tuple with
number of objects and pack data to upload.
progress: Optional callback called with progress updates
Returns:
new_refs dictionary containing the changes that were made
{refname: new_ref}, including deleted refs.
Raises:
SendPackError: if server rejects the pack data
UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
proto, unused_can_read, stderr = self._connect(b'receive-pack', path)
with proto:
try:
old_refs, server_capabilities = read_pkt_refs(proto)
except HangupException:
raise remote_error_from_stderr(stderr)
negotiated_capabilities = \
self._negotiate_receive_pack_capabilities(server_capabilities)
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
report_status_parser = self._report_status_parser
try:
new_refs = orig_new_refs = update_refs(dict(old_refs))
except BaseException:
proto.write_pkt_line(None)
raise
if CAPABILITY_DELETE_REFS not in server_capabilities:
# Server does not support deletions. Fail later.
new_refs = dict(orig_new_refs)
for ref, sha in orig_new_refs.items():
if sha == ZERO_SHA:
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
report_status_parser._ref_statuses.append(
b'ng ' + sha +
b' remote does not support deleting refs')
report_status_parser._ref_status_ok = False
del new_refs[ref]
if new_refs is None:
proto.write_pkt_line(None)
return old_refs
if len(new_refs) == 0 and len(orig_new_refs):
# NOOP - Original new refs filtered out by policy
proto.write_pkt_line(None)
if report_status_parser is not None:
report_status_parser.check()
return old_refs
(have, want) = self._handle_receive_pack_head(
proto, negotiated_capabilities, old_refs, new_refs)
if (not want and
set(new_refs.items()).issubset(set(old_refs.items()))):
return new_refs
pack_data_count, pack_data = generate_pack_data(
have, want,
ofs_delta=(CAPABILITY_OFS_DELTA in negotiated_capabilities))
dowrite = bool(pack_data_count)
dowrite = dowrite or any(old_refs.get(ref) != sha
for (ref, sha) in new_refs.items()
if sha != ZERO_SHA)
if dowrite:
write_pack_data(proto.write_file(), pack_data_count, pack_data)
self._handle_receive_pack_tail(
proto, negotiated_capabilities, progress)
return new_refs
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None, depth=None):
"""Retrieve a pack from a git smart server.
Args:
path: Remote path to fetch from
determine_wants: Function determine what refs
to fetch. Receives dictionary of name->sha, should return
list of shas to fetch.
graph_walker: Object with next() and ack().
pack_data: Callback called for each bit of data in the pack
progress: Callback for progress reports (strings)
depth: Shallow fetch depth
Returns:
FetchPackResult object
"""
proto, can_read, stderr = self._connect(b'upload-pack', path)
with proto:
try:
refs, server_capabilities = read_pkt_refs(proto)
except HangupException:
raise remote_error_from_stderr(stderr)
negotiated_capabilities, symrefs, agent = (
self._negotiate_upload_pack_capabilities(
server_capabilities))
if refs is None:
proto.write_pkt_line(None)
return FetchPackResult(refs, symrefs, agent)
try:
wants = determine_wants(refs)
except BaseException:
proto.write_pkt_line(None)
raise
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
proto.write_pkt_line(None)
return FetchPackResult(refs, symrefs, agent)
(new_shallow, new_unshallow) = self._handle_upload_pack_head(
proto, negotiated_capabilities, graph_walker, wants, can_read,
depth=depth)
self._handle_upload_pack_tail(
proto, negotiated_capabilities, graph_walker, pack_data,
progress)
return FetchPackResult(
refs, symrefs, agent, new_shallow, new_unshallow)
def get_refs(self, path):
"""Retrieve the current refs from a git smart server.
"""
# stock `git ls-remote` uses upload-pack
proto, _, stderr = self._connect(b'upload-pack', path)
with proto:
try:
refs, _ = read_pkt_refs(proto)
except HangupException:
raise remote_error_from_stderr(stderr)
proto.write_pkt_line(None)
return refs
def archive(self, path, committish, write_data, progress=None,
write_error=None, format=None, subdirs=None, prefix=None):
proto, can_read, stderr = self._connect(b'upload-archive', path)
with proto:
if format is not None:
proto.write_pkt_line(b"argument --format=" + format)
proto.write_pkt_line(b"argument " + committish)
if subdirs is not None:
for subdir in subdirs:
proto.write_pkt_line(b"argument " + subdir)
if prefix is not None:
proto.write_pkt_line(b"argument --prefix=" + prefix)
proto.write_pkt_line(None)
try:
pkt = proto.read_pkt_line()
except HangupException:
raise remote_error_from_stderr(stderr)
if pkt == b"NACK\n":
return
elif pkt == b"ACK\n":
pass
elif pkt.startswith(b"ERR "):
raise GitProtocolError(
pkt[4:].rstrip(b"\n").decode('utf-8', 'replace'))
else:
raise AssertionError("invalid response %r" % pkt)
ret = proto.read_pkt_line()
if ret is not None:
raise AssertionError("expected pkt tail")
self._read_side_band64k_data(proto, {
SIDE_BAND_CHANNEL_DATA: write_data,
SIDE_BAND_CHANNEL_PROGRESS: progress,
SIDE_BAND_CHANNEL_FATAL: write_error})
class TCPGitClient(TraditionalGitClient):
"""A Git Client that works over TCP directly (i.e. git://)."""
def __init__(self, host, port=None, **kwargs):
if port is None:
port = TCP_GIT_PORT
self._host = host
self._port = port
super(TCPGitClient, self).__init__(**kwargs)
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
return cls(parsedurl.hostname, port=parsedurl.port, **kwargs)
def get_url(self, path):
netloc = self._host
if self._port is not None and self._port != TCP_GIT_PORT:
netloc += ":%d" % self._port
return urlparse.urlunsplit(("git", netloc, path, '', ''))
def _connect(self, cmd, path):
if not isinstance(cmd, bytes):
raise TypeError(cmd)
if not isinstance(path, bytes):
path = path.encode(self._remote_path_encoding)
sockaddrs = socket.getaddrinfo(
self._host, self._port, socket.AF_UNSPEC, socket.SOCK_STREAM)
s = None
err = socket.error("no address found for %s" % self._host)
for (family, socktype, proto, canonname, sockaddr) in sockaddrs:
s = socket.socket(family, socktype, proto)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
s.connect(sockaddr)
break
except socket.error as e:
err = e
if s is not None:
s.close()
s = None
if s is None:
raise err
# -1 means system default buffering
rfile = s.makefile('rb', -1)
# 0 means unbuffered
wfile = s.makefile('wb', 0)
def close():
rfile.close()
wfile.close()
s.close()
proto = Protocol(rfile.read, wfile.write, close,
report_activity=self._report_activity)
if path.startswith(b"/~"):
path = path[1:]
# TODO(jelmer): Alternative to ascii?
proto.send_cmd(
b'git-' + cmd, path, b'host=' + self._host.encode('ascii'))
return proto, lambda: _fileno_can_read(s), None
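# Minimal sketch of the URL form produced by TCPGitClient.get_url; the host, port and
# path below are placeholders and no connection is made.
def _example_tcp_git_url():
    client = TCPGitClient('example.com', port=9419)
    return client.get_url('/project.git')  # -> 'git://example.com:9419/project.git'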
class SubprocessWrapper(object):
"""A socket-like object that talks to a subprocess via pipes."""
def __init__(self, proc):
self.proc = proc
if sys.version_info[0] == 2:
self.read = proc.stdout.read
else:
self.read = BufferedReader(proc.stdout).read
self.write = proc.stdin.write
@property
def stderr(self):
return self.proc.stderr
def can_read(self):
if sys.platform == 'win32':
from msvcrt import get_osfhandle
handle = get_osfhandle(self.proc.stdout.fileno())
return _win32_peek_avail(handle) != 0
else:
return _fileno_can_read(self.proc.stdout.fileno())
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
self.proc.wait()
def find_git_command():
"""Find command to run for system Git (usually C Git)."""
if sys.platform == 'win32': # support .exe, .bat and .cmd
try: # to avoid overhead
import win32api
except ImportError: # run through cmd.exe with some overhead
return ['cmd', '/c', 'git']
else:
status, git = win32api.FindExecutable('git')
return [git]
else:
return ['git']
class SubprocessGitClient(TraditionalGitClient):
"""Git client that talks to a server using a subprocess."""
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
return cls(**kwargs)
git_command = None
def _connect(self, service, path):
if not isinstance(service, bytes):
raise TypeError(service)
if isinstance(path, bytes):
path = path.decode(self._remote_path_encoding)
        if self.git_command is None:
            git_command = find_git_command()
        else:
            # Respect an explicitly configured git command instead of leaving the
            # local variable unbound.
            git_command = self.git_command
        argv = git_command + [service.decode('ascii'), path]
p = subprocess.Popen(argv, bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pw = SubprocessWrapper(p)
return (Protocol(pw.read, pw.write, pw.close,
report_activity=self._report_activity),
pw.can_read, p.stderr)
class LocalGitClient(GitClient):
"""Git Client that just uses a local Repo."""
def __init__(self, thin_packs=True, report_activity=None, config=None):
"""Create a new LocalGitClient instance.
Args:
thin_packs: Whether or not thin packs should be retrieved
report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
# Ignore the thin_packs argument
def get_url(self, path):
return urlparse.urlunsplit(('file', '', path, '', ''))
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
return cls(**kwargs)
@classmethod
def _open_repo(cls, path):
from dulwich.repo import Repo
if not isinstance(path, str):
path = path.decode(sys.getfilesystemencoding())
return closing(Repo(path))
def send_pack(self, path, update_refs, generate_pack_data,
progress=None):
"""Upload a pack to a remote repository.
Args:
path: Repository path (as bytestring)
update_refs: Function to determine changes to remote refs.
Receive dict with existing remote refs, returns dict with
changed refs (name -> sha, where sha=ZERO_SHA for deletions)
generate_pack_data: Function that can return a tuple
with number of items and pack data to upload.
progress: Optional progress function
Returns:
new_refs dictionary containing the changes that were made
{refname: new_ref}, including deleted refs.
Raises:
SendPackError: if server rejects the pack data
UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
if not progress:
def progress(x):
pass
with self._open_repo(path) as target:
old_refs = target.get_refs()
new_refs = update_refs(dict(old_refs))
have = [sha1 for sha1 in old_refs.values() if sha1 != ZERO_SHA]
want = []
for refname, new_sha1 in new_refs.items():
if (new_sha1 not in have and
new_sha1 not in want and
new_sha1 != ZERO_SHA):
want.append(new_sha1)
if (not want and
set(new_refs.items()).issubset(set(old_refs.items()))):
return new_refs
target.object_store.add_pack_data(
*generate_pack_data(have, want, ofs_delta=True))
for refname, new_sha1 in new_refs.items():
old_sha1 = old_refs.get(refname, ZERO_SHA)
if new_sha1 != ZERO_SHA:
if not target.refs.set_if_equals(
refname, old_sha1, new_sha1):
progress('unable to set %s to %s' %
(refname, new_sha1))
else:
if not target.refs.remove_if_equals(refname, old_sha1):
progress('unable to remove %s' % refname)
return new_refs
def fetch(self, path, target, determine_wants=None, progress=None,
depth=None):
"""Fetch into a target repository.
Args:
path: Path to fetch from (as bytestring)
target: Target repository to fetch into
determine_wants: Optional function determine what refs
to fetch. Receives dictionary of name->sha, should return
list of shas to fetch. Defaults to all shas.
progress: Optional progress function
depth: Shallow fetch depth
Returns:
FetchPackResult object
"""
with self._open_repo(path) as r:
refs = r.fetch(target, determine_wants=determine_wants,
progress=progress, depth=depth)
return FetchPackResult(refs, r.refs.get_symrefs(),
agent_string())
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None, depth=None):
"""Retrieve a pack from a git smart server.
Args:
path: Remote path to fetch from
determine_wants: Function determine what refs
to fetch. Receives dictionary of name->sha, should return
list of shas to fetch.
graph_walker: Object with next() and ack().
pack_data: Callback called for each bit of data in the pack
progress: Callback for progress reports (strings)
depth: Shallow fetch depth
Returns:
FetchPackResult object
"""
with self._open_repo(path) as r:
objects_iter = r.fetch_objects(
determine_wants, graph_walker, progress=progress, depth=depth)
symrefs = r.refs.get_symrefs()
agent = agent_string()
# Did the process short-circuit (e.g. in a stateless RPC call)?
# Note that the client still expects a 0-object pack in most cases.
if objects_iter is None:
return FetchPackResult(None, symrefs, agent)
protocol = ProtocolFile(None, pack_data)
write_pack_objects(protocol, objects_iter)
return FetchPackResult(r.get_refs(), symrefs, agent)
def get_refs(self, path):
"""Retrieve the current refs from a git smart server.
"""
with self._open_repo(path) as target:
return target.get_refs()
# What Git client to use for local access
default_local_git_client_cls = LocalGitClient
class SSHVendor(object):
"""A client side SSH implementation."""
def connect_ssh(self, host, command, username=None, port=None,
password=None, key_filename=None):
# This function was deprecated in 0.9.1
import warnings
warnings.warn(
"SSHVendor.connect_ssh has been renamed to SSHVendor.run_command",
DeprecationWarning)
return self.run_command(host, command, username=username, port=port,
password=password, key_filename=key_filename)
def run_command(self, host, command, username=None, port=None,
password=None, key_filename=None):
"""Connect to an SSH server.
Run a command remotely and return a file-like object for interaction
with the remote command.
Args:
host: Host name
command: Command to run (as argv array)
          username: Optional name of user to log in as
port: Optional SSH port to use
password: Optional ssh password for login or private key
key_filename: Optional path to private keyfile
Returns:
"""
raise NotImplementedError(self.run_command)
class StrangeHostname(Exception):
"""Refusing to connect to strange SSH hostname."""
def __init__(self, hostname):
super(StrangeHostname, self).__init__(hostname)
class SubprocessSSHVendor(SSHVendor):
"""SSH vendor that shells out to the local 'ssh' command."""
def run_command(self, host, command, username=None, port=None,
password=None, key_filename=None):
if password is not None:
raise NotImplementedError(
"Setting password not supported by SubprocessSSHVendor.")
args = ['ssh', '-x']
if port:
args.extend(['-p', str(port)])
if key_filename:
args.extend(['-i', str(key_filename)])
if username:
host = '%s@%s' % (username, host)
if host.startswith('-'):
raise StrangeHostname(hostname=host)
args.append(host)
proc = subprocess.Popen(args + [command], bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return SubprocessWrapper(proc)
class PLinkSSHVendor(SSHVendor):
"""SSH vendor that shells out to the local 'plink' command."""
def run_command(self, host, command, username=None, port=None,
password=None, key_filename=None):
if sys.platform == 'win32':
args = ['plink.exe', '-ssh']
else:
args = ['plink', '-ssh']
if password is not None:
import warnings
warnings.warn(
"Invoking PLink with a password exposes the password in the "
"process list.")
args.extend(['-pw', str(password)])
if port:
args.extend(['-P', str(port)])
if key_filename:
args.extend(['-i', str(key_filename)])
if username:
host = '%s@%s' % (username, host)
if host.startswith('-'):
raise StrangeHostname(hostname=host)
args.append(host)
proc = subprocess.Popen(args + [command], bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return SubprocessWrapper(proc)
def ParamikoSSHVendor(**kwargs):
import warnings
warnings.warn(
"ParamikoSSHVendor has been moved to dulwich.contrib.paramiko_vendor.",
DeprecationWarning)
from dulwich.contrib.paramiko_vendor import ParamikoSSHVendor
return ParamikoSSHVendor(**kwargs)
# Can be overridden by users
get_ssh_vendor = SubprocessSSHVendor
class SSHGitClient(TraditionalGitClient):
def __init__(self, host, port=None, username=None, vendor=None,
config=None, password=None, key_filename=None, **kwargs):
self.host = host
self.port = port
self.username = username
self.password = password
self.key_filename = key_filename
super(SSHGitClient, self).__init__(**kwargs)
self.alternative_paths = {}
if vendor is not None:
self.ssh_vendor = vendor
else:
self.ssh_vendor = get_ssh_vendor()
def get_url(self, path):
netloc = self.host
if self.port is not None:
netloc += ":%d" % self.port
if self.username is not None:
netloc = urlquote(self.username, '@/:') + "@" + netloc
return urlparse.urlunsplit(('ssh', netloc, path, '', ''))
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
return cls(host=parsedurl.hostname, port=parsedurl.port,
username=parsedurl.username, **kwargs)
def _get_cmd_path(self, cmd):
cmd = self.alternative_paths.get(cmd, b'git-' + cmd)
assert isinstance(cmd, bytes)
return cmd
def _connect(self, cmd, path):
if not isinstance(cmd, bytes):
raise TypeError(cmd)
if isinstance(path, bytes):
path = path.decode(self._remote_path_encoding)
if path.startswith("/~"):
path = path[1:]
argv = (self._get_cmd_path(cmd).decode(self._remote_path_encoding) +
" '" + path + "'")
kwargs = {}
if self.password is not None:
kwargs['password'] = self.password
if self.key_filename is not None:
kwargs['key_filename'] = self.key_filename
con = self.ssh_vendor.run_command(
self.host, argv, port=self.port, username=self.username,
**kwargs)
return (Protocol(con.read, con.write, con.close,
report_activity=self._report_activity),
con.can_read, getattr(con, 'stderr', None))
def default_user_agent_string():
# Start user agent with "git/", because GitHub requires this. :-( See
# https://github.com/jelmer/dulwich/issues/562 for details.
return "git/dulwich/%s" % ".".join([str(x) for x in dulwich.__version__])
def default_urllib3_manager(config, **override_kwargs):
"""Return `urllib3` connection pool manager.
Honour detected proxy configurations.
Args:
      config: `dulwich.config.ConfigDict` instance with Git configuration.
      override_kwargs: Additional arguments for `urllib3.ProxyManager`.
    Returns:
      `urllib3.ProxyManager` instance for proxy configurations,
      `urllib3.PoolManager` otherwise.
"""
proxy_server = user_agent = None
ca_certs = ssl_verify = None
if config is not None:
try:
proxy_server = config.get(b"http", b"proxy")
except KeyError:
pass
try:
user_agent = config.get(b"http", b"useragent")
except KeyError:
pass
# TODO(jelmer): Support per-host settings
try:
ssl_verify = config.get_boolean(b"http", b"sslVerify")
except KeyError:
ssl_verify = True
try:
ca_certs = config.get(b"http", b"sslCAInfo")
except KeyError:
ca_certs = None
if user_agent is None:
user_agent = default_user_agent_string()
headers = {"User-agent": user_agent}
kwargs = {}
if ssl_verify is True:
kwargs['cert_reqs'] = "CERT_REQUIRED"
elif ssl_verify is False:
kwargs['cert_reqs'] = 'CERT_NONE'
else:
# Default to SSL verification
kwargs['cert_reqs'] = "CERT_REQUIRED"
if ca_certs is not None:
kwargs['ca_certs'] = ca_certs
kwargs.update(override_kwargs)
    # Try really hard to find an SSL certificate path
if 'ca_certs' not in kwargs and kwargs.get('cert_reqs') != 'CERT_NONE':
try:
import certifi
except ImportError:
pass
else:
kwargs['ca_certs'] = certifi.where()
import urllib3
if proxy_server is not None:
# `urllib3` requires a `str` object in both Python 2 and 3, while
# `ConfigDict` coerces entries to `bytes` on Python 3. Compensate.
if not isinstance(proxy_server, str):
proxy_server = proxy_server.decode()
manager = urllib3.ProxyManager(proxy_server, headers=headers,
**kwargs)
else:
manager = urllib3.PoolManager(headers=headers, **kwargs)
return manager
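# Minimal sketch of building a pool manager with no Git configuration at all; SSL
# verification stays enabled by default and certifi's CA bundle is picked up when it
# is installed.
def _example_default_pool_manager():
    manager = default_urllib3_manager(None)
    return manager.headers.get('User-agent')  # e.g. 'git/dulwich/<version>'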
class HttpGitClient(GitClient):
def __init__(self, base_url, dumb=None, pool_manager=None, config=None,
username=None, password=None, **kwargs):
self._base_url = base_url.rstrip("/") + "/"
self._username = username
self._password = password
self.dumb = dumb
if pool_manager is None:
self.pool_manager = default_urllib3_manager(config)
else:
self.pool_manager = pool_manager
if username is not None:
# No escaping needed: ":" is not allowed in username:
# https://tools.ietf.org/html/rfc2617#section-2
credentials = "%s:%s" % (username, password)
import urllib3.util
basic_auth = urllib3.util.make_headers(basic_auth=credentials)
self.pool_manager.headers.update(basic_auth)
GitClient.__init__(self, **kwargs)
def get_url(self, path):
return self._get_url(path).rstrip("/")
@classmethod
def from_parsedurl(cls, parsedurl, **kwargs):
password = parsedurl.password
if password is not None:
kwargs['password'] = urlunquote(password)
username = parsedurl.username
if username is not None:
kwargs['username'] = urlunquote(username)
netloc = parsedurl.hostname
if parsedurl.port:
netloc = "%s:%s" % (netloc, parsedurl.port)
if parsedurl.username:
netloc = "%s@%s" % (parsedurl.username, netloc)
parsedurl = parsedurl._replace(netloc=netloc)
return cls(urlparse.urlunparse(parsedurl), **kwargs)
def __repr__(self):
return "%s(%r, dumb=%r)" % (
type(self).__name__, self._base_url, self.dumb)
def _get_url(self, path):
if not isinstance(path, str):
# TODO(jelmer): this is unrelated to the local filesystem;
# This is not necessarily the right encoding to decode the path
# with.
path = path.decode(sys.getfilesystemencoding())
return urlparse.urljoin(self._base_url, path).rstrip("/") + "/"
def _http_request(self, url, headers=None, data=None,
allow_compression=False):
"""Perform HTTP request.
Args:
url: Request URL.
headers: Optional custom headers to override defaults.
data: Request data.
allow_compression: Allow GZipped communication.
Returns:
Tuple (`response`, `read`), where response is an `urllib3`
response object with additional `content_type` and
`redirect_location` properties, and `read` is a consumable read
method for the response data.
"""
req_headers = self.pool_manager.headers.copy()
if headers is not None:
req_headers.update(headers)
req_headers["Pragma"] = "no-cache"
if allow_compression:
req_headers["Accept-Encoding"] = "gzip"
else:
req_headers["Accept-Encoding"] = "identity"
if data is None:
resp = self.pool_manager.request("GET", url, headers=req_headers)
else:
resp = self.pool_manager.request("POST", url, headers=req_headers,
body=data)
if resp.status == 404:
raise NotGitRepository()
elif resp.status != 200:
raise GitProtocolError("unexpected http resp %d for %s" %
(resp.status, url))
# TODO: Optimization available by adding `preload_content=False` to the
# request and just passing the `read` method on instead of going via
# `BytesIO`, if we can guarantee that the entire response is consumed
# before issuing the next to still allow for connection reuse from the
# pool.
read = BytesIO(resp.data).read
resp.content_type = resp.getheader("Content-Type")
resp_url = resp.geturl()
resp.redirect_location = resp_url if resp_url != url else ''
return resp, read
def _discover_references(self, service, base_url):
assert base_url[-1] == "/"
tail = "info/refs"
headers = {"Accept": "*/*"}
if self.dumb is not True:
tail += "?service=%s" % service.decode('ascii')
url = urlparse.urljoin(base_url, tail)
resp, read = self._http_request(url, headers, allow_compression=True)
if resp.redirect_location:
# Something changed (redirect!), so let's update the base URL
if not resp.redirect_location.endswith(tail):
raise GitProtocolError(
"Redirected from URL %s to URL %s without %s" % (
url, resp.redirect_location, tail))
base_url = resp.redirect_location[:-len(tail)]
try:
self.dumb = not resp.content_type.startswith("application/x-git-")
if not self.dumb:
proto = Protocol(read, None)
# The first line should mention the service
try:
[pkt] = list(proto.read_pkt_seq())
except ValueError:
raise GitProtocolError(
"unexpected number of packets received")
if pkt.rstrip(b'\n') != (b'# service=' + service):
raise GitProtocolError(
"unexpected first line %r from smart server" % pkt)
return read_pkt_refs(proto) + (base_url, )
else:
return read_info_refs(resp), set(), base_url
finally:
resp.close()
def _smart_request(self, service, url, data):
assert url[-1] == "/"
url = urlparse.urljoin(url, service)
result_content_type = "application/x-%s-result" % service
headers = {
"Content-Type": "application/x-%s-request" % service,
"Accept": result_content_type,
"Content-Length": str(len(data)),
}
resp, read = self._http_request(url, headers, data)
if resp.content_type != result_content_type:
raise GitProtocolError("Invalid content-type from server: %s"
% resp.content_type)
return resp, read
def send_pack(self, path, update_refs, generate_pack_data,
progress=None):
"""Upload a pack to a remote repository.
Args:
path: Repository path (as bytestring)
update_refs: Function to determine changes to remote refs.
Receive dict with existing remote refs, returns dict with
changed refs (name -> sha, where sha=ZERO_SHA for deletions)
generate_pack_data: Function that can return a tuple
with number of elements and pack data to upload.
progress: Optional progress function
Returns:
new_refs dictionary containing the changes that were made
{refname: new_ref}, including deleted refs.
Raises:
SendPackError: if server rejects the pack data
UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
url = self._get_url(path)
old_refs, server_capabilities, url = self._discover_references(
b"git-receive-pack", url)
negotiated_capabilities = self._negotiate_receive_pack_capabilities(
server_capabilities)
negotiated_capabilities.add(capability_agent())
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
new_refs = update_refs(dict(old_refs))
if new_refs is None:
            # The update_refs function is aborting the push.
return old_refs
if self.dumb:
raise NotImplementedError(self.fetch_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
(have, want) = self._handle_receive_pack_head(
req_proto, negotiated_capabilities, old_refs, new_refs)
if not want and set(new_refs.items()).issubset(set(old_refs.items())):
return new_refs
pack_data_count, pack_data = generate_pack_data(
have, want,
ofs_delta=(CAPABILITY_OFS_DELTA in negotiated_capabilities))
if pack_data_count:
write_pack_data(req_proto.write_file(), pack_data_count, pack_data)
resp, read = self._smart_request("git-receive-pack", url,
data=req_data.getvalue())
try:
resp_proto = Protocol(read, None)
self._handle_receive_pack_tail(
resp_proto, negotiated_capabilities, progress)
return new_refs
finally:
resp.close()
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None, depth=None):
"""Retrieve a pack from a git smart server.
Args:
path: Path to fetch from
determine_wants: Callback that returns list of commits to fetch
graph_walker: Object with next() and ack().
pack_data: Callback called for each bit of data in the pack
progress: Callback for progress reports (strings)
depth: Depth for request
Returns:
FetchPackResult object
"""
url = self._get_url(path)
refs, server_capabilities, url = self._discover_references(
b"git-upload-pack", url)
negotiated_capabilities, symrefs, agent = (
self._negotiate_upload_pack_capabilities(
server_capabilities))
wants = determine_wants(refs)
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
return FetchPackResult(refs, symrefs, agent)
if self.dumb:
raise NotImplementedError(self.send_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
(new_shallow, new_unshallow) = self._handle_upload_pack_head(
req_proto, negotiated_capabilities, graph_walker, wants,
can_read=None, depth=depth)
resp, read = self._smart_request(
"git-upload-pack", url, data=req_data.getvalue())
try:
resp_proto = Protocol(read, None)
if new_shallow is None and new_unshallow is None:
(new_shallow, new_unshallow) = _read_shallow_updates(
resp_proto)
self._handle_upload_pack_tail(
resp_proto, negotiated_capabilities, graph_walker, pack_data,
progress)
return FetchPackResult(
refs, symrefs, agent, new_shallow, new_unshallow)
finally:
resp.close()
def get_refs(self, path):
"""Retrieve the current refs from a git smart server.
"""
url = self._get_url(path)
refs, _, _ = self._discover_references(
b"git-upload-pack", url)
return refs
def get_transport_and_path_from_url(url, config=None, **kwargs):
"""Obtain a git client from a URL.
Args:
url: URL to open (a unicode string)
config: Optional config object
thin_packs: Whether or not thin packs should be retrieved
report_activity: Optional callback for reporting transport
activity.
Returns:
Tuple with client instance and relative path.
"""
parsed = urlparse.urlparse(url)
if parsed.scheme == 'git':
return (TCPGitClient.from_parsedurl(parsed, **kwargs),
parsed.path)
elif parsed.scheme in ('git+ssh', 'ssh'):
return SSHGitClient.from_parsedurl(parsed, **kwargs), parsed.path
elif parsed.scheme in ('http', 'https'):
return HttpGitClient.from_parsedurl(
parsed, config=config, **kwargs), parsed.path
elif parsed.scheme == 'file':
return default_local_git_client_cls.from_parsedurl(
parsed, **kwargs), parsed.path
raise ValueError("unknown scheme '%s'" % parsed.scheme)
def parse_rsync_url(location):
"""Parse a rsync-style URL.
"""
if ':' in location and '@' not in location:
# SSH with no user@, zero or one leading slash.
(host, path) = location.split(':', 1)
user = None
elif ':' in location:
# SSH with user@host:foo.
user_host, path = location.split(':', 1)
if '@' in user_host:
user, host = user_host.rsplit('@', 1)
else:
user = None
host = user_host
else:
raise ValueError('not a valid rsync-style URL')
return (user, host, path)
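# Illustrative parse of the two accepted rsync-style forms; both locations below are
# made-up examples.
def _example_parse_rsync_url():
    assert parse_rsync_url('git@example.com:repo.git') == ('git', 'example.com', 'repo.git')
    assert parse_rsync_url('example.com:repo.git') == (None, 'example.com', 'repo.git')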
def get_transport_and_path(location, **kwargs):
"""Obtain a git client from a URL.
Args:
location: URL or path (a string)
config: Optional config object
thin_packs: Whether or not thin packs should be retrieved
report_activity: Optional callback for reporting transport
activity.
Returns:
Tuple with client instance and relative path.
"""
# First, try to parse it as a URL
try:
return get_transport_and_path_from_url(location, **kwargs)
except ValueError:
pass
if (sys.platform == 'win32' and
location[0].isalpha() and location[1:3] == ':\\'):
# Windows local path
return default_local_git_client_cls(**kwargs), location
try:
(username, hostname, path) = parse_rsync_url(location)
except ValueError:
# Otherwise, assume it's a local path.
return default_local_git_client_cls(**kwargs), location
else:
return SSHGitClient(hostname, username=username, **kwargs), path
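# Hedged sketch of the dispatch above; the locations are placeholders and only client
# objects are constructed, nothing is contacted.
def _example_transport_dispatch():
    client, path = get_transport_and_path('git://example.com/repo.git')
    assert isinstance(client, TCPGitClient) and path == '/repo.git'
    client, path = get_transport_and_path('git@example.com:repo.git')
    assert isinstance(client, SSHGitClient) and path == 'repo.git'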
|
the-stack_106_14069
|
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
#JSON file location
#CREDENTIALS_FILE = "C:\Users\ anshu\Desktop\JPEC Code\Email Checkout Tracker\credentials.json"
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
    # NOTE: the sample below is unreachable because the function returns the service
    # object above; it is kept as a reference for listing upcoming events.
    # Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print('Getting the upcoming 10 events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=10, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
print(start, event['summary'])
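# Hedged sketch of using the service object returned by main(); the event body and
# times below are made-up examples, and 'primary' is the user's default calendar.
def create_example_event(service):
    event = {
        'summary': 'Example event',
        'start': {'dateTime': '2024-01-01T10:00:00Z'},
        'end': {'dateTime': '2024-01-01T11:00:00Z'},
    }
    return service.events().insert(calendarId='primary', body=event).execute()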
if __name__ == '__main__':
main()
|
the-stack_106_14070
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests of BOPES Sampler."""
import unittest
from functools import partial
import numpy as np
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.utils import algorithm_globals
from qiskit_nature.algorithms import GroundStateEigensolver, BOPESSampler
from qiskit_nature.algorithms.pes_samplers import MorsePotential
from qiskit_nature.drivers import Molecule, PySCFDriver
from qiskit_nature.mappers.second_quantization import ParityMapper
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
class TestBOPES(unittest.TestCase):
"""Tests of BOPES Sampler."""
def test_h2_bopes_sampler(self):
"""Test BOPES Sampler on H2"""
seed = 50
algorithm_globals.random_seed = seed
# Molecule
dof = partial(Molecule.absolute_distance, atom_pair=(1, 0))
m = Molecule(
geometry=[["H", [0.0, 0.0, 1.0]], ["H", [0.0, 0.45, 1.0]]],
degrees_of_freedom=[dof],
)
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=True)
driver = PySCFDriver(molecule=m)
problem = ElectronicStructureProblem(driver)
solver = NumPyMinimumEigensolver()
me_gss = GroundStateEigensolver(converter, solver)
# BOPES sampler
sampler = BOPESSampler(gss=me_gss)
# absolute internuclear distance in Angstrom
points = [0.7, 1.0, 1.3]
results = sampler.sample(problem, points)
points_run = results.points
energies = results.energies
np.testing.assert_array_almost_equal(points_run, [0.7, 1.0, 1.3])
np.testing.assert_array_almost_equal(
energies, [-1.13618945, -1.10115033, -1.03518627], decimal=2
)
def test_potential_interface(self):
"""Tests potential interface."""
seed = 50
algorithm_globals.random_seed = seed
stretch = partial(Molecule.absolute_distance, atom_pair=(1, 0))
# H-H molecule near equilibrium geometry
m = Molecule(
geometry=[
["H", [0.0, 0.0, 0.0]],
["H", [1.0, 0.0, 0.0]],
],
degrees_of_freedom=[stretch],
masses=[1.6735328e-27, 1.6735328e-27],
)
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper)
driver = PySCFDriver(molecule=m)
problem = ElectronicStructureProblem(driver)
solver = NumPyMinimumEigensolver()
me_gss = GroundStateEigensolver(converter, solver)
# Run BOPESSampler with exact eigensolution
points = np.arange(0.45, 5.3, 0.3)
sampler = BOPESSampler(gss=me_gss)
res = sampler.sample(problem, points)
# Testing Potential interface
pot = MorsePotential(m)
pot.fit(res.points, res.energies)
np.testing.assert_array_almost_equal([pot.alpha, pot.r_0], [2.235, 0.720], decimal=3)
np.testing.assert_array_almost_equal([pot.d_e, pot.m_shift], [0.2107, -1.1419], decimal=3)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_14073
|
def print_board(board):
print("N-queens Board")
[print(_) for _ in board]
def create_board(size):
return [[0 for _ in range(size)] for _ in range(size)]
def is_safe(board, row, col):
    """Return True if a queen can be placed at (row, col) without being attacked."""
    # same column
    for r in range(len(board)):
        if board[r][col] == 1:
            return False
    # same row
    for c in range(len(board)):
        if board[row][c] == 1:
            return False
    # lower-right diagonal
    r = row+1
    c = col+1
    while r < len(board) and c < len(board):
        if board[r][c] == 1:
            return False
        r += 1
        c += 1
    # upper-left diagonal
    r = row-1
    c = col-1
    while r >=0 and c >= 0:
        if board[r][c] == 1:
            return False
        r -= 1
        c -= 1
    # upper-right diagonal
    r = row-1
    c = col+1
    while r >=0 and c < len(board):
        if board[r][c] == 1:
            return False
        r -= 1
        c += 1
    # lower-left diagonal
    r = row+1
    c = col-1
    while c >=0 and r < len(board):
        if board[r][c] == 1:
            return False
        c -= 1
        r += 1
return True
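# Quick illustration of is_safe with a single queen already placed at (0, 1); the
# coordinates are arbitrary examples.
def example_is_safe():
    board = create_board(4)
    board[0][1] = 1
    # (2, 0) is not attacked, while (2, 1) shares a column with the existing queen
    return is_safe(board, 2, 0), is_safe(board, 2, 1)  # -> (True, False)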
def nqueens(board, row, col, queens):
    """Recursively place `queens` queens, scanning from (row, col), and print each complete placement."""
if queens == 0:
print_board(board)
return
if col == len(board):
col = 0
row += 1
for i in range(row, len(board)):
for j in range(col, len(board)):
if is_safe(board, i, j):
board[i][j] = 1
nqueens(board, row, col+1, queens-1)
board[i][j] = 0
return
if __name__ == "__main__":
size = 5
board = create_board(size)
print_board(board)
nqueens(board, 0, 0, size)
|
the-stack_106_14074
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
This CArchiveReader is used only by the archive_viewer utility.
"""
# TODO clean up this module
import os
import struct
from PyInstaller.loader.pyimod02_archive import ArchiveReader
class NotAnArchiveError(Exception):
pass
class CTOCReader:
"""
A class encapsulating the table of contents of a CArchive.
When written to disk, it is easily read from C.
"""
# (structlen, dpos, dlen, ulen, flag, typcd) followed by name
ENTRYSTRUCT = '!iIIIBB'
ENTRYLEN = struct.calcsize(ENTRYSTRUCT)
def __init__(self):
self.data = []
def frombinary(self, s):
"""
Decode the binary string into an in memory list.
S is a binary string.
"""
p = 0
while p < len(s):
slen, dpos, dlen, ulen, flag, typcd = struct.unpack(self.ENTRYSTRUCT, s[p:p + self.ENTRYLEN])
nmlen = slen - self.ENTRYLEN
p = p + self.ENTRYLEN
nm, = struct.unpack('%is' % nmlen, s[p:p + nmlen])
p = p + nmlen
# nm may have up to 15 bytes of padding
nm = nm.rstrip(b'\0')
nm = nm.decode('utf-8')
typcd = chr(typcd)
self.data.append((dpos, dlen, ulen, flag, typcd, nm))
def get(self, ndx):
"""
Return the table of contents entry (tuple) at index NDX.
"""
return self.data[ndx]
def __getitem__(self, ndx):
return self.data[ndx]
def find(self, name):
"""
Return the index of the toc entry with name NAME.
Return -1 for failure.
"""
for i, nm in enumerate(self.data):
if nm[-1] == name:
return i
return -1
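# Minimal sketch of how a single TOC entry round-trips through CTOCReader.frombinary();
# the field values below are made up for illustration and do not come from a real
# archive.
def _example_ctoc_roundtrip():
    name = b'foo.pyc\0'  # names are NUL-padded in real archives
    entry = struct.pack(CTOCReader.ENTRYSTRUCT,
                        CTOCReader.ENTRYLEN + len(name),  # total entry length
                        0,          # dpos: offset of the data within the archive
                        10,         # dlen: stored (possibly compressed) length
                        20,         # ulen: uncompressed length
                        1,          # flag: 1 means zlib-compressed
                        ord('m'))   # typcd: entry type code
    toc = CTOCReader()
    toc.frombinary(entry + name)
    return toc.get(0)  # -> (0, 10, 20, 1, 'm', 'foo.pyc')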
class CArchiveReader(ArchiveReader):
"""
An Archive subclass that can hold arbitrary data.
This class encapsulates all files that are bundled within an executable. It can contain ZlibArchive (Python .pyc
files), dlls, Python C extensions and all other data files that are bundled in --onefile mode.
Easily handled from C or from Python.
"""
# MAGIC is useful to verify that conversion of Python data types to C structure and back works properly.
MAGIC = b'MEI\014\013\012\013\016'
HDRLEN = 0
LEVEL = 9
# Cookie - holds some information for the bootloader. C struct format definition. '!' at the beginning means network
# byte order. C struct looks like:
#
# typedef struct _cookie {
# char magic[8]; /* 'MEI\014\013\012\013\016' */
# uint32_t len; /* len of entire package */
# uint32_t TOC; /* pos (rel to start) of TableOfContents */
# int TOClen; /* length of TableOfContents */
# int pyvers; /* new in v4 */
# char pylibname[64]; /* Filename of Python dynamic library. */
# } COOKIE;
#
_cookie_format = '!8sIIii64s'
_cookie_size = struct.calcsize(_cookie_format)
def __init__(self, archive_path=None, start=0, length=0, pylib_name=''):
"""
Constructor.
archive_path path name of file (create empty CArchive if path is None).
start is the seekposition within PATH.
len is the length of the CArchive (if 0, then read till EOF).
pylib_name name of Python DLL which bootloader will use.
"""
self.length = length
self._pylib_name = pylib_name
# A CArchive created from scratch starts at 0, no leading bootloader.
self.pkg_start = 0
super().__init__(archive_path, start)
def checkmagic(self):
"""
Verify that self is a valid CArchive.
Magic signature is at end of the archive.
        This function is used by the ArchiveViewer.py utility.
"""
# Magic is at EOF; if we're embedded, we need to figure where that is.
if self.length:
self.lib.seek(self.start + self.length, 0)
else:
self.lib.seek(0, os.SEEK_END)
end_pos = self.lib.tell()
SEARCH_CHUNK_SIZE = 8192
magic_offset = -1
while end_pos >= len(self.MAGIC):
start_pos = max(end_pos - SEARCH_CHUNK_SIZE, 0)
chunk_size = end_pos - start_pos
# Is the remaining chunk large enough to hold the pattern?
if chunk_size < len(self.MAGIC):
break
# Read and scan the chunk
self.lib.seek(start_pos, os.SEEK_SET)
buf = self.lib.read(chunk_size)
pos = buf.rfind(self.MAGIC)
if pos != -1:
magic_offset = start_pos + pos
break
# Adjust search location for next chunk; ensure proper overlap
end_pos = start_pos + len(self.MAGIC) - 1
if magic_offset == -1:
raise RuntimeError("%s is not a valid %s archive file" % (self.path, self.__class__.__name__))
filelen = magic_offset + self._cookie_size
# Read the whole cookie
self.lib.seek(magic_offset, os.SEEK_SET)
buf = self.lib.read(self._cookie_size)
magic, totallen, tocpos, toclen, pyvers, pylib_name = struct.unpack(self._cookie_format, buf)
if magic != self.MAGIC:
raise RuntimeError("%s is not a valid %s archive file" % (self.path, self.__class__.__name__))
self.pkg_start = filelen - totallen
if self.length:
if totallen != self.length or self.pkg_start != self.start:
raise RuntimeError('Problem with embedded archive in %s' % self.path)
# Verify presence of Python library name.
if not pylib_name:
raise RuntimeError('Python library filename not defined in archive.')
self.tocpos, self.toclen = tocpos, toclen
def loadtoc(self):
"""
Load the table of contents into memory.
"""
self.toc = CTOCReader()
self.lib.seek(self.pkg_start + self.tocpos)
tocstr = self.lib.read(self.toclen)
self.toc.frombinary(tocstr)
def extract(self, name):
"""
Get the contents of an entry.
NAME is an entry name OR the index to the TOC.
Return the tuple (ispkg, contents).
        For non-Python resources, ispkg is meaningless (and 0).
Used by the import mechanism.
"""
if isinstance(name, str):
ndx = self.toc.find(name)
if ndx == -1:
return None
else:
ndx = name
dpos, dlen, ulen, flag, typcd, nm = self.toc.get(ndx)
with self.lib:
self.lib.seek(self.pkg_start + dpos)
rslt = self.lib.read(dlen)
if flag == 1:
import zlib
rslt = zlib.decompress(rslt)
if typcd == 'M':
return 1, rslt
return typcd == 'M', rslt
def contents(self):
"""
Return the names of the entries.
"""
rslt = []
for dpos, dlen, ulen, flag, typcd, nm in self.toc:
rslt.append(nm)
return rslt
def openEmbedded(self, name):
"""
Open a CArchive of name NAME embedded within this CArchive.
        This function is used by the ArchiveViewer.py utility.
"""
ndx = self.toc.find(name)
if ndx == -1:
raise KeyError("Member '%s' not found in %s" % (name, self.path))
dpos, dlen, ulen, flag, typcd, nm = self.toc.get(ndx)
if typcd not in "zZ":
raise NotAnArchiveError('%s is not an archive' % name)
if flag:
raise ValueError('Cannot open compressed archive %s in place' % name)
return CArchiveReader(self.path, self.pkg_start + dpos, dlen)
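# Hedged usage sketch: open a onefile-built executable and list the names of its
# embedded TOC entries. 'dist/myapp' is a placeholder path, not a file that ships with
# this module.
def _example_list_embedded_entries(path='dist/myapp'):
    archive = CArchiveReader(path)
    return archive.contents()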
|
the-stack_106_14075
|
import base64
import filecmp
import os
import re
import json
import threading
import uuid
import shutil
import tarfile
import yaml
from pkg_resources import resource_filename
import anchore_engine.configuration
import anchore_engine.common
import anchore_engine.auth.common
import anchore_engine.clients.skopeo_wrapper
#from anchore.anchore_utils import read_kvfile_todict
import anchore_engine.common.images
from anchore_engine.analyzers.utils import read_kvfile_todict
from anchore_engine.utils import AnchoreException
from anchore_engine import utils
anchorelock = threading.Lock()
anchorelocks = {}
try:
from anchore_engine.subsys import logger
# Separate logger for use during bootstrap when logging may not be fully configured
from twisted.python import log
except:
import logging
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
log = logger
def get_layertarfile(unpackdir, cachedir, layer):
layer_candidates = [os.path.join(unpackdir, 'raw', layer+".tar"), os.path.join(unpackdir, 'raw', layer), os.path.join(unpackdir, 'raw', 'blobs', 'sha256', layer)]
if cachedir:
layer_candidates.append(os.path.join(cachedir, 'sha256', layer))
layerfound = False
for layer_candidate in layer_candidates:
try:
if os.path.exists(layer_candidate):
try:
# try to update atime for the file
os.utime(layer_candidate, None)
except:
pass
return(layer_candidate)
except:
pass
return(None)
def handle_tar_error_post(unpackdir=None, rootfsdir=None, handled_post_metadata={}):
if not unpackdir or not rootfsdir or not handled_post_metadata:
# nothing to do
return(True)
logger.debug("handling post with metadata: {}".format(handled_post_metadata))
if handled_post_metadata.get('temporary_file_adds', []):
for tfile in handled_post_metadata.get('temporary_file_adds', []):
rmfile = os.path.join(rootfsdir, tfile)
if os.path.exists(rmfile):
logger.debug("removing temporary image file: {}".format(rmfile))
if os.path.isfile(rmfile):
os.remove(rmfile)
if handled_post_metadata.get('temporary_dir_adds', []):
for tfile in sorted(handled_post_metadata.get('temporary_dir_adds', []), reverse=True):
rmfile = os.path.join(rootfsdir, tfile)
if os.path.exists(rmfile):
logger.debug("removing temporary image dir: {}".format(rmfile))
if os.path.isdir(rmfile):
os.rmdir(rmfile)
return(True)
def handle_tar_error(tarcmd, rc, sout, serr, unpackdir=None, rootfsdir=None, cachedir=None, layer=None, layertar=None, layers=[]):
handled = False
handled_post_metadata = {}
try:
slinkre = "tar: (.*): Cannot open: File exists"
hlinkre = "tar: (.*): Cannot hard link to .(.*).: No such file or directory"
for errline in serr.splitlines():
patt = re.match(slinkre, errline)
patt1 = re.match(hlinkre, errline)
if patt:
matchfile = patt.group(1)
logger.debug("found 'file exists' error on name: " + str(matchfile))
if matchfile:
badfile = os.path.join(rootfsdir, patt.group(1))
if os.path.exists(badfile):
logger.debug("removing hierarchy: " + str(badfile))
shutil.rmtree(badfile)
handled = True
elif patt1:
missingfile = patt1.group(2)
basedir = os.path.dirname(missingfile)
logger.debug("found 'hard link' error on name: {}".format(missingfile))
if not os.path.exists(os.path.join(rootfsdir, missingfile)):
#for l in layers[layers.index("sha256:"+layer)::-1]:
for l in layers[-1::-1]:
missingdir = None
if not os.path.exists(os.path.join(rootfsdir, basedir)):
missingdir = basedir
dighash, lname = l.split(":")
ltar = get_layertarfile(unpackdir, cachedir, lname)
tarcmd = "tar -C {} -x -f {}".format(rootfsdir, ltar)
tarcmd_list = tarcmd.split() + ["{}".format(missingfile)]
#logger.debug("attempting to run command to extract missing hardlink target from layer {}: {}".format(l, tarcmd_list))
rc, sout, serr = utils.run_command_list(tarcmd_list)
sout = utils.ensure_str(sout)
serr = utils.ensure_str(serr)
#logger.debug("RESULT attempting to run command to extract missing hardlink target: {} : rc={} : serr={} : sout={}".format(tarcmd, rc, serr, sout))
if rc == 0:
if not handled_post_metadata.get('temporary_file_adds', False):
handled_post_metadata['temporary_file_adds'] = []
handled_post_metadata['temporary_file_adds'].append(missingfile)
if missingdir:
if not handled_post_metadata.get('temporary_dir_adds', False):
handled_post_metadata['temporary_dir_adds'] = []
handled_post_metadata['temporary_dir_adds'].append(missingdir)
handled = True
break
except Exception as err:
raise err
logger.debug("tar error handled: {}".format(handled))
return(handled, handled_post_metadata)
def get_tar_filenames(layertar):
ret = []
layertarfile = None
try:
logger.debug("using tarfile library to get file names from tarfile={}".format(layertar))
layertarfile = tarfile.open(layertar, mode='r', format=tarfile.PAX_FORMAT)
ret = layertarfile.getnames()
except:
        # python tarfile fails to unpack some docker image layers due to a PAX header issue, try another method
logger.debug("using tar command to get file names from tarfile={}".format(layertar))
tarcmd = "tar tf {}".format(layertar)
try:
ret = []
rc, sout, serr = utils.run_command(tarcmd)
sout = utils.ensure_str(sout)
serr = utils.ensure_str(serr)
if rc == 0 and sout:
for line in sout.splitlines():
re.sub("/+$", "", line)
ret.append(line)
else:
raise Exception("rc={} sout={} serr={}".format(rc, sout, serr))
except Exception as err:
logger.error("command failed with exception - " + str(err))
raise err
finally:
if layertarfile:
layertarfile.close()
return(ret)
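# Minimal sketch of get_tar_filenames() on a throwaway tarball; the temporary layer
# below is fabricated and is not a real image layer.
def _example_tar_filenames():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    layertar = os.path.join(tmpdir, 'layer.tar')
    with tarfile.open(layertar, 'w') as tf:
        tf.addfile(tarfile.TarInfo('etc/hostname'))
    try:
        return get_tar_filenames(layertar)  # -> ['etc/hostname']
    finally:
        shutil.rmtree(tmpdir)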
def squash(unpackdir, cachedir, layers):
rootfsdir = unpackdir + "/rootfs"
if os.path.exists(unpackdir + "/squashed.tar"):
return (True)
if not os.path.exists(rootfsdir):
os.makedirs(rootfsdir)
revlayer = list(layers)
revlayer.reverse()
l_excludes = {}
l_opqexcludes = {} # stores list of special files to exclude only for next layer (.wh..wh..opq handling)
    last_opqexcludes = {} # opq excludes for the last layer
for l in revlayer:
htype, layer = l.split(":",1)
layertar = get_layertarfile(unpackdir, cachedir, layer)
count = 0
logger.debug("\tPass 1: " + str(layertar))
#whpatt = re.compile(".*/\.wh\..*")
#whopqpatt = re.compile(".*/\.wh\.\.wh\.\.opq")
whpatt = re.compile("\.wh\..*")
whopqpatt = re.compile("\.wh\.\.wh\.\.opq")
l_opqexcludes[layer] = {}
myexcludes = {}
opqexcludes = {}
tarfilenames = get_tar_filenames(layertar)
for fname in tarfilenames:
# checks for whiteout conditions
if whopqpatt.match(os.path.basename(fname)):
#if whopqpatt.match(fname):
                # found an opq entry, which means that the files in the next layer down (only) should not be included
fsub = re.sub(r"\.wh\.\.wh\.\.opq", "", fname, 1)
# never include the whiteout file itself
myexcludes[fname] = True
opqexcludes[fsub] = True
elif whpatt.match(os.path.basename(fname)):
#elif whpatt.match(fname):
# found a normal whiteout, which means that this file in any lower layer should be excluded
fsub = re.sub(r"\.wh\.", "", fname, 1)
# never include a whiteout file
myexcludes[fname] = True
myexcludes[fsub] = True
else:
                # if the last processed layer had an opq whiteout, check the file to see if it lives in the opq directory
if last_opqexcludes:
dtoks = fname.split("/")
for i in range(0, len(dtoks)):
dtok = '/'.join(dtoks[0:i])
dtokwtrail = '/'.join(dtoks[0:i]) + "/"
if dtok in last_opqexcludes or dtokwtrail in last_opqexcludes:
l_opqexcludes[layer][fname] = True
break
# build up the list of excludes as we move down the layers
for l in list(l_excludes.keys()):
myexcludes.update(l_excludes[l])
l_excludes[layer] = myexcludes
last_opqexcludes.update(opqexcludes)
logger.debug("Pass 3: untarring layers with exclusions")
imageSize = 0
for l in layers:
htype, layer = l.split(":",1)
layertar = get_layertarfile(unpackdir, cachedir, layer)
imageSize = imageSize + os.path.getsize(layertar)
# write out the exluded files, adding the per-layer excludes if present
with open(unpackdir+"/efile", 'w') as OFH:
for efile in l_excludes[layer]:
OFH.write("%s\n" % efile)
if layer in l_opqexcludes and l_opqexcludes[layer]:
for efile in l_opqexcludes[layer]:
logger.debug("adding special for layer exclude: " + str(efile))
OFH.write("%s\n" % efile)
retry = True
success = False
last_err = None
max_retries = 10
retries = 0
handled_post_metadata = {}
while (not success) and (retry):
tarcmd = "tar -C " + rootfsdir + " -x -X " + unpackdir+"/efile -f " + layertar
logger.debug("untarring squashed tarball: " + str(tarcmd))
try:
rc, sout, serr = utils.run_command(tarcmd)
sout = utils.ensure_str(sout)
serr = utils.ensure_str(serr)
if rc != 0:
logger.debug("tar error encountered, attempting to handle")
handled, handled_post_metadata = handle_tar_error(tarcmd, rc, sout, serr, unpackdir=unpackdir, rootfsdir=rootfsdir, cachedir=cachedir, layer=layer, layertar=layertar, layers=layers)
if not handled:
raise Exception("command failed: cmd="+str(tarcmd)+" exitcode="+str(rc)+" stdout="+str(sout).strip()+" stderr="+str(serr).strip())
else:
logger.debug("tar error successfully handled, retrying")
else:
logger.debug("command succeeded: stdout="+str(sout).strip()+" stderr="+str(serr).strip())
success = True
except Exception as err:
logger.error("command failed with exception - " + str(err))
last_err = err
success = False
retry = False
# safety net
if retries > max_retries:
retry = False
retries = retries + 1
if not success:
if last_err:
raise last_err
else:
raise Exception("unknown exception in untar")
else:
try:
handle_tar_error_post(unpackdir=unpackdir, rootfsdir=rootfsdir, handled_post_metadata=handled_post_metadata)
except Exception as err:
raise err
return ("done", imageSize)
def make_staging_dirs(rootdir, use_cache_dir=None):
if not os.path.exists(rootdir):
raise Exception("passed in root directory must exist ("+str(rootdir)+")")
rando = str(uuid.uuid4())
ret = {
'unpackdir': os.path.join(rootdir, rando),
'copydir': os.path.join(rootdir, rando, "raw"),
'rootfs': os.path.join(rootdir, rando, "rootfs"),
'outputdir': os.path.join(rootdir, rando, "output"),
'cachedir': use_cache_dir
}
for k in list(ret.keys()):
if not ret[k]:
continue
try:
if not os.path.exists(ret[k]):
logger.debug("making dir: " + k + " : " + str(ret[k]))
os.makedirs(ret[k])
except Exception as err:
raise Exception("unable to prep staging directory - exception: " + str(err))
return(ret)
def delete_staging_dirs(staging_dirs):
for k in list(staging_dirs.keys()):
if k == 'cachedir':
continue
localconfig = anchore_engine.configuration.localconfig.get_config()
myconfig = localconfig.get('services', {}).get('analyzer', {})
if not myconfig.get('keep_image_analysis_tmpfiles', False):
try:
if os.path.exists(staging_dirs[k]):
logger.debug("removing dir: " + k + " : " + str(staging_dirs[k]))
shutil.rmtree(staging_dirs[k])
except Exception as err:
raise Exception("unable to delete staging directory - exception: " + str(err))
else:
logger.debug("keep_image_analysis_tmpfiles is enabled - leaving analysis tmpdir in place {}".format(staging_dirs))
return(True)
def pull_image(staging_dirs, pullstring, registry_creds=[], manifest=None, dest_type='oci'):
outputdir = staging_dirs['outputdir']
unpackdir = staging_dirs['unpackdir']
copydir = staging_dirs['copydir']
cachedir = staging_dirs['cachedir']
user = pw = None
registry_verify = False
# extract user/pw/verify from registry_creds
try:
if registry_creds:
image_info = anchore_engine.common.images.get_image_info(None, 'docker', pullstring, registry_lookup=False)
user, pw, registry_verify = anchore_engine.auth.common.get_creds_by_registry(image_info['registry'], registry_creds=registry_creds)
except Exception as err:
raise err
# download
try:
rc = anchore_engine.clients.skopeo_wrapper.download_image(pullstring, copydir, user=user, pw=pw, verify=registry_verify, manifest=manifest, use_cache_dir=cachedir, dest_type=dest_type)
except Exception as err:
raise err
return(True)
def get_image_metadata_v1(staging_dirs, imageDigest, imageId, manifest_data, dockerfile_contents="", dockerfile_mode=""):
outputdir = staging_dirs['outputdir']
unpackdir = staging_dirs['unpackdir']
copydir = staging_dirs['copydir']
docker_history = []
layers = []
dockerfile_mode = "Guessed"
dockerfile_contents = dockerfile_contents
imageArch = ""
try:
imageArch = manifest_data['architecture']
except:
imageArch = ""
try:
for fslayer in manifest_data['fsLayers']:
layers.append(fslayer['blobSum'])
except Exception as err:
logger.error("cannot get layers - exception: " + str(err))
raise err
try:
hfinal = []
count=0
for rawhel in manifest_data['history']:
hel = json.loads(rawhel['v1Compatibility'])
try:
lsize = hel['Size']
except:
lsize = 0
try:
lcreatedby = ' '.join(hel['container_config']['Cmd'])
except:
lcreatedby = ""
try:
lcreated = hel['created']
except:
lcreated = ""
lid = layers[count]
count = count + 1
hfinal.append(
{
'Created': lcreated,
'CreatedBy': lcreatedby,
'Comment': '',
'Id': lid,
'Size': lsize,
'Tags': []
}
)
docker_history = hfinal
if hfinal:
with open(os.path.join(unpackdir, "docker_history.json"), 'w') as OFH:
OFH.write(json.dumps(hfinal))
except Exception as err:
logger.error("cannot construct history - exception: " + str(err))
raise err
if not dockerfile_contents:
# get dockerfile_contents (translate history to guessed DF)
# TODO 'FROM' guess?
dockerfile_contents = "FROM scratch\n"
for hel in docker_history:
patt = re.match("^/bin/sh -c #\(nop\) +(.*)", hel['CreatedBy'])
if patt:
cmd = patt.group(1)
elif hel['CreatedBy']:
cmd = "RUN " + hel['CreatedBy']
else:
cmd = None
if cmd:
dockerfile_contents = dockerfile_contents + cmd + "\n"
dockerfile_mode = "Guessed"
elif not dockerfile_mode:
dockerfile_mode = "Actual"
layers.reverse()
return(docker_history, layers, dockerfile_contents, dockerfile_mode, imageArch)
def get_image_metadata_v2(staging_dirs, imageDigest, imageId, manifest_data, dockerfile_contents="", dockerfile_mode=""):
outputdir = staging_dirs['outputdir']
unpackdir = staging_dirs['unpackdir']
copydir = staging_dirs['copydir']
cachedir = staging_dirs['cachedir']
rawlayers = list(manifest_data['layers'])
hfinal = []
layers = []
docker_history = []
imageArch = ""
# get "history"
if os.path.exists(os.path.join(copydir, imageId+".tar")):
try:
with open(os.path.join(copydir, imageId+".tar"), 'r') as FH:
configdata = json.loads(FH.read())
rawhistory = configdata['history']
imageArch = configdata['architecture']
imageOs = configdata.get('os', None)
if imageOs in ['windows']:
raise Exception("reported os type ({}) images are not supported".format(imageOs))
except Exception as err:
raise err
elif os.path.exists(os.path.join(copydir, "index.json")):
try:
blobdir = os.path.join(copydir, 'blobs', 'sha256')
if cachedir:
blobdir = os.path.join(cachedir, 'sha256')
dfile = nfile = None
with open(os.path.join(copydir, "index.json"), 'r') as FH:
idata = json.loads(FH.read())
d_digest = idata['manifests'][0]['digest'].split(":", 1)[1]
dfile = os.path.join(blobdir, d_digest)
if dfile:
with open(dfile, 'r') as FH:
n_data = json.loads(FH.read())
n_digest = n_data['config']['digest'].split(":", 1)[1]
nfile = os.path.join(blobdir, n_digest)
else:
raise Exception("could not find intermediate digest - no blob digest data file found in index.json")
if nfile:
with open(nfile, 'r') as FH:
configdata = json.loads(FH.read())
rawhistory = configdata['history']
imageArch = configdata['architecture']
imageOs = configdata.get('os', None)
if imageOs in ['windows']:
raise Exception("image os type ({}) not supported".format(imageOs))
else:
raise Exception("could not find final digest - no blob config file found in digest file: {}".format(dfile))
except Exception as err:
raise err
try:
done=False
idx = 0
while not done:
if not rawhistory:
done = True
else:
hel = rawhistory.pop(0)
if 'empty_layer' in hel and hel['empty_layer']:
lid = "<missing>"
lsize = 0
else:
lel = rawlayers.pop(0)
lid = lel['digest']
layers.append(lid)
lsize = lel['size']
try:
lcreatedby = hel['created_by']
except:
lcreatedby = ""
lcreated = hel['created']
hfinal.append(
{
'Created': lcreated,
'CreatedBy': lcreatedby,
'Comment': '',
'Id': lid,
'Size': lsize,
'Tags': []
}
)
docker_history = hfinal
if hfinal:
with open(os.path.join(unpackdir, "docker_history.json"), 'w') as OFH:
OFH.write(json.dumps(hfinal))
except Exception as err:
raise err
if not dockerfile_contents:
# get dockerfile_contents (translate history to guessed DF)
# TODO 'FROM' guess?
dockerfile_contents = "FROM scratch\n"
for hel in docker_history:
            patt = re.match(r"^/bin/sh -c #\(nop\) +(.*)", hel['CreatedBy'])
if patt:
cmd = patt.group(1)
elif hel['CreatedBy']:
cmd = "RUN " + hel['CreatedBy']
else:
cmd = None
if cmd:
dockerfile_contents = dockerfile_contents + cmd + "\n"
dockerfile_mode = "Guessed"
elif not dockerfile_mode:
dockerfile_mode = "Actual"
return(docker_history, layers, dockerfile_contents, dockerfile_mode, imageArch)
def unpack(staging_dirs, layers):
outputdir = staging_dirs['outputdir']
unpackdir = staging_dirs['unpackdir']
copydir = staging_dirs['copydir']
cachedir = staging_dirs['cachedir']
try:
squashtar, imageSize = squash(unpackdir, cachedir, layers)
except Exception as err:
raise err
return(imageSize)
def list_analyzers():
"""
Return a list of the analyzer files
:return: list of str that are the names of the analyzer modules
"""
anchore_module_root = resource_filename("anchore_engine", "analyzers")
analyzer_root = os.path.join(anchore_module_root, "modules")
result = []
for f in os.listdir(analyzer_root):
thecmd = os.path.join(analyzer_root, f)
        if re.match(r".*\.py$", thecmd):
result.append(thecmd)
result.sort()
return result
def run_anchore_analyzers(staging_dirs, imageDigest, imageId, localconfig):
outputdir = staging_dirs['outputdir']
unpackdir = staging_dirs['unpackdir']
copydir = staging_dirs['copydir']
configdir = localconfig['service_dir']
# run analyzers
#anchore_module_root = resource_filename("anchore", "anchore-modules")
anchore_module_root = resource_filename("anchore_engine", "analyzers")
analyzer_root = os.path.join(anchore_module_root, "modules")
for f in list_analyzers():
#for f in os.listdir(analyzer_root):
# thecmd = os.path.join(analyzer_root, f)
# if re.match(".*\.py$", thecmd):
cmdstr = " ".join([f, configdir, imageId, unpackdir, outputdir, unpackdir])
if True:
try:
rc, sout, serr = utils.run_command(cmdstr)
sout = utils.ensure_str(sout)
serr = utils.ensure_str(serr)
if rc != 0:
raise Exception("command failed: cmd="+str(cmdstr)+" exitcode="+str(rc)+" stdout="+str(sout).strip()+" stderr="+str(serr).strip())
else:
logger.debug("command succeeded: cmd="+str(cmdstr)+" stdout="+str(sout).strip()+" stderr="+str(serr).strip())
except Exception as err:
logger.error("command failed with exception - " + str(err))
#raise err
analyzer_report = {}
for analyzer_output in os.listdir(os.path.join(outputdir, "analyzer_output")):
if analyzer_output not in analyzer_report:
analyzer_report[analyzer_output] = {}
for analyzer_output_el in os.listdir(os.path.join(outputdir, "analyzer_output", analyzer_output)):
if analyzer_output_el not in analyzer_report[analyzer_output]:
analyzer_report[analyzer_output][analyzer_output_el] = {'base': {}}
data = read_kvfile_todict(os.path.join(outputdir, "analyzer_output", analyzer_output, analyzer_output_el))
if data:
                analyzer_report[analyzer_output][analyzer_output_el]['base'] = data
else:
analyzer_report[analyzer_output].pop(analyzer_output_el, None)
if not analyzer_report[analyzer_output]:
analyzer_report.pop(analyzer_output, None)
return(analyzer_report)
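# Sketch of the analyzer_report structure assembled above (analyzer and file
# names below are hypothetical): each analyzer module writes key/value files
# under <outputdir>/analyzer_output/<module>/<output_el>, which end up nested
# as, e.g.:
#
#   {
#       "package_list": {
#           "pkgs.allinfo": {"base": {"<pkg>": "<metadata>", ...}},
#       },
#   }
#
# Empty outputs are pruned, so only analyzers that produced data remain.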
def generate_image_export(staging_dirs, imageDigest, imageId, analyzer_report, imageSize, fulltag, docker_history, dockerfile_mode, dockerfile_contents, layers, familytree, imageArch, rdigest, analyzer_manifest):
image_report = []
image_report.append(
{
'image':
{
'imageId': imageId,
'imagedata':
{
'analyzer_manifest': analyzer_manifest,
'analysis_report': analyzer_report,
'image_report': {
'meta': {
'shortparentId': '',
'sizebytes': imageSize,
'imageId': imageId,
'usertype': None,
'shortId': imageId[0:12],
'imagename': imageId,
'parentId': '',
'shortname': imageId[0:12],
'humanname': fulltag
},
'docker_history': docker_history,
'dockerfile_mode': dockerfile_mode,
'dockerfile_contents': dockerfile_contents,
'layers': layers,
'familytree': familytree,
'docker_data': {
'Architecture': imageArch,
'RepoDigests': [rdigest],
'RepoTags': [fulltag]
}
}
}
}
}
)
return(image_report)
def analyze_image(userId, manifest, image_record, tmprootdir, localconfig, registry_creds=[], use_cache_dir=None):
# need all this
imageId = None
imageDigest = None
layers = []
rawlayers = []
familytree = []
imageSize = 0
analyzer_manifest = {}
analyzer_report = {}
imageArch = ""
dockerfile_mode = ""
docker_history = {}
rdigest = ""
staging_dirs = None
manifest_schema_version = 0
dest_type = 'oci'
event = None
pullstring = None
fulltag = None
try:
imageDigest = image_record['imageDigest']
try:
manifest_data = json.loads(manifest)
manifest_schema_version = manifest_data['schemaVersion']
if manifest_schema_version == 1:
dest_type = 'dir'
else:
dest_type = 'oci'
#analyzer_manifest = {}
#analyzer_manifest.update(manifest_data)
except Exception as err:
            raise Exception("cannot load manifest as JSON (rawmanifest="+str(manifest)+") - exception: " + str(err))
if image_record['dockerfile_mode']:
dockerfile_mode = image_record['dockerfile_mode']
image_detail = image_record['image_detail'][0]
pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
imageId = image_detail['imageId']
if image_detail['dockerfile']:
dockerfile_contents = str(base64.decodebytes(image_detail['dockerfile'].encode('utf-8')), 'utf-8')
else:
dockerfile_contents = None
try:
staging_dirs = make_staging_dirs(tmprootdir, use_cache_dir=use_cache_dir)
except Exception as err:
raise err
try:
rc = pull_image(staging_dirs, pullstring, registry_creds=registry_creds, manifest=manifest, dest_type=dest_type)
except Exception as err:
raise ImagePullError(cause=err, pull_string=pullstring, tag=fulltag)
try:
if manifest_data['schemaVersion'] == 1:
docker_history, layers, dockerfile_contents, dockerfile_mode, imageArch = get_image_metadata_v1(staging_dirs, imageDigest, imageId, manifest_data, dockerfile_contents=dockerfile_contents, dockerfile_mode=dockerfile_mode)
elif manifest_data['schemaVersion'] == 2:
docker_history, layers, dockerfile_contents, dockerfile_mode, imageArch = get_image_metadata_v2(staging_dirs, imageDigest, imageId, manifest_data, dockerfile_contents=dockerfile_contents, dockerfile_mode=dockerfile_mode)
else:
raise ManifestSchemaVersionError(schema_version=manifest_data['schemaVersion'], pull_string=pullstring, tag=fulltag)
except ManifestSchemaVersionError:
raise
except Exception as err:
raise ManifestParseError(cause=err, pull_string=pullstring, tag=fulltag)
familytree = layers
try:
imageSize = unpack(staging_dirs, layers)
except Exception as err:
raise ImageUnpackError(cause=err, pull_string=pullstring, tag=fulltag)
familytree = layers
try:
analyzer_report = run_anchore_analyzers(staging_dirs, imageDigest, imageId, localconfig)
except Exception as err:
raise AnalyzerError(cause=err, pull_string=pullstring, tag=fulltag)
try:
image_report = generate_image_export(staging_dirs, imageDigest, imageId, analyzer_report, imageSize, fulltag, docker_history, dockerfile_mode, dockerfile_contents, layers, familytree, imageArch, pullstring, analyzer_manifest)
except Exception as err:
raise AnalysisReportGenerationError(cause=err, pull_string=pullstring, tag=fulltag)
except AnchoreException:
raise
except Exception as err:
raise AnalysisError(cause=err, pull_string=pullstring, tag=fulltag, msg='failed to download, unpack, analyze, and generate image export')
finally:
if staging_dirs:
rc = delete_staging_dirs(staging_dirs)
#if not imageDigest or not imageId or not manifest or not image_report:
if not image_report:
raise Exception("failed to analyze")
return(image_report)
class AnalysisError(AnchoreException):
def __init__(self, cause, pull_string, tag, msg):
self.cause = str(cause)
self.msg = msg
self.pull_string = str(pull_string)
self.tag = str(tag)
def __repr__(self):
return '{} ({}) - exception: {}'.format(self.msg, self.pull_string, self.cause)
def __str__(self):
return '{} ({}) - exception: {}'.format(self.msg, self.pull_string, self.cause)
def to_dict(self):
return {self.__class__.__name__: dict((key, '{}...(truncated)'.format(value[:256]) if key == 'cause' and isinstance(value, str) and len(value) > 256 else value)
for key, value in vars(self).items() if not key.startswith('_'))}
class ImagePullError(AnalysisError):
def __init__(self, cause, pull_string, tag, msg='Failed to pull image'):
super(ImagePullError, self).__init__(cause, pull_string, tag, msg)
class ManifestSchemaVersionError(AnalysisError):
def __init__(self, schema_version, pull_string, tag, msg='Manifest schema version unsupported'):
super(ManifestSchemaVersionError, self).__init__('No handlers for schemaVersion {}'.format(schema_version), pull_string, tag, msg)
class ManifestParseError(AnalysisError):
def __init__(self, cause, pull_string, tag, msg='Failed to parse image manifest'):
super(ManifestParseError, self).__init__(cause, pull_string, tag, msg)
class ImageUnpackError(AnalysisError):
def __init__(self, cause, pull_string, tag, msg='Failed to unpack image'):
super(ImageUnpackError, self).__init__(cause, pull_string, tag, msg)
class AnalyzerError(AnalysisError):
def __init__(self, cause, pull_string, tag, msg='Failed to run image through analyzers'):
super(AnalyzerError, self).__init__(cause, pull_string, tag, msg)
class AnalysisReportGenerationError(AnalysisError):
def __init__(self, cause, pull_string, tag, msg='Failed to generate image report'):
super(AnalysisReportGenerationError, self).__init__(cause, pull_string, tag, msg)
def get_anchorelock(lockId=None, driver=None):
global anchorelock, anchorelocks
ret = anchorelock
# first, check if we need to update the anchore configs
localconfig = anchore_engine.configuration.localconfig.get_config()
if not driver or driver in ['localanchore']:
if 'anchore_scanner_config' not in localconfig:
localconfig['anchore_scanner_config'] = get_config()
anchore_config = localconfig['anchore_scanner_config']
anchore_data_dir = anchore_config['anchore_data_dir']
else:
anchore_data_dir = "/root/.anchore"
if not os.path.exists(os.path.join(anchore_data_dir, 'conf')):
try:
os.makedirs(os.path.join(anchore_data_dir, 'conf'))
except:
pass
try:
for src,dst in [(localconfig['anchore_scanner_analyzer_config_file'], os.path.join(anchore_data_dir, 'conf', 'analyzer_config.yaml')), (os.path.join(localconfig['service_dir'], 'anchore_config.yaml'), os.path.join(anchore_data_dir, 'conf', 'config.yaml'))]:
logger.debug("checking defaults against installed: " + src + " : " + dst)
if os.path.exists(src):
default_file = src
installed_file = dst
do_copy = False
try:
same = filecmp.cmp(default_file, installed_file)
if not same:
do_copy = True
except:
do_copy = True
#if not filecmp.cmp(default_file, installed_file):
if do_copy:
logger.debug("checking source yaml ("+str(default_file)+")")
# check that it is at least valid yaml before copying in place
with open(default_file, 'r') as FH:
yaml.safe_load(FH)
logger.info("copying new config into place: " + str(src) + " -> " + str(dst))
shutil.copy(default_file, installed_file)
except Exception as err:
logger.warn("could not check/install analyzer anchore configurations (please check yaml format of your configuration files), continuing with default - exception: " + str(err))
if lockId:
lockId = base64.encodebytes(lockId.encode('utf-8'))
if lockId not in anchorelocks:
anchorelocks[lockId] = threading.Lock()
ret = anchorelocks[lockId]
logger.spew("all locks: " + str(anchorelocks))
else:
ret = anchorelock
return(ret)
def get_config():
ret = {}
    logger.debug("fetching local anchore configuration")
if True:
cmd = ['anchore', '--json', 'system', 'status', '--conf']
try:
rc, sout, serr = anchore_engine.utils.run_command_list(cmd)
sout = utils.ensure_str(sout)
serr = utils.ensure_str(serr)
ret = json.loads(sout)
except Exception as err:
logger.error(str(err))
return(ret)
the-stack_106_14076
import os
import sys
if __name__ == "__main__":
workdir: str = os.path.dirname(sys.argv[0])
if workdir.strip() != "":
os.chdir(workdir)
workdir = os.getcwd()
lang_counts: dict = dict()
with open("all.txt", "r") as f:
for line in f:
fields = line.split("|")
            if len(fields) < 3:
continue
lang = fields[2]
            if lang not in lang_counts:
                lang_counts[lang] = 0
            lang_counts[lang] += 1
counts_list: list = list()
for key in lang_counts.keys():
counts_list.append((key, lang_counts[key]))
counts_list.sort(key=lambda value: value[1], reverse=True)
counter: int = 0
for idx in range(len(counts_list)):
count = counts_list[idx]
print(count)
counter += 1
langs: set = set()
with open("train.txt", "r") as f:
for line in f:
            fields = line.split("|")
            if len(fields) < 3:
                continue
            lang = fields[2]
if lang in langs:
continue
langs.add(lang)
with open("langs.txt", "w") as f:
for lang_count in counts_list:
lang: str = lang_count[0]
            if lang not in langs:
continue
f.write(f"{lang_count[0]}: {int(lang_count[1]):,}")
f.write("\n")
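# Assumed input format for the parsing above (the sample line is made up):
# each line of all.txt / train.txt is pipe-delimited with the language in the
# third field, e.g.
#
#   repo/path/file.py|3fa1|Python|...
#
# Lines with fewer than three fields are skipped, and langs.txt keeps only
# languages that also occur in train.txt.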
the-stack_106_14077
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# William Schwartz <[email protected]>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg ([email protected])
# Dan Schult ([email protected])
# Brian Kiefer ([email protected])
"""Graph diameter, radius, eccentricity and other properties."""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['extrema_bounding', 'eccentricity', 'diameter',
'radius', 'periphery', 'center', 'barycenter',
'resistance_distance']
def extrema_bounding(G, compute="diameter"):
"""Compute requested extreme distance metric of undirected graph G
Computation is based on smart lower and upper bounds, and in practice
linear in the number of nodes, rather than quadratic (except for some
border cases such as complete graphs or circle shaped graphs).
Parameters
----------
G : NetworkX graph
An undirected graph
    compute : string denoting the requested metric
"diameter" for the maximal eccentricity value,
"radius" for the minimal eccentricity value,
"periphery" for the set of nodes with eccentricity equal to the diameter
"center" for the set of nodes with eccentricity equal to the radius
Returns
-------
value : value of the requested metric
int for "diameter" and "radius" or
list of nodes for "center" and "periphery"
Raises
------
NetworkXError
If the graph consists of multiple components
Notes
-----
This algorithm was proposed in the following papers:
F.W. Takes and W.A. Kosters, Determining the Diameter of Small World
Networks, in Proceedings of the 20th ACM International Conference on
Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.
doi: https://doi.org/10.1145/2063576.2063748
F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of
Large Graphs, Algorithms 6(1): 100-118, 2013.
doi: https://doi.org/10.3390/a6010100
M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,
Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)
Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.
doi: https://doi.org/10.1016/j.tcs.2015.02.033
"""
# init variables
degrees = dict(G.degree()) # start with the highest degree node
minlowernode = max(degrees, key=degrees.get)
N = len(degrees) # number of nodes
# alternate between smallest lower and largest upper bound
high = False
# status variables
ecc_lower = dict.fromkeys(G, 0)
ecc_upper = dict.fromkeys(G, N)
candidates = set(G)
# (re)set bound extremes
minlower = N
maxlower = 0
minupper = N
maxupper = 0
# repeat the following until there are no more candidates
while candidates:
if high:
current = maxuppernode # select node with largest upper bound
else:
current = minlowernode # select node with smallest lower bound
high = not high
# get distances from/to current node and derive eccentricity
dist = dict(nx.single_source_shortest_path_length(G, current))
if len(dist) != N:
msg = ('Cannot compute metric because graph is not connected.')
raise nx.NetworkXError(msg)
current_ecc = max(dist.values())
# print status update
# print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/"
# + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is "
# + str(current_ecc))
# print(ecc_upper)
# (re)set bound extremes
maxuppernode = None
minlowernode = None
# update node bounds
for i in candidates:
# update eccentricity bounds
d = dist[i]
ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)
# update min/max values of lower and upper bounds
minlower = min(ecc_lower[i], minlower)
maxlower = max(ecc_lower[i], maxlower)
minupper = min(ecc_upper[i], minupper)
maxupper = max(ecc_upper[i], maxupper)
# update candidate set
if compute == 'diameter':
ruled_out = {i for i in candidates if ecc_upper[i] <= maxlower and
2 * ecc_lower[i] >= maxupper}
elif compute == 'radius':
ruled_out = {i for i in candidates if ecc_lower[i] >= minupper and
ecc_upper[i] + 1 <= 2 * minlower}
elif compute == 'periphery':
ruled_out = {i for i in candidates if ecc_upper[i] < maxlower and
(maxlower == maxupper or ecc_lower[i] > maxupper)}
elif compute == 'center':
ruled_out = {i for i in candidates if ecc_lower[i] > minupper and
(minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)}
elif compute == 'eccentricities':
            ruled_out = {i for i in candidates if ecc_lower[i] == ecc_upper[i]}
candidates -= ruled_out
# for i in ruled_out:
# print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper))
# print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower))
# print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower
# and 2 * ecc_lower[4] >= maxupper))
# updating maxuppernode and minlowernode for selection in next round
for i in candidates:
if minlowernode is None \
or (ecc_lower[i] == ecc_lower[minlowernode]
and degrees[i] > degrees[minlowernode]) \
or (ecc_lower[i] < ecc_lower[minlowernode]):
minlowernode = i
if maxuppernode is None \
or (ecc_upper[i] == ecc_upper[maxuppernode]
and degrees[i] > degrees[maxuppernode]) \
or (ecc_upper[i] > ecc_upper[maxuppernode]):
maxuppernode = i
# print status update
# print (" min=" + str(minlower) + "/" + str(minupper) +
# " max=" + str(maxlower) + "/" + str(maxupper) +
# " candidates: " + str(len(candidates)))
# print("cand:",candidates)
# print("ecc_l",ecc_lower)
# print("ecc_u",ecc_upper)
# wait = input("press Enter to continue")
# return the correct value of the requested metric
if compute == 'diameter':
return maxlower
elif compute == 'radius':
return minupper
elif compute == 'periphery':
p = [v for v in G if ecc_lower[v] == maxlower]
return p
elif compute == 'center':
c = [v for v in G if ecc_upper[v] == minupper]
return c
elif compute == 'eccentricities':
return ecc_lower
return None
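# Minimal usage sketch for extrema_bounding (doctest-style, illustrative only):
#
#     >>> import networkx as nx
#     >>> G = nx.path_graph(5)                  # 0 - 1 - 2 - 3 - 4
#     >>> extrema_bounding(G, compute="diameter")
#     4
#     >>> extrema_bounding(G, compute="radius")
#     2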
def eccentricity(G, v=None, sp=None):
"""Returns the eccentricity of nodes in G.
The eccentricity of a node v is the maximum distance from v to
all other nodes in G.
Parameters
----------
G : NetworkX graph
A graph
v : node, optional
Return value of specified node
sp : dict of dicts, optional
All pairs shortest path lengths as a dictionary of dictionaries
Returns
-------
ecc : dictionary
A dictionary of eccentricity values keyed by node.
"""
# if v is None: # none, use entire graph
# nodes=G.nodes()
# elif v in G: # is v a single node
# nodes=[v]
# else: # assume v is a container of nodes
# nodes=v
order = G.order()
e = {}
for n in G.nbunch_iter(v):
if sp is None:
length = nx.single_source_shortest_path_length(G, n)
L = len(length)
else:
try:
length = sp[n]
L = len(length)
except TypeError:
raise nx.NetworkXError('Format of "sp" is invalid.')
if L != order:
if G.is_directed():
msg = ('Found infinite path length because the digraph is not'
' strongly connected')
else:
msg = ('Found infinite path length because the graph is not'
' connected')
raise nx.NetworkXError(msg)
e[n] = max(length.values())
if v in G:
return e[v] # return single value
else:
return e
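# Minimal usage sketch for eccentricity (doctest-style, illustrative only):
#
#     >>> import networkx as nx
#     >>> G = nx.path_graph(4)                  # 0 - 1 - 2 - 3
#     >>> eccentricity(G, v=0)
#     3
#     >>> eccentricity(G)
#     {0: 3, 1: 2, 2: 2, 3: 3}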
def diameter(G, e=None, usebounds=False):
"""Returns the diameter of the graph G.
The diameter is the maximum eccentricity.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
d : integer
Diameter of graph
See Also
--------
eccentricity
"""
if usebounds is True and e is None and not G.is_directed():
return extrema_bounding(G, compute="diameter")
if e is None:
e = eccentricity(G)
return max(e.values())
def periphery(G, e=None, usebounds=False):
"""Returns the periphery of the graph G.
The periphery is the set of nodes with eccentricity equal to the diameter.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
p : list
List of nodes in periphery
See Also
--------
barycenter
center
"""
if usebounds is True and e is None and not G.is_directed():
return extrema_bounding(G, compute="periphery")
if e is None:
e = eccentricity(G)
diameter = max(e.values())
p = [v for v in e if e[v] == diameter]
return p
def radius(G, e=None, usebounds=False):
"""Returns the radius of the graph G.
The radius is the minimum eccentricity.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
r : integer
Radius of graph
"""
if usebounds is True and e is None and not G.is_directed():
return extrema_bounding(G, compute="radius")
if e is None:
e = eccentricity(G)
return min(e.values())
def center(G, e=None, usebounds=False):
"""Returns the center of the graph G.
The center is the set of nodes with eccentricity equal to radius.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
c : list
List of nodes in center
See Also
--------
barycenter
periphery
"""
if usebounds is True and e is None and not G.is_directed():
return extrema_bounding(G, compute="center")
if e is None:
e = eccentricity(G)
radius = min(e.values())
p = [v for v in e if e[v] == radius]
return p
def barycenter(G, weight=None, attr=None, sp=None):
r"""Calculate barycenter of a connected graph, optionally with edge weights.
    The :dfn:`barycenter` of a
:func:`connected <networkx.algorithms.components.is_connected>` graph
:math:`G` is the subgraph induced by the set of its nodes :math:`v`
minimizing the objective function
.. math::
\sum_{u \in V(G)} d_G(u, v),
where :math:`d_G` is the (possibly weighted) :func:`path length
<networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
The barycenter is also called the :dfn:`median`. See [West01]_, p. 78.
Parameters
----------
G : :class:`networkx.Graph`
The connected graph :math:`G`.
weight : :class:`str`, optional
Passed through to
:func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`.
attr : :class:`str`, optional
If given, write the value of the objective function to each node's
`attr` attribute. Otherwise do not store the value.
sp : dict of dicts, optional
All pairs shortest path lengths as a dictionary of dictionaries
Returns
-------
:class:`list`
Nodes of `G` that induce the barycenter of `G`.
Raises
------
:exc:`networkx.NetworkXNoPath`
If `G` is disconnected. `G` may appear disconnected to
:func:`barycenter` if `sp` is given but is missing shortest path
lengths for any pairs.
:exc:`ValueError`
If `sp` and `weight` are both given.
See Also
--------
center
periphery
"""
if sp is None:
sp = nx.shortest_path_length(G, weight=weight)
else:
sp = sp.items()
if weight is not None:
raise ValueError('Cannot use both sp, weight arguments together')
smallest, barycenter_vertices, n = float('inf'), [], len(G)
for v, dists in sp:
if len(dists) < n:
raise nx.NetworkXNoPath(
("Input graph %r is disconnected, so every induced subgraph "
"has infinite barycentricity.") % G)
barycentricity = sum(dists.values())
if attr is not None:
G.nodes[v][attr] = barycentricity
if barycentricity < smallest:
smallest = barycentricity
barycenter_vertices = [v]
elif barycentricity == smallest:
barycenter_vertices.append(v)
return barycenter_vertices
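# Minimal usage sketch for barycenter (doctest-style, illustrative only): on a
# path of five nodes the middle node minimizes the sum of distances
# (2 + 1 + 1 + 2 = 6), so it is the barycenter.
#
#     >>> import networkx as nx
#     >>> barycenter(nx.path_graph(5))
#     [2]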
def _laplacian_submatrix(node, mat, node_list):
"""Removes row/col from a sparse matrix and returns the submatrix
"""
j = node_list.index(node)
n = list(range(len(node_list)))
n.pop(j)
if mat.shape[0] != mat.shape[1]:
raise nx.NetworkXError('Matrix must be square')
elif len(node_list) != mat.shape[0]:
        msg = "Node list length does not match matrix dimensions"
raise nx.NetworkXError(msg)
mat = mat.tocsr()
mat = mat[n, :]
mat = mat.tocsc()
mat = mat[:, n]
node_list.pop(j)
return mat, node_list
def _count_lu_permutations(perm_array):
"""Counts the number of permutations in SuperLU perm_c or perm_r
"""
perm_cnt = 0
arr = perm_array.tolist()
for i in range(len(arr)):
if i != arr[i]:
perm_cnt += 1
n = arr.index(i)
arr[n] = arr[i]
arr[i] = i
return perm_cnt
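# Small illustration of the swap counting above (illustrative only): the
# identity permutation needs no swaps, a single transposition needs one.
#
#     >>> import numpy as np
#     >>> _count_lu_permutations(np.array([0, 1, 2]))
#     0
#     >>> _count_lu_permutations(np.array([1, 0, 2]))
#     1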
@not_implemented_for('directed')
def resistance_distance(G, nodeA, nodeB, weight=None, invert_weight=True):
"""Returns the resistance distance between node A and node B on graph G.
The resistance distance between two nodes of a graph is akin to treating
    the graph as a grid of resistors with a resistance equal to the provided
weight.
If weight is not provided, then a weight of 1 is used for all edges.
Parameters
----------
G : NetworkX graph
A graph
nodeA : node
A node within graph G.
nodeB : node
A node within graph G, exclusive of Node A.
weight : string or None, optional (default=None)
The edge data key used to compute the resistance distance.
If None, then each edge has weight 1.
invert_weight : boolean (default=True)
Proper calculation of resistance distance requires building the
Laplacian matrix with the reciprocal of the weight. Not required
if the weight is already inverted. Weight cannot be zero.
Returns
-------
rd : float
Value of effective resistance distance
Notes
-----
Overview discussion:
* https://en.wikipedia.org/wiki/Resistance_distance
* http://mathworld.wolfram.com/ResistanceDistance.html
Additional details:
Vaya Sapobi Samui Vos, “Methods for determining the effective resistance,” M.S.,
Mathematisch Instituut, Universiteit Leiden, Leiden, Netherlands, 2016
Available: `Link to thesis <https://www.universiteitleiden.nl/binaries/content/assets/science/mi/scripties/master/vos_vaya_master.pdf>`_
"""
import numpy as np
import scipy.sparse
if not nx.is_connected(G):
        msg = ('Graph G must be connected.')
raise nx.NetworkXError(msg)
elif nodeA not in G:
msg = ('Node A is not in graph G.')
raise nx.NetworkXError(msg)
elif nodeB not in G:
msg = ('Node B is not in graph G.')
raise nx.NetworkXError(msg)
elif nodeA == nodeB:
msg = ('Node A and Node B cannot be the same.')
raise nx.NetworkXError(msg)
G = G.copy()
node_list = list(G)
if invert_weight and weight is not None:
if G.is_multigraph():
for (u, v, k, d) in G.edges(keys=True, data=True):
d[weight] = 1/d[weight]
else:
for (u, v, d) in G.edges(data=True):
d[weight] = 1/d[weight]
# Replace with collapsing topology or approximated zero?
    # Using determinants to compute the effective resistance is more memory
    # efficient than directly calculating the pseudo-inverse
L = nx.laplacian_matrix(G, node_list, weight=weight)
Lsub_a, node_list_a = _laplacian_submatrix(nodeA, L.copy(),
node_list[:])
Lsub_ab, node_list_ab = _laplacian_submatrix(nodeB, Lsub_a.copy(),
node_list_a[:])
    # Factorize Laplacian submatrices and extract diagonals
    # Order the diagonals to minimize the likelihood of overflow
    # while computing the determinant
lu_a = scipy.sparse.linalg.splu(Lsub_a, options=dict(SymmetricMode=True))
LdiagA = lu_a.U.diagonal()
LdiagA_s = np.product(np.sign(LdiagA)) * np.product(lu_a.L.diagonal())
LdiagA_s *= (-1)**_count_lu_permutations(lu_a.perm_r)
LdiagA_s *= (-1)**_count_lu_permutations(lu_a.perm_c)
LdiagA = np.absolute(LdiagA)
LdiagA = np.sort(LdiagA)
lu_ab = scipy.sparse.linalg.splu(Lsub_ab, options=dict(SymmetricMode=True))
LdiagAB = lu_ab.U.diagonal()
LdiagAB_s = np.product(np.sign(LdiagAB)) * np.product(lu_ab.L.diagonal())
LdiagAB_s *= (-1)**_count_lu_permutations(lu_ab.perm_r)
LdiagAB_s *= (-1)**_count_lu_permutations(lu_ab.perm_c)
LdiagAB = np.absolute(LdiagAB)
LdiagAB = np.sort(LdiagAB)
# Calculate the ratio of determinant, rd = det(Lsub_ab)/det(Lsub_a)
Ldet = np.product(np.divide(np.append(LdiagAB, [1]), LdiagA))
rd = Ldet * LdiagAB_s / LdiagA_s
return rd
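# Minimal usage sketch for resistance_distance (illustrative only): in an
# unweighted triangle, adjacent nodes are joined by a direct edge (resistance
# 1) in parallel with a two-edge path (resistance 2), giving 1*2/(1+2) = 2/3.
#
#     >>> import networkx as nx
#     >>> round(resistance_distance(nx.cycle_graph(3), 0, 1), 4)
#     0.6667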
the-stack_106_14080
"""Ops and optimizations for using BLAS calls
BLAS = Basic Linear Algebra Subroutines
Learn more about BLAS here:
http://www.netlib.org/blas/blast-forum/
The standard BLAS libraries implement what is called "legacy BLAS" in that
document.
This documentation describes Aesara's BLAS optimization pipeline.
Where there is a discrepancy between how things do work and how they *should*
work, both aspects should be documented.
There are four kinds of BLAS Ops in Aesara:
- Python implementations (this file)
- SciPy-based (blas_scipy)
- C-based (blas_c)
- GPU-based (aesara.gpuarray)
Notes
-----
Unfortunately (because it's confusing) this file currently contains Ops
that contain both Python and C versions. I think it would be better to
move the C implementations to blas_c so that this file is pure Python.
-JB
Ops
===
GEMM: Dot22, Dot22Scalar, GemmRelated, Gemm
-------------------------------------------
The BLAS GEMM operation implements Z <- a X Y + b Z,
where Z, X and Y are matrices, and a and b are scalars.
Dot22 is a GEMM where a=1, b=0, and Z is allocated every time.
Dot22Scalar is a GEMM where b=0 and Z is allocated every time.
Gemm is a GEMM in all its generality.
In the future we can refactor the GemmRelated, Gemm, Dot22 and
Dot22Scalar Ops into a single Op. That new Op (Gemm2) is basically a
normal Gemm, but with an additional configuration variable that says
to ignore the input Z. Setting that configuration variable to True
would make Gemm2 equivalent to the current Dot22 and Dot22Scalar.
This would make the file a lot easier to read, and save a few hundred
lines of library, to say nothing of testing and documentation.
GEMV: Gemv
----------
The BLAS GEMV operation implements Z <- a X Y + b Z,
where X is a matrix, Y and Z are vectors, and a and b are scalars.
GER: Ger
--------
The BLAS GER operation implements Z <- a X' Y + Z,
where X and Y are vectors, and matrix Z gets a rank-1 update.
Other Notable BLAS-related Ops
------------------------------
SYRK is another useful special case of GEMM. Particularly SYRK preserves
symmetry in the matrix that it updates. See how the linear-algebra module uses
symmetry hints before implementing this Op, so that this Op is compatible with
that system.
Optimizations
=============
The optimization pipeline works something like this:
1. identify dot22 from dot
2. identify gemm from dot22
3. identify dot22scalar from dot22 that are not gemm
4. specialize gemm to gemv where applicable
5. specialize gemm to ger where applicable
6. specialize dot22 -> gemv or ger where applicable
:note: GEMM is the most canonical BLAS signature that we deal with so far, it
would be good to turn most things into GEMM (dot, inner, outer, dot22,
dot22scalar), and then to specialize from gemm to the various other L2 and
L3 operations.
Identify Dot22
--------------
Numpy's dot supports arguments that are of any rank, and we should support that
too (just for compatibility). The BLAS optimizations work with Dot Ops whose
inputs are each either vector or matrix. So the first part of the optimization
pipeline is to transform qualifying Dot Ops to Dot22 Ops. Dot22 Ops may be
transformed further, but they will get implemented by a BLAS call.
More precisely, Dot nodes whose inputs are all vectors or matrices and whose
inputs both have the same dtype, and whose dtype is float or complex, become
Dot22. This is implemented in `local_dot_to_dot22`.
Identify Gemm from Dot22
------------------------
This is complicated, done in GemmOptimizer.
Identify Dot22Scalar from Dot22
-------------------------------
Dot22 Ops that remain after the GemmOptimizer is done have not
qualified as GEMM Ops. Still they might be scaled by a factor, in
which case we use Dot22Scalar which is like Gemm, but without the b
and the Z. In the future it would be good to merge this into the
GemmOptimizer.
Specialize Gemm to Gemv
-----------------------
If arguments to GEMM are dimshuffled vectors, then we can use GEMV
instead. This optimization is `local_gemm_to_gemv`.
"""
import copy
import logging
import os
import time
import numpy as np
import numpy.distutils
try:
import numpy.distutils.__config__ # noqa
except ImportError:
pass
from functools import reduce
from typing import Tuple, Union
import aesara.scalar
from aesara.compile.mode import optdb
from aesara.configdefaults import config
from aesara.graph.basic import Apply, view_roots
from aesara.graph.features import ReplacementDidNotRemoveError, ReplaceValidate
from aesara.graph.fg import InconsistencyError
from aesara.graph.op import COp, Op
from aesara.graph.opt import (
EquilibriumOptimizer,
GlobalOptimizer,
in2out,
inherit_stack_trace,
local_optimizer,
)
from aesara.graph.optdb import SequenceDB
from aesara.graph.params_type import ParamsType
from aesara.graph.utils import MethodNotDefined, TestValueError
from aesara.printing import FunctionPrinter, debugprint, pprint
from aesara.scalar import bool as bool_t
from aesara.tensor import basic as at
from aesara.tensor.basic_opt import local_dimshuffle_lift
from aesara.tensor.blas_headers import blas_header_text, blas_header_version
from aesara.tensor.elemwise import DimShuffle, Elemwise
from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.math import Dot, add, mul, neg, sub
from aesara.tensor.type import integer_dtypes, tensor, values_eq_approx_remove_inf_nan
from aesara.utils import memoize
_logger = logging.getLogger("aesara.tensor.blas")
try:
import scipy.linalg.blas
have_fblas = True
try:
fblas = scipy.linalg.blas.fblas
except AttributeError:
# A change merged in Scipy development version on 2012-12-02 replaced
# `scipy.linalg.blas.fblas` with `scipy.linalg.blas`.
# See http://github.com/scipy/scipy/pull/358
fblas = scipy.linalg.blas
_blas_gemv_fns = {
np.dtype("float32"): fblas.sgemv,
np.dtype("float64"): fblas.dgemv,
np.dtype("complex64"): fblas.cgemv,
np.dtype("complex128"): fblas.zgemv,
}
except ImportError as e:
have_fblas = False
# This is used in Gemv and ScipyGer. We use CGemv and CGer
# when config.blas__ldflags is defined. So we don't need a
# warning in that case.
if not config.blas__ldflags:
_logger.warning(
"Failed to import scipy.linalg.blas, and "
"Aesara flag blas__ldflags is empty. "
"Falling back on slower implementations for "
"dot(matrix, vector), dot(vector, matrix) and "
f"dot(vector, vector) ({str(e)})"
)
# If check_init_y() == True we need to initialize y when beta == 0.
def check_init_y():
if check_init_y._result is None:
        if not have_fblas:
            # Without scipy's fblas there is nothing to probe; report False
            # and skip the gemv call below, which needs _blas_gemv_fns.
            check_init_y._result = False
            return check_init_y._result
y = float("NaN") * np.ones((2,))
x = np.ones((2,))
A = np.ones((2, 2))
gemv = _blas_gemv_fns[y.dtype]
gemv(1.0, A.T, x, 0.0, y, overwrite_y=True, trans=True)
check_init_y._result = np.isnan(y).any()
return check_init_y._result
check_init_y._result = None
class Gemv(Op):
"""
expression is beta * y + alpha * A x
A is matrix
x, y are vectors
alpha, beta are scalars
output is a vector that can be inplace on y
"""
__props__ = ("inplace",)
def __init__(self, inplace):
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
else:
return "%s{no_inplace}" % self.__class__.__name__
def make_node(self, y, alpha, A, x, beta):
y = at.as_tensor_variable(y)
x = at.as_tensor_variable(x)
A = at.as_tensor_variable(A)
alpha = at.as_tensor_variable(alpha)
beta = at.as_tensor_variable(beta)
if y.dtype != A.dtype or y.dtype != x.dtype:
raise TypeError(
"Gemv requires matching dtypes", (y.dtype, A.dtype, x.dtype)
)
if A.ndim != 2:
raise TypeError("gemv requires matrix for A", A.type)
if x.ndim != 1:
raise TypeError("gemv requires vector for x", x.type)
if y.ndim != 1:
raise TypeError("gemv requires vector for y", y.type)
return Apply(self, [y, alpha, A, x, beta], [y.type()])
def perform(self, node, inputs, out_storage, params=None):
y, alpha, A, x, beta = inputs
if (
have_fblas
and y.shape[0] != 0
and x.shape[0] != 0
and y.dtype in _blas_gemv_fns
):
gemv = _blas_gemv_fns[y.dtype]
if A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]:
raise ValueError(
"Incompatible shapes for gemv "
f"(beta * y + alpha * dot(A, x)). y: {y.shape}, A: {A.shape}, x: {x.shape}"
)
if beta == 0 and check_init_y():
y.fill(0)
            # Here I assume that A is in C order. If we don't explicitly pass
            # it as Fortran order, scipy 0.7.2 seems to create a copy in
            # Fortran order instead of just reshaping it and using the trans
            # flag.
            # If A is already in Fortran order, making it C order and using
            # the trans flag doesn't seem to cause a slowdown.
# out_storage[0][0] = gemv(alpha, A, x, beta, y,
# overwrite_y=self.inplace)
out_storage[0][0] = gemv(
alpha, A.T, x, beta, y, overwrite_y=self.inplace, trans=True
)
else:
out = np.dot(A, x)
if alpha != 1:
out *= alpha
if beta != 0:
if beta != 1:
out += beta * y
else:
out += y
out_storage[0][0] = np.asarray(out, dtype=y.dtype)
def infer_shape(self, fgraph, node, input_shapes):
return [input_shapes[0]]
gemv_no_inplace = Gemv(inplace=False)
gemv_inplace = Gemv(inplace=True)
# For the user interface. Opt will make them inplace later
gemv = gemv_no_inplace
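# Hedged note: Gemv is intended to compute the same value as the NumPy
# expression ``beta * y + alpha * numpy.dot(A, x)`` (see ``perform`` above),
# e.g. with illustrative values:
#
#     >>> import numpy as np
#     >>> A = np.array([[1.0, 2.0], [3.0, 4.0]])
#     >>> x = np.array([1.0, 1.0]); y = np.array([1.0, 1.0])
#     >>> (0.5 * y + 2.0 * np.dot(A, x)).tolist()
#     [6.5, 14.5]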
class Ger(Op):
"""
BLAS defines general rank-1 update GER as A <- A + alpha x y'
for matrix A, scalar alpha, vectors x and y.
This interface to GER allows non-destructive operation on A via the
`destructive` argument to the constructor.
"""
__props__ = ("destructive",)
def __init__(self, destructive):
self.destructive = destructive
if destructive:
self.destroy_map = {0: [0]}
def __str__(self):
if self.destructive:
return "%s{destructive}" % self.__class__.__name__
else:
return "%s{non-destructive}" % self.__class__.__name__
def make_node(self, A, alpha, x, y):
A = at.as_tensor_variable(A)
y = at.as_tensor_variable(y)
x = at.as_tensor_variable(x)
alpha = at.as_tensor_variable(alpha)
if not (A.dtype == x.dtype == y.dtype == alpha.dtype):
raise TypeError(
"ger requires matching dtypes", (A.dtype, alpha.dtype, x.dtype, y.dtype)
)
if alpha.ndim != 0:
raise TypeError("ger requires scalar alpha", alpha.type)
if A.ndim != 2:
raise TypeError("ger requires matrix for A", A.type)
if x.ndim != 1:
raise TypeError("ger requires vector for x", x.type)
if y.ndim != 1:
raise TypeError("ger requires vector for y", y.type)
if x.dtype not in ("float32", "float64", "complex64", "complex128"):
raise TypeError("only float and complex types supported", x.dtype)
return Apply(self, [A, alpha, x, y], [A.type()])
def perform(self, node, inp, out, params=None):
cA, calpha, cx, cy = inp
(cZ,) = out
if self.destructive:
A = cA
else:
A = cA.copy()
if calpha != 1:
A += calpha * np.outer(cx, cy)
else:
A += np.outer(cx, cy)
cZ[0] = A
def infer_shape(self, fgraph, node, input_shapes):
return [input_shapes[0]]
ger = Ger(destructive=False)
ger_destructive = Ger(destructive=True)
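# Hedged note: Ger matches the NumPy expression ``A + alpha * numpy.outer(x, y)``
# (see ``perform`` above), e.g. with illustrative values:
#
#     >>> import numpy as np
#     >>> A = np.zeros((2, 2))
#     >>> (A + 2.0 * np.outer([1.0, 2.0], [3.0, 4.0])).tolist()
#     [[6.0, 8.0], [12.0, 16.0]]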
def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False):
"""Extract a list of compilation flags from config.blas__ldflags.
Depending on the options, different type of flags will be kept.
It returns a list of libraries against which an Op's object file
should be linked to benefit from a BLAS implementation.
Parameters
----------
libs : bool, optional
Extract flags starting with "-l" (the default is True).
libs_dir : bool, optional
Extract flags starting with "-L" (the default is False).
include_dir : bool, optional
Extract flags starting with "-I" (the default is False).
flags: bool, optional
Extract all the other flags (the default is False).
Returns
-------
list of strings
Extracted flags.
"""
ldflags_str = config.blas__ldflags
return _ldflags(
ldflags_str=ldflags_str,
libs=libs,
flags=flags,
libs_dir=libs_dir,
include_dir=include_dir,
)
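# Hedged example of the extraction performed by ldflags()/_ldflags(): assuming
# config.blas__ldflags were "-L/opt/openblas/lib -lopenblas" (a made-up value,
# and assuming that directory exists), ldflags() would return ["openblas"],
# ldflags(libs=False, libs_dir=True) would return ["/opt/openblas/lib"], and
# ldflags(libs=False, flags=True) would return
# ["-Wl,-rpath,/opt/openblas/lib"].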
@memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
"""Extract list of compilation flags from a string.
Depending on the options, different type of flags will be kept.
Parameters
----------
ldflags_str : string
The string to process. Typically, this will be the content of
`config.blas__ldflags`.
libs : bool
Extract flags starting with "-l".
flags: bool
Extract all the other flags.
libs_dir: bool
Extract flags starting with "-L".
include_dir: bool
Extract flags starting with "-I".
Returns
-------
list of strings
Extracted flags.
"""
rval = []
if libs_dir:
found_dyn = False
dirs = [x[2:] for x in ldflags_str.split() if x.startswith("-L")]
l = _ldflags(
ldflags_str=ldflags_str,
libs=True,
flags=False,
libs_dir=False,
include_dir=False,
)
for d in dirs:
for f in os.listdir(d.strip('"')):
if f.endswith(".so") or f.endswith(".dylib") or f.endswith(".dll"):
if any(f.find(ll) >= 0 for ll in l):
found_dyn = True
if not found_dyn and dirs:
_logger.warning(
"We did not find a dynamic library in the "
"library_dir of the library we use for blas. If you use "
"ATLAS, make sure to compile it with dynamics library."
)
for t in ldflags_str.split():
# Remove extra quote.
if (t.startswith("'") and t.endswith("'")) or (
t.startswith('"') and t.endswith('"')
):
t = t[1:-1]
try:
t0, t1, t2 = t[0:3]
assert t0 == "-"
except Exception:
raise ValueError(f'invalid token "{t}" in ldflags_str: "{ldflags_str}"')
if libs_dir and t1 == "L":
rval.append(t[2:])
elif include_dir and t1 == "I":
raise ValueError(
"Include dirs are not used for blas. We disable"
" this as this can hide other headers and this"
" is not wanted.",
t,
)
elif libs and t1 == "l": # example -lmkl
rval.append(t[2:])
elif flags and t1 not in ("L", "I", "l"): # example -openmp
rval.append(t)
elif flags and t1 == "L":
            # Add an rpath so the library can be found when we load the
            # compiled op, even if the user's environment is not well
            # configured.
rval.append("-Wl,-rpath," + t[2:])
return rval
class GemmRelated(COp):
"""Base class for Gemm and Dot22.
This class provides a kind of templated gemm Op.
"""
__props__: Union[Tuple, Tuple[str]] = ()
def c_support_code(self, **kwargs):
# return cblas_header_text()
mod_str = """
#ifndef MOD
#define MOD %
#endif
static double time_time() // a time function like time.time()
{
struct timeval tv;
gettimeofday(&tv, 0);
return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}
"""
return blas_header_text() + mod_str
def c_headers(self, **kwargs):
# std.cout doesn't require the '%' symbol to print stuff...
# so it works much better with python's string-substitution stuff.
return ["<iostream>", "<time.h>", "<sys/time.h>"]
def c_libraries(self, **kwargs):
return ldflags()
# code_cache_version is built by subclasses from
# build_gemm_version
def c_compile_args(self, **kwargs):
return ldflags(libs=False, flags=True)
def c_lib_dirs(self, **kwargs):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self, **kwargs):
return ldflags(libs=False, include_dir=True)
declare_NS = """
int unit = 0;
int type_num = PyArray_DESCR(%(_x)s)->type_num;
int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes
npy_intp* Nx = PyArray_DIMS(%(_x)s);
npy_intp* Ny = PyArray_DIMS(%(_y)s);
npy_intp* Nz = 0; //PyArray_DIMS(%(_zout)s);
npy_intp* Sx = PyArray_STRIDES(%(_x)s);
npy_intp* Sy = PyArray_STRIDES(%(_y)s);
npy_intp* Sz = 0; //PyArray_STRIDES(%(_zout)s);
//strides for x, y, z in dimensions 0, 1
int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;
"""
# implement if you don't have an inplace props
# setup_z_Nz_Sz = None
# otherwise implement
# setup_z_Nz_Sz_inplace = None
# setup_z_Nz_Sz_outplace = None
check_xyz_rank2 = """
if (PyArray_NDIM(%(_x)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(x) != 2. rank(x) is %%d.",
PyArray_NDIM(%(_x)s));
%(fail)s;
}
if (PyArray_NDIM(%(_y)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(y) != 2. rank(y) is %%d.", PyArray_NDIM(%(_y)s));
%(fail)s;
}
if (%(_zout)s && PyArray_NDIM(%(_zout)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(z) != 2. rank(z) is %%d.", PyArray_NDIM(%(_zout)s));
%(fail)s;
}
"""
check_xyz_double_or_float = """
if ((PyArray_DESCR(%(_x)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_x)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_y)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_y)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_zout)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_zout)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_y)s)->type_num)
||(PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_zout)s)->type_num))
{ PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }
"""
# it is not necessary that a or b have the same type as x,y,z
check_ab_double_or_float = """
if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(a) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_b)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_b)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(b) is not double or float"); %(fail)s;}
"""
check_dims = """
if (Nx[0] != Nz[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld rows but z has %%ld rows",
(long int)Nx[0], (long int)Nz[0]);
%(fail)s;
}
if (Nx[1] != Ny[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld cols (and %%ld rows) but y has %%ld rows (and %%ld cols)",
(long int)Nx[1], (long int)Nx[0], (long int)Ny[0], (long int)Ny[1]);
%(fail)s;
}
if (Ny[1] != Nz[1])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: y has %%ld cols but z has %%ld cols",
(long int)Ny[1], (long int)Nz[1]);
%(fail)s;
}
// We must not raise an error when Nx[1] == 0. This would disable cases
// that numpy.dot accept.
"""
check_strides = """
/*
If some matrices are not contiguous on either dimensions,
or have invalid strides, copy their content into a contiguous one
*/
if ((Sx[0] < 1) || (Sx[1] < 1) || (Sx[0] MOD type_size) || (Sx[1] MOD type_size)
|| ((Sx[0] != type_size) && (Sx[1] != type_size)))
{
PyArrayObject * _x_copy = (PyArrayObject *) PyArray_Copy(%(_x)s);
if (!_x_copy)
%(fail)s
Py_XDECREF(%(_x)s);
%(_x)s = _x_copy;
Sx = PyArray_STRIDES(%(_x)s);
}
if ((Sy[0] < 1) || (Sy[1] < 1) || (Sy[0] MOD type_size) || (Sy[1] MOD type_size)
|| ((Sy[0] != type_size) && (Sy[1] != type_size)))
{
PyArrayObject * _y_copy = (PyArrayObject *) PyArray_Copy(%(_y)s);
if (!_y_copy)
%(fail)s
Py_XDECREF(%(_y)s);
%(_y)s = _y_copy;
Sy = PyArray_STRIDES(%(_y)s);
}
if ((Sz[0] < 1) || (Sz[1] < 1) || (Sz[0] MOD type_size) || (Sz[1] MOD type_size)
|| ((Sz[0] != type_size) && (Sz[1] != type_size)))
{
PyArrayObject * _z_copy = (PyArrayObject *) PyArray_Copy(%(_zout)s);
if (!_z_copy)
%(fail)s
Py_XDECREF(%(_zout)s);
%(_zout)s = _z_copy;
Sz = PyArray_STRIDES(%(_zout)s);
}
"""
encode_strides_in_unit = """
/*
encode the stride structure of _x,_y,_zout into a single integer
*/
unit |= ((Sx[1] == type_size || Nx[1]==1) ? 0x0 : (Sx[0] == type_size || Nx[0]==1) ? 0x1 : 0x2) << 8;
unit |= ((Sy[1] == type_size || Ny[1]==1) ? 0x0 : (Sy[0] == type_size || Ny[0]==1) ? 0x1 : 0x2) << 4;
unit |= ((Sz[1] == type_size || Nz[1]==1) ? 0x0 : (Sz[0] == type_size || Nz[0]==1) ? 0x1 : 0x2) << 0;
"""
compute_strides = """
/* create appropriate strides for malformed matrices that are row or column
* vectors, or empty matrices.
* In that case, the value of the stride does not really matter, but
* some versions of BLAS insist that:
* - they are not smaller than the number of elements in the array,
* - they are not 0.
*/
sx_0 = (Nx[0] > 1) ? Sx[0]/type_size : (Nx[1] + 1);
sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[0] + 1);
sy_0 = (Ny[0] > 1) ? Sy[0]/type_size : (Ny[1] + 1);
sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[0] + 1);
sz_0 = (Nz[0] > 1) ? Sz[0]/type_size : (Nz[1] + 1);
sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[0] + 1);
"""
begin_switch_typenum = """
switch (type_num)
{
"""
case_float = """
case NPY_FLOAT:
{
"""
# case_float_ab_constants = None
case_float_gemm = """
float* x = (float*)PyArray_DATA(%(_x)s);
float* y = (float*)PyArray_DATA(%(_y)s);
float* z = (float*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
switch(unit)
{
case 0x000: sgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: sgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: sgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: sgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: sgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: sgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: sgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: sgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;
};
//fprintf(stderr, "Calling sgemm %%i %%i %%i %%i took %%f\\n", unit, Nz1, Nz0, Nx1, time_time() - t0);
"""
case_double = """
}
break;
case NPY_DOUBLE:
{
"""
# case_double_ab_constants = None
case_double_gemm = """
double* x = (double*)PyArray_DATA(%(_x)s);
double* y = (double*)PyArray_DATA(%(_y)s);
double* z = (double*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
//fprintf(stderr, "unit=%%x N= %%i %%i %%i S = %%i %%i %%i %%i %%i %%i\\n", unit,
//Nz1, Nz0, Nx1,
//sy_0, sy_1,
//sx_0, sx_1,
//sz_0, sz_1
//);
switch(unit)
{
case 0x000: dgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: dgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: dgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: dgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: dgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: dgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: dgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: dgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError,
"some matrix has no unit stride");
%(fail)s;
};
//fprintf(stderr, "Calling dgemm %%i %%i %%i %%i took %%f\\n",
// unit, Nz1, Nz0, Nx1, time_time()- t0);
"""
end_switch_typenum = """
}
break;
}
"""
def build_gemm_call(self):
if hasattr(self, "inplace"):
setup_z_Nz_Sz = "if(%(params)s->inplace){{{}}}else{{{}}}".format(
self.setup_z_Nz_Sz_inplace,
self.setup_z_Nz_Sz_outplace,
)
else:
setup_z_Nz_Sz = self.setup_z_Nz_Sz
return reduce(
str.__add__,
(
self.declare_NS,
self.check_xyz_rank2,
setup_z_Nz_Sz,
self.check_xyz_double_or_float,
self.check_ab_double_or_float,
self.check_dims,
self.check_strides,
self.encode_strides_in_unit,
self.compute_strides,
self.begin_switch_typenum,
self.case_float,
self.case_float_ab_constants,
self.case_float_gemm,
self.case_double,
self.case_double_ab_constants,
self.case_double_gemm,
self.end_switch_typenum,
),
"",
)
def build_gemm_version(self):
return (13, blas_header_version())
class Gemm(GemmRelated):
"""In-place version of matrix-matrix multiplication (with accumulation).
When a and b are scalars and x, y, and z are matrices, then
gemm(z,a,x,y,b)
is similar to
b*z + a*dot(x,y)
The difference between the two is that the top form is destructive
on z, whereas the bottom form is not. Gemm works in-place on the
storage associated with z, and the L{Variable} returned by Gemm
has a storage that will be aliased to the storage of the z
argument. Because of this in-place computation, an L{Apply} of
this op will destroy the L{Variable} z on which it operates. (See
L{DestructiveOps} for an explanation of what destroying means in
the context of aesara graphs. See L{BlasLapackSupport} for more
optimized linear algebra operations.)
"""
E_rank = "gemm only works for rank 2"
E_scalar = "gemm requires scalar argument"
E_z_uniq = "argument z aliased to x or y" # TODO: justify / delete this
E_mixed = "gemm requires matching dtypes"
E_float = "gemm requires floating-point dtypes"
__props__ = ("inplace",)
params_type = ParamsType(
inplace=bool_t,
)
check_input = False
def __init__(self, inplace):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
def __str__(self):
if self.inplace:
inplace_str = "inplace"
else:
inplace_str = "no_inplace"
return f"{self.__class__.__name__}{{{inplace_str}}}"
def __setstate__(self, dct):
self.__dict__.update(dct)
        # Correctly reload older pickles where destroy_map was not
        # saved
if "destroy_map" not in self.__dict__ and self.inplace:
self.destroy_map = {0: [0]}
def __getstate__(self):
rval = self.__dict__.copy()
# Do not serialize the setup code, it will be restored in __setstate__
# depending on the value of 'inplace'
rval.pop("setup_z_Nz_Sz", None)
return rval
def make_node(self, *inputs):
inputs = list(map(at.as_tensor_variable, inputs))
if len(inputs) != 5:
raise TypeError(
f"Wrong number of inputs for {self} (expected 5, got {len(inputs)})"
)
z, a, x, y, b = inputs
zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)]
        # We want the gemm to be inplace. When this op is inplace, it
        # declares itself inplace only on z. So to make it safe, we
        # raise an error if z can be a view on x or y.
        # I don't know if Aesara currently can support that case. Since
        # this case doesn't happen in our code, I won't spend time
        # investigating it. So the assert is for safety. I also
        # think there is another mechanism that would prevent this,
        # but I don't want to modify old code and risk breaking
        # something.
if self.inplace:
if zr.intersection(xr):
raise InconsistencyError(Gemm.E_z_uniq, (z, x))
if zr.intersection(yr):
raise InconsistencyError(Gemm.E_z_uniq, (z, y))
if z.ndim != 2:
raise TypeError(Gemm.E_rank, z)
if a.ndim != 0:
raise TypeError(Gemm.E_scalar, a)
if x.ndim != 2:
raise TypeError(Gemm.E_rank, x)
if y.ndim != 2:
raise TypeError(Gemm.E_rank, y)
if b.ndim != 0:
raise TypeError(Gemm.E_scalar, b)
if not (z.dtype == a.dtype == x.dtype == y.dtype == b.dtype):
raise TypeError(Gemm.E_mixed, (z.dtype, a.dtype, x.dtype, y.dtype, b.dtype))
if not z.dtype.startswith("float") and not z.dtype.startswith("complex"):
raise TypeError(Gemm.E_float, (z.dtype))
output = z.type()
return Apply(self, inputs, [output])
def perform(self, node, inp, out, params):
z, a, x, y, b = inp
(zout,) = out
assert a.shape == ()
assert b.shape == ()
if not params.inplace:
z = z.copy() # the original z will not be changed
if z.shape == ():
z.itemset(z * a + b * np.dot(x, y))
zout[0] = z
else:
if b == 0.0:
if a == 1.0:
z[:] = np.dot(x, y)
elif a == -1.0:
z[:] = -np.dot(x, y)
else:
z[:] = a * np.dot(x, y)
elif b == 1.0:
if a == 1.0:
z += np.dot(x, y)
elif a == -1.0:
z -= np.dot(x, y)
else:
z += a * np.dot(x, y)
else:
z *= b
z += a * np.dot(x, y)
zout[0] = z
def infer_shape(self, fgraph, node, input_shapes):
return [input_shapes[0]]
setup_z_Nz_Sz_inplace = """
if (%(_zout)s != %(_z)s)
{
if (%(_zout)s)
{
Py_DECREF(%(_zout)s);
}
%(_zout)s = %(_z)s;
Py_INCREF(%(_zout)s);
}
Nz = PyArray_DIMS(%(_z)s);
Sz = PyArray_STRIDES(%(_z)s);
"""
setup_z_Nz_Sz_outplace = """
if ((NULL == %(_zout)s)
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_z)s)[0])
|| (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_z)s)[1])
|| (PyArray_STRIDES(%(_zout)s)[0] <= 0)
|| (PyArray_STRIDES(%(_zout)s)[1] <= 0)
|| (PyArray_STRIDES(%(_zout)s)[0] MOD type_size)
|| (PyArray_STRIDES(%(_zout)s)[1] MOD type_size)
|| ((PyArray_STRIDES(%(_zout)s)[0] != type_size)
&& (PyArray_STRIDES(%(_zout)s)[1] != type_size)))
{
Py_XDECREF(%(_zout)s);
npy_intp dims[2];
dims[0] = PyArray_DIMS(%(_z)s)[0];
dims[1] = PyArray_DIMS(%(_z)s)[1];
%(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
PyArray_TYPE(%(_z)s));
//fprintf(stderr, "Gemm Allocating %%i %%i\\n", dims[0], dims[1]);
if(!%(_zout)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc gemm_no_inplace output");
%(fail)s
}
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);
if (PyArray_DESCR(%(_zout)s)->type_num == NPY_FLOAT)
{
float * zoutdata = (float*)PyArray_DATA(%(_zout)s);
int zoi = Sz[0] / sizeof(float);
int zoj = Sz[1] / sizeof(float);
const float * zdata = (float*)PyArray_DATA(%(_z)s);
int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(float);
int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(float);
for (int i = 0; i < Nz[0]; ++i)
{
for (int j = 0; j < Nz[1]; ++j)
{
zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
}
}
}
else if (PyArray_DESCR(%(_zout)s)->type_num == NPY_DOUBLE)
{
double * zoutdata = (double*) PyArray_DATA(%(_zout)s);
int zoi = Sz[0] / sizeof(double);
int zoj = Sz[1] / sizeof(double);
const double * zdata = (double*)PyArray_DATA(%(_z)s);
int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(double);
int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(double);
for (int i = 0; i < Nz[0]; ++i)
{
for (int j = 0; j < Nz[1]; ++j)
{
zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
}
}
}
else
{
PyErr_SetString(PyExc_AssertionError,
"neither float nor double dtype");
%(fail)s
}
"""
case_float_ab_constants = """
#define REAL float
float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
float b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""
case_double_ab_constants = """
#define REAL double
double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
double b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""
def c_code(self, node, name, inp, out, sub):
_z, _a, _x, _y, _b = inp
(_zout,) = out
if node.inputs[0].type.dtype.startswith("complex"):
raise MethodNotDefined(f"{self.__class__.__name__}.c_code")
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (6,) + gv
else:
return gv
gemm_inplace = Gemm(inplace=True)
gemm_no_inplace = Gemm(inplace=False)
# For the user interface. Aesara optimization will make them inplace
gemm = gemm_no_inplace
pprint.assign(gemm_inplace, FunctionPrinter(["gemm_inplace"]))
pprint.assign(gemm_no_inplace, FunctionPrinter(["gemm_no_inplace"]))
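# A minimal sketch, using plain NumPy, of the value the gemm graph computes;
# it only restates the update described in the Gemm docstring above,
# z <- b * z + a * dot(x, y). The helper name is hypothetical and the function
# is never called by this module.
def _gemm_semantics_sketch():
    import numpy as np
    z = np.ones((2, 2))
    x = np.arange(4.0).reshape(2, 2)
    y = np.eye(2)
    a, b = 0.5, 2.0
    # gemm_no_inplace(z, a, x, y, b) builds a graph whose value is this array;
    # gemm_inplace accumulates the same value directly into z's storage.
    return b * z + a * np.dot(x, y)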
def res_is_a(fgraph, var, op, maxclients=None):
if maxclients is not None and var in fgraph.clients:
retval = len(fgraph.get_clients(var)) <= maxclients
else:
retval = True
return var.owner and var.owner.op == op and retval
def _as_scalar(res, dtype=None):
"""Return ``None`` or a `TensorVariable` of float type"""
if dtype is None:
dtype = config.floatX
if np.all(res.type.broadcastable):
while res.owner and isinstance(res.owner.op, DimShuffle):
res = res.owner.inputs[0]
# may still have some number of True's
if res.type.broadcastable:
rval = res.dimshuffle()
else:
rval = res
if rval.type.dtype in integer_dtypes:
# We check that the upcast of res and dtype won't change dtype.
# If dtype is float64, we will cast int64 to float64.
# This is valid when res is a scalar used as input to a dot22
# as the cast of the scalar can be done before or after the dot22
# and this will give the same result.
if aesara.scalar.upcast(res.dtype, dtype) == dtype:
return at.cast(rval, dtype)
else:
return None
return rval
def _is_real_matrix(res):
return (
res.type.dtype in ("float16", "float32", "float64")
and res.type.ndim == 2
and res.type.broadcastable[0] is False
and res.type.broadcastable[1] is False
) # cope with tuple vs. list
def _is_real_vector(res):
return (
res.type.dtype in ("float16", "float32", "float64")
and res.type.ndim == 1
and res.type.broadcastable[0] is False
)
def _beta_L_plus_alpha_M(fgraph, beta, L, alpha, M, recurse_flip=True):
# print 'BETA L + ALPHA M', beta, L, alpha, M, recurse_flip
# EXPRESSION: (beta * L) + (alpha * M)
# we've already checked the client counts, now just make the type check.
# if res_is_a(M, _dot22, 1):
if M.owner and M.owner.op == _dot22:
Ml, Mr = M.owner.inputs
rval = [gemm_no_inplace(L, alpha, Ml, Mr, beta)]
# print 'GEMM 0', rval, beta, L, alpha, M
return rval, M
    # It might also be the case that there is a dimshuffle between the add
    # and the dot22; local_dot_to_dot22 in particular will insert such nodes.
if (
M.owner
and isinstance(M.owner.op, DimShuffle)
and M.owner.inputs[0].owner
and isinstance(M.owner.inputs[0].owner.op, Dot22)
):
MM = M.owner.inputs[0]
if M.owner.op.new_order == (0,):
# it is making a column MM into a vector
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle(0, "x"), alpha, MMl, MMr, beta)
rval = [g.dimshuffle(0)]
return rval, MM
if M.owner.op.new_order == (1,):
# it is making a row MM into a vector
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle("x", 0), alpha, MMl, MMr, beta)
rval = [g.dimshuffle(1)]
return rval, MM
if len(M.owner.op.new_order) == 0:
            # it is turning a 1x1 MM into a scalar
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle("x", "x"), alpha, MMl, MMr, beta)
rval = [g.dimshuffle()]
return rval, MM
if recurse_flip:
return _beta_L_plus_alpha_M(fgraph, alpha, M, beta, L, recurse_flip=False)
else:
return False, False
def _gemm_canonicalize(fgraph, r, scale, rval, maxclients):
# Tries to interpret node as a sum of scalars * (vectors or matrices)
def scaled(thing):
if scale == 1:
return thing
if scale == -1 and thing.type.dtype != "bool":
return -thing
else:
return scale * thing
try:
r.type.broadcastable
except Exception:
return None
if (r.type.ndim not in (1, 2)) or r.type.dtype not in (
"float16",
"float32",
"float64",
"complex64",
"complex128",
):
rval.append(scaled(r))
return rval
if maxclients and len(fgraph.clients[r]) > maxclients:
rval.append((scale, r))
return rval
if r.owner and r.owner.op == sub:
_gemm_canonicalize(fgraph, r.owner.inputs[0], scale, rval, 1)
_gemm_canonicalize(fgraph, r.owner.inputs[1], -scale, rval, 1)
elif r.owner and r.owner.op == add:
for i in r.owner.inputs:
_gemm_canonicalize(fgraph, i, scale, rval, 1)
elif r.owner and r.owner.op == neg:
_gemm_canonicalize(fgraph, r.owner.inputs[0], -scale, rval, 1)
elif r.owner and r.owner.op == mul:
scalars = []
vectors = []
matrices = []
for i in r.owner.inputs:
if np.all(i.type.broadcastable):
while i.owner and isinstance(i.owner.op, DimShuffle):
i = i.owner.inputs[0]
if i.type.broadcastable:
scalars.append(i.dimshuffle())
else:
scalars.append(i)
elif _is_real_vector(i):
vectors.append(i)
elif _is_real_matrix(i):
matrices.append(i)
else:
# just put the original arguments as in the base case
rval.append((scale, r))
return rval
if len(matrices) == 1:
assert len(vectors) == 0
m = matrices[0]
if len(scalars) == 0:
_gemm_canonicalize(fgraph, m, scale, rval, 1)
elif len(scalars) == 1:
_gemm_canonicalize(fgraph, m, scaled(scalars[0]), rval, 1)
else:
_gemm_canonicalize(
fgraph, m, mul(scaled(scalars[0]), *scalars[1:]), rval, 1
)
elif len(vectors) == 1:
assert len(matrices) == 0
v = vectors[0]
if len(scalars) == 0:
_gemm_canonicalize(fgraph, v, scale, rval, 1)
elif len(scalars) == 1:
_gemm_canonicalize(fgraph, v, scaled(scalars[0]), rval, 1)
else:
_gemm_canonicalize(
fgraph, v, mul(scaled(scalars[0]), *scalars[1:]), rval, 1
)
        else:  # let's not open this up
rval.append((scale, r))
else:
rval.append((scale, r))
return rval
def _factor_canonicalized(lst):
# remove duplicates from canonicalized list
# we only delete out of the right end of the list,
    # once i has touched a list element, it is permanent
lst = list(lst)
# print 'FACTOR', lst
# for t in lst:
# if not isinstance(t, (list, tuple)):
# t = (t,)
# for e in t:
# try:
# aesara.printing.debugprint(e)
# except TypeError:
# print e, type(e)
i = 0
while i < len(lst) - 1:
try:
s_i, M_i = lst[i]
except Exception:
i += 1
continue
j = i + 1
while j < len(lst):
try:
s_j, M_j = lst[j]
except Exception:
j += 1
continue
if M_i is M_j:
s_i = s_i + s_j
lst[i] = (s_i, M_i)
del lst[j]
else:
j += 1
i += 1
return lst
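# An illustrative sketch of _factor_canonicalized, assuming the canonicalized
# list holds (scale, variable) pairs: pairs that refer to the same variable are
# merged by summing their scales. Plain Python objects stand in for graph
# variables here, and this hypothetical helper is not used by the library.
def _factor_canonicalized_sketch():
    M = object()
    N = object()
    # [(2.0, M), (3.0, M), (1.0, N)] collapses to [(5.0, M), (1.0, N)]
    return _factor_canonicalized([(2.0, M), (3.0, M), (1.0, N)])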
def _gemm_from_factored_list(fgraph, lst):
"""
Returns None, or a list to replace node.outputs.
"""
lst2 = []
    # Remove the tuples that can't be cast correctly.
    # This can happen when we try to cast a complex to a real
for sM in lst:
# Make every pair in list have matching dtypes
# sM can be a tuple of 2 elements or an Aesara variable.
if isinstance(sM, tuple):
sm0, sm1 = sM
sm0 = at.as_tensor_variable(sm0)
if aesara.scalar.upcast(sm0.dtype, sm1.dtype) == sm1.dtype:
lst2.append((at.cast(sm0, sm1.dtype), sM[1]))
lst = lst2
def item_to_var(t):
try:
s, M = t
except Exception:
return t
if s == 1:
return M
if s == -1:
return -M
return s * M
# Try every pair in the sM_list, trying to turn it into a gemm operation
for i in range(len(lst) - 1):
s_i, M_i = lst[i]
for j in range(i + 1, len(lst)):
s_j, M_j = lst[j]
if not M_j.type.in_same_class(M_i.type):
continue
# print 'TRYING', (s_i, M_i, s_j, M_j)
gemm_of_sM_list, old_dot22 = _beta_L_plus_alpha_M(
fgraph, s_i, M_i, s_j, M_j
)
# print 'GOT IT', gemm_of_sM_list
if gemm_of_sM_list:
assert len(gemm_of_sM_list) == 1
add_inputs = [
item_to_var(input) for k, input in enumerate(lst) if k not in (i, j)
]
add_inputs.extend(gemm_of_sM_list)
if len(add_inputs) > 1:
rval = [add(*add_inputs)]
else:
rval = add_inputs
# print "RETURNING GEMM THING", rval
return rval, old_dot22
def _gemm_from_node2(fgraph, node):
"""
TODO: In many expressions, there are many ways to turn it into a
gemm. For example dot(a,b) + c + d. This function should return all
of them, so that if one version of gemm causes a cycle in the graph, then
another application of gemm can be tried.
"""
lst = []
t0 = time.time()
_gemm_canonicalize(fgraph, node.outputs[0], 1.0, lst, 0)
t1 = time.time()
if len(lst) > 1:
lst = _factor_canonicalized(lst)
t2 = time.time()
rval = _gemm_from_factored_list(fgraph, lst)
t3 = time.time()
# It can happen that _factor_canonicalized and
# _gemm_from_factored_list return a node with an incorrect
# type. This happens in particular when one of the scalar
# factors forces the upcast of the whole expression. In that
# case, we simply skip that candidate for Gemm. This was
# discussed in
# http://groups.google.com/group/theano-dev/browse_thread/thread/a3096c82856e3ad5,
# but never made it into a trac ticket.
if rval and rval[0][0].type.in_same_class(node.outputs[0].type):
return rval, t1 - t0, t2 - t1, t3 - t2
return None, t1 - t0, 0, 0
class GemmOptimizer(GlobalOptimizer):
"""Graph optimizer for inserting Gemm operations."""
def __init__(self):
super().__init__()
self.warned = False
def add_requirements(self, fgraph):
fgraph.attach_feature(ReplaceValidate())
def apply(self, fgraph):
did_something = True
nb_iter = 0
nb_replacement = 0
nb_replacement_didn_t_remove = 0
nb_inconsistency_make = 0
nb_inconsistency_replace = 0
time_canonicalize = 0
time_factor_can = 0
time_factor_list = 0
time_toposort = 0
if fgraph.profile:
validate_before = fgraph.profile.validate_time
callbacks_before = fgraph.execute_callbacks_times.copy()
callback_before = fgraph.execute_callbacks_time
def on_import(new_node):
if new_node is not node:
nodelist.append(new_node)
u = aesara.graph.opt.Updater(on_import, None, None, name="GemmOptimizer")
fgraph.attach_feature(u)
while did_something:
nb_iter += 1
t0 = time.time()
nodelist = aesara.graph.basic.io_toposort(fgraph.inputs, fgraph.outputs)
time_toposort += time.time() - t0
did_something = False
nodelist.reverse()
for node in nodelist:
if not (
isinstance(node.op, Elemwise)
and isinstance(
node.op.scalar_op,
(
aesara.scalar.Add,
aesara.scalar.Sub,
aesara.scalar.Neg,
aesara.scalar.Mul,
),
)
):
continue
if node not in fgraph.apply_nodes:
                    # This means that we already removed this node from
                    # the graph
continue
try:
new_outputs, time1, time2, time3 = _gemm_from_node2(fgraph, node)
time_canonicalize += time1
time_factor_can += time2
time_factor_list += time3
except InconsistencyError:
nb_inconsistency_make += 1
continue
if new_outputs:
new_outputs, old_dot22 = new_outputs
assert len(new_outputs) == len(node.outputs)
new_outputs[
0
].tag.values_eq_approx = values_eq_approx_remove_inf_nan
try:
fgraph.replace_all_validate_remove(
list(zip(node.outputs, new_outputs)),
[old_dot22],
reason="GemmOptimizer",
                            # For now we disable the warning, as we know of
                            # cases that we need to fix.
warn=False, # warn=not self.warned
)
did_something = True
nb_replacement += 1
except InconsistencyError:
# TODO: retry other applications of gemm (see comment
# in _gemm_from_node)
nb_inconsistency_replace += 1
except ReplacementDidNotRemoveError:
nb_replacement_didn_t_remove += 1
self.warned = True
fgraph.remove_feature(u)
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
callback_time = fgraph.execute_callbacks_time - callback_before
callbacks_time = {}
for k, v in fgraph.execute_callbacks_times.items():
if k in callbacks_before:
callbacks_time[k] = v - callbacks_before[k]
else:
callbacks_time[k] = v
else:
validate_time = None
callback_time = None
callbacks_time = {}
return (
self,
nb_iter,
nb_replacement,
nb_replacement_didn_t_remove,
nb_inconsistency_make,
nb_inconsistency_replace,
time_canonicalize,
time_factor_can,
time_factor_list,
time_toposort,
validate_time,
callback_time,
callbacks_time,
)
@staticmethod
def print_profile(stream, prof, level=0):
blanc = " " * level
print(blanc, "GemmOptimizer", file=stream)
print(blanc, " nb_iter", prof[1], file=stream)
print(blanc, " nb_replacement", prof[2], file=stream)
print(blanc, " nb_replacement_didn_t_remove", prof[3], file=stream)
print(blanc, " nb_inconsistency_make", prof[4], file=stream)
print(blanc, " nb_inconsistency_replace", prof[5], file=stream)
print(blanc, " time_canonicalize", prof[6], file=stream)
print(blanc, " time_factor_can", prof[7], file=stream)
print(blanc, " time_factor_list", prof[8], file=stream)
print(blanc, " time_toposort", prof[9], file=stream)
print(blanc, " validate_time", prof[10], file=stream)
print(blanc, " callback_time", prof[11], file=stream)
if prof[11] > 1:
print(blanc, " callbacks_time", file=stream)
for i in sorted(prof[12].items(), key=lambda a: a[1]):
if i[1] > 0:
print(i)
class Dot22(GemmRelated):
"""Compute a matrix-matrix product.
This is a specialization of the more general Dot().
"""
check_input = False
def make_node(self, x, y):
x = at.as_tensor_variable(x)
y = at.as_tensor_variable(y)
dtypes = ("float16", "float32", "float64", "complex64", "complex128")
if x.type.ndim != 2 or x.type.dtype not in dtypes:
raise TypeError(x)
if y.type.ndim != 2 or y.type.dtype not in dtypes:
raise TypeError(y)
if y.type.dtype != x.type.dtype:
raise TypeError("dtype mismatch to Dot22")
bz = (x.type.broadcastable[0], y.type.broadcastable[1])
outputs = [tensor(x.type.dtype, bz)]
return Apply(self, [x, y], outputs)
def perform(self, node, inp, out):
x, y = inp
(z,) = out
try:
z[0] = np.asarray(np.dot(x, y))
except ValueError as e:
            # The error raised by numpy has no shape information, so we
            # add the input shapes to it here
e.args = e.args + (x.shape, y.shape)
raise
def infer_shape(self, fgraph, node, input_shapes):
return [[input_shapes[0][0], input_shapes[1][1]]]
setup_z_Nz_Sz = """
if ((NULL == %(_zout)s)
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_x)s)[0])
|| (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_y)s)[1]))
{
if (NULL != %(_zout)s) Py_XDECREF(%(_zout)s);
npy_intp dims[2];
dims[0] = PyArray_DIMS(%(_x)s)[0];
dims[1] = PyArray_DIMS(%(_y)s)[1];
%(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
PyArray_TYPE(%(_x)s));
//fprintf(stderr, "Dot Allocating %%i %%i\\n", dims[0], dims[1]);
if(!%(_zout)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc dot22 output");
%(fail)s
}
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);
"""
check_ab_double_or_float = ""
case_float_ab_constants = """
float a = 1.0;
float b = 0.0;
"""
case_double_ab_constants = """
double a = 1.0;
double b = 0.0;
"""
def c_code(self, node, name, inp, out, sub): # DEBUG
_x, _y = inp
(_zout,) = out
if node.inputs[0].type.dtype.startswith("complex"):
raise MethodNotDefined(f"{self.__class__.__name__}.c_code")
if len(self.c_libraries()) <= 0:
raise NotImplementedError()
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (2,) + gv
else:
return gv
_dot22 = Dot22()
@local_optimizer([Dot])
def local_dot_to_dot22(fgraph, node):
# This works for tensor.outer too because basic.outer is a macro that
# produces a dot(dimshuffle,dimshuffle) of form 4 below
if not isinstance(node.op, Dot):
return
x, y = node.inputs
if y.type.dtype != x.type.dtype:
# TODO: upcast one so the types match
_logger.info(f"Not optimizing dot with inputs {x} {y} {x.type} {y.type}")
return
if y.type.dtype in ("float16", "float32", "float64", "complex64", "complex128"):
with inherit_stack_trace(node.outputs):
if x.ndim == 2 and y.ndim == 2:
return [_dot22(*node.inputs)]
if x.ndim == 2 and y.ndim == 1:
return [_dot22(x, y.dimshuffle(0, "x")).dimshuffle(0)]
if x.ndim == 1 and y.ndim == 2:
return [_dot22(x.dimshuffle("x", 0), y).dimshuffle(1)]
if x.ndim == 1 and y.ndim == 1:
return [_dot22(x.dimshuffle("x", 0), y.dimshuffle(0, "x")).dimshuffle()]
_logger.info(f"Not optimizing dot with inputs {x} {y} {x.type} {y.type}")
@local_optimizer([gemm_no_inplace], inplace=True)
def local_inplace_gemm(fgraph, node):
if node.op == gemm_no_inplace:
with inherit_stack_trace(node.outputs):
return [gemm_inplace(*node.inputs)]
@local_optimizer([gemv_no_inplace], inplace=True)
def local_inplace_gemv(fgraph, node):
if node.op == gemv_no_inplace:
with inherit_stack_trace(node.outputs):
return [gemv_inplace(*node.inputs)]
@local_optimizer([ger], inplace=True)
def local_inplace_ger(fgraph, node):
if node.op == ger:
with inherit_stack_trace(node.outputs):
return [ger_destructive(*node.inputs)]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_gemv(fgraph, node):
"""GEMM acting on row or column matrices -> GEMV."""
if node.op == gemm_no_inplace:
z, a, x, y, b = node.inputs
with inherit_stack_trace(node.outputs):
if z.broadcastable == x.broadcastable == (True, False):
r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)
return [r.dimshuffle("x", 0)]
if z.broadcastable == y.broadcastable == (False, True):
r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)
return [r.dimshuffle(0, "x")]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_ger(fgraph, node):
"""GEMM computing an outer-product -> GER."""
if node.op == gemm_no_inplace:
z, a, x, y, b = node.inputs
if x.broadcastable[1] and y.broadcastable[0]:
with inherit_stack_trace(node.outputs):
                # x and y are both vectors, so this might qualify for a GER
xv = x.dimshuffle(0)
yv = y.dimshuffle(1)
try:
bval = at.get_scalar_constant_value(b)
except NotScalarConstantError:
# b isn't a constant, GEMM is doing useful pre-scaling
return
if bval == 1: # best case a natural GER
rval = ger(z, a, xv, yv)
return [rval]
elif bval == 0: # GER on zeros_like should be faster than GEMM
zeros = at.zeros([x.shape[0], y.shape[1]], x.dtype)
rval = ger(zeros, a, xv, yv)
return [rval]
else:
# if bval is another constant, then z is being usefully
# pre-scaled and GER isn't really the right tool for the job.
return
# TODO: delete this optimization when we have the proper dot->gemm->ger pipeline
# working
@local_optimizer([_dot22])
def local_dot22_to_ger_or_gemv(fgraph, node):
"""dot22 computing an outer-product -> GER."""
if node.op == _dot22:
with inherit_stack_trace(node.outputs):
x, y = node.inputs
xb = x.broadcastable
yb = y.broadcastable
one = at.as_tensor_variable(np.asarray(1, dtype=x.dtype))
zero = at.as_tensor_variable(np.asarray(0, dtype=x.dtype))
if xb[1] and yb[0]:
                # x and y are both vectors, so this might qualify for a GER
xv = x.dimshuffle(0)
yv = y.dimshuffle(1)
zeros = at.zeros([x.shape[0], y.shape[1]], dtype=x.dtype)
rval = ger(zeros, one, xv, yv)
return [rval]
if xb[0] and yb[1]:
# x and y are both vectors so this qualifies for a sdot / ddot
# TODO: Aesara doesn't have a sdot, but gemv is better than _dot22
xv = x.dimshuffle(1)
zeros = at.AllocEmpty(x.dtype)(1)
rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
return [rval.dimshuffle("x", 0)]
if xb[0] and not yb[0] and not yb[1]:
# x is vector, y is matrix so try gemv
xv = x.dimshuffle(1)
zeros = at.AllocEmpty(x.dtype)(y.shape[1])
rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
return [rval.dimshuffle("x", 0)]
if not xb[0] and not xb[1] and yb[1]:
# x is matrix, y is vector, try gemv
yv = y.dimshuffle(0)
zeros = at.AllocEmpty(x.dtype)(x.shape[0])
rval = gemv_no_inplace(zeros, one, x, yv, zero)
return [rval.dimshuffle(0, "x")]
#################################
#
# Set up the BlasOpt optimizer
#
#################################
blas_optdb = SequenceDB()
# run after numerical stability optimizations (1.5)
optdb.register("BlasOpt", blas_optdb, 1.7, "fast_run", "fast_compile")
# run before specialize (2.0) because specialize is basically a
# free-for-all that makes the graph crazy.
# fast_compile is needed to have GpuDot22 created.
blas_optdb.register(
"local_dot_to_dot22", in2out(local_dot_to_dot22), 0, "fast_run", "fast_compile"
)
blas_optdb.register("gemm_optimizer", GemmOptimizer(), 10, "fast_run")
blas_optdb.register(
"local_gemm_to_gemv",
EquilibriumOptimizer(
[
local_gemm_to_gemv,
local_gemm_to_ger,
local_dot22_to_ger_or_gemv,
local_dimshuffle_lift,
],
max_use_ratio=5,
ignore_newtrees=False,
),
15,
"fast_run",
)
# After destroyhandler(49.5) but before we try to make elemwise things
# inplace (75)
blas_opt_inplace = in2out(
local_inplace_gemm, local_inplace_gemv, local_inplace_ger, name="blas_opt_inplace"
)
optdb.register(
"InplaceBlasOpt", blas_opt_inplace, 70.0, "fast_run", "inplace", "blas_opt_inplace"
)
class Dot22Scalar(GemmRelated):
"""Compute a matrix-matrix product.
    This is a specialization of the more general Dot(). It is used to call an
    optimized gemm implementation and to generate a gemm later.
    It computes scalar * dot(x, y).
"""
check_input = False
def make_node(self, x, y, a):
if a.ndim != 0:
raise TypeError(Gemm.E_scalar, a)
if x.ndim != 2:
raise TypeError(Gemm.E_rank, x)
if y.ndim != 2:
raise TypeError(Gemm.E_rank, y)
if not (a.dtype == x.dtype == y.dtype):
raise TypeError(
"Dot22Scalar requires matching dtypes", (a.dtype, x.dtype, y.dtype)
)
if not a.dtype.startswith("float") and not a.dtype.startswith("complex"):
raise TypeError("Dot22Scalar requires float or complex args", a.dtype)
bz = [x.type.broadcastable[0], y.type.broadcastable[1]]
outputs = [tensor(x.type.dtype, bz)]
return Apply(self, [x, y, a], outputs)
def perform(self, node, inp, out):
x, y, scalar = inp
(z,) = out
try:
z[0] = np.asarray(scalar * np.dot(x, y))
except ValueError as e:
            # The error raised by numpy has no shape information, so we
            # add the input shapes to it here
e.args = e.args + (x.shape, y.shape)
raise
def infer_shape(self, fgraph, node, input_shapes):
return [[input_shapes[0][0], input_shapes[1][1]]]
setup_z_Nz_Sz = Dot22.setup_z_Nz_Sz
check_ab_double_or_float = """
if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError,
"type(a) is not double or float"); %(fail)s;}
"""
case_float_ab_constants = """
#define REAL float
float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
#undef REAL
float b = 0.0;
"""
case_double_ab_constants = """
#define REAL double
double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
#undef REAL
double b = 0.0;
"""
def c_code(self, node, name, inp, out, sub):
_x, _y, _a = inp
(_zout,) = out
if node.inputs[0].type.dtype.startswith("complex"):
raise MethodNotDefined(f"{self.__class__.__name__}.c_code")
if len(self.c_libraries()) <= 0:
raise NotImplementedError()
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (2,) + gv
else:
return gv
_dot22scalar = Dot22Scalar()
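# A NumPy sketch of the Dot22Scalar semantics and of the rewrite performed by
# local_dot22_to_dot22scalar below: a graph of the form s * dot22(x, y) is
# replaced by _dot22scalar(x, y, s), whose value is simply s * dot(x, y). The
# helper is hypothetical and never called by the library.
def _dot22scalar_semantics_sketch():
    import numpy as np
    x = np.random.rand(3, 4)
    y = np.random.rand(4, 2)
    s = 0.5
    # _dot22scalar(x_var, y_var, s_var) evaluates to this array.
    return s * np.dot(x, y)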
@local_optimizer([mul])
def local_dot22_to_dot22scalar(fgraph, node):
"""
Notes
-----
Previous attempts to alter this optimization to replace dot22 with
gemm instead of dot22scalar resulted in some Scan nodes being
duplicated and the ScanSaveMem optimization never running on them,
resulting in highly increased memory usage. Until this issue is
resolved, this optimization should keep using dot22scalar instead of
gemm.
    We upcast the scalar if, after the multiplication with the dot, this gives
    the same type.
    We execute this optimizer after the gemm optimizer. This gives more
    priority to gemm, which yields a bigger speed up than this optimizer,
    while still allowing the gemm optimizer to ignore this op.
    TODO: support the case where we can reorder the mul to generate a
    dot22scalar, or fix the canonizer to merge them (one mul with multiple
    inputs).
"""
if node.op != mul:
return False
i_dot22 = [x.owner and x.owner.op == _dot22 for x in node.inputs]
if not any(i_dot22):
return False # no dot22
if i_dot22.count(True) > 1:
# TODO: try each of them.
pass
# return False #TODO fix
dot22_idx = i_dot22.index(True)
d = node.inputs[dot22_idx]
i_scalar = [_as_scalar(x, dtype=d.dtype) for x in node.inputs]
if not any(i_scalar):
        # Check if we can reorder the graph, as this mul has a mul among its
        # inputs. We support only one additional level of mul.
        # The canonizer should have merged those muls together.
i_mul = [
x.owner
and x.owner.op == mul
and any(_as_scalar(x_i, dtype=d.dtype) for x_i in x.owner.inputs)
for x in node.inputs
]
if not any(i_mul):
            # no scalar in the inputs and no multiplication; if there were a
            # multiplication we could reorder the graph using the
            # associativity of multiplication.
return False
mul_idx = i_mul.index(True) # The first one should always work
m = node.inputs[mul_idx]
scalar_idx = -1
for i, x in enumerate(m.owner.inputs):
if _as_scalar(x, dtype=d.dtype) and (
aesara.scalar.upcast(x.type.dtype, d.type.dtype) == d.type.dtype
):
scalar_idx = i
break
if scalar_idx < 0:
_logger.info(
f"Not optimizing dot22 with inputs {node.inputs} {[x.type for x in node.inputs]}, as the"
" type of the scalar cannot be upcasted to the"
" matrix type"
)
return False
a = at.cast(_as_scalar(m.owner.inputs[scalar_idx], dtype=d.dtype), d.type.dtype)
assert not a.type.ndim
dot = _dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)
        # The other inputs to the original node that were
        # neither part of the dot22 nor this mul should be
        # factors in the returned "mul" node.
assert dot22_idx != mul_idx
other_factors = [
inpt for i, inpt in enumerate(node.inputs) if i not in (dot22_idx, mul_idx)
]
other_m_inputs = [
inpt for i, inpt in enumerate(m.owner.inputs) if i != scalar_idx
]
return [mul(dot, *(other_factors + other_m_inputs))]
scalar_idx = -1
for i, x in enumerate(node.inputs):
if (
i != dot22_idx
and i_scalar[i] is not None
and (aesara.scalar.upcast(x.type.dtype, d.type.dtype) == d.type.dtype)
):
scalar_idx = i
break
if scalar_idx < 0:
_logger.info(
f"Not optimizing dot22 with inputs {node.inputs} {[x.type for x in node.inputs]}, as the type "
"of the scalar cannot be upcasted to the matrix type"
)
return False
assert scalar_idx < len(node.inputs)
s = node.inputs[scalar_idx]
o = copy.copy(node.inputs)
o.remove(d)
o.remove(s)
a = at.cast(i_scalar[scalar_idx], d.type.dtype)
assert not a.type.ndim
if len(o) == 0:
return [_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)]
else:
return [mul(_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a), *o)]
# This must happen after gemm, as the gemm optimizer doesn't understand
# dot22scalar, and gemm gives a bigger speed up than dot22scalar.
blas_optdb.register(
"local_dot22_to_dot22scalar", in2out(local_dot22_to_dot22scalar), 11, "fast_run"
)
class BatchedDot(COp):
"""
Computes the batched dot product of two variables:
batched_dot(a, b)[i] = dot(a[i], b[i])
"""
__props__ = ()
def make_node(self, *inputs):
inputs = list(map(at.as_tensor_variable, inputs))
if len(inputs) != 2:
raise TypeError(f"Two arguments required, but {len(inputs)} given.")
if inputs[0].ndim not in (2, 3):
raise TypeError(
"Input 0 (0-indexed)"
f" must have ndim of 2 or 3, {int(inputs[0].ndim)} given. Consider"
" calling batched_dot instead."
)
if inputs[1].ndim not in (2, 3):
raise TypeError(
"Input 1 (0-indexed)"
f" must have ndim of 2 or 3, {int(inputs[1].ndim)} given. Consider"
" calling batched_dot instead."
)
dtype = aesara.scalar.upcast(*[input.type.dtype for input in inputs])
# upcast inputs to common dtype if needed
upcasted_inputs = [at.cast(input, dtype) for input in inputs]
broadcastable = (
(inputs[0].type.broadcastable[0] or inputs[1].type.broadcastable[0],)
+ inputs[0].type.broadcastable[1:-1]
+ inputs[1].type.broadcastable[2:]
)
return Apply(self, upcasted_inputs, [tensor(dtype, broadcastable)])
def perform(self, node, inp, out):
x, y = inp
(z,) = out
if x.shape[0] != y.shape[0]:
raise TypeError(
f"Inputs [{', '.join(map(str, inp))}] must have the"
f" same size in axis 0, but have sizes [{', '.join([str(i.shape[0]) for i in inp])}]."
)
shape = self.infer_shape(None, node, [i.shape for i in inp])[0]
dtype = node.outputs[0].dtype
z0 = z[0] = np.empty(shape, dtype=dtype)
for i in range(z0.shape[0]):
z0[i] = np.dot(x[i], y[i])
def c_support_code(self, **kwargs):
batch_gemm_defn = """
template<typename dtype>
bool batch_gemm(void (*gemm)(char*, char*, const int*, const int*, const int*, const dtype*, const dtype*, const int*, const dtype*, const int*, const dtype*, dtype*, const int*),
int type_size, PyArrayObject* xs, PyArrayObject* ys,
PyArrayObject* zs) {
npy_intp *Nx = PyArray_DIMS(xs), *Sx = PyArray_STRIDES(xs);
npy_intp *Ny = PyArray_DIMS(ys), *Sy = PyArray_STRIDES(ys);
npy_intp *Nz = PyArray_DIMS(zs), *Sz = PyArray_STRIDES(zs);
if (Nx[0] != Ny[0]) {
PyErr_Format(PyExc_ValueError,
"Shape mismatch: batch sizes unequal."
" x.shape is (%d, %d, %d),"
" y.shape is (%d, %d, %d).",
Nx[0], Nx[1], Nx[2],
Ny[0], Ny[1], Ny[2]);
return 1;
}
if (Nx[2] != Ny[1]) {
PyErr_Format(PyExc_ValueError,
"Shape mismatch: summation axis sizes unequal."
" x.shape is (%d, %d, %d),"
" y.shape is (%d, %d, %d).",
Nx[0], Nx[1], Nx[2],
Ny[0], Ny[1], Ny[2]);
return 1;
}
/* encode the stride structure of _x,_y,_z into a single integer. */
int unit = 0;
unit |= ((Sx[2] == type_size || Nx[2] == 1) ? 0x0 : (Sx[1] == type_size || Nx[1]==1) ? 0x1 : 0x2) << 8;
unit |= ((Sy[2] == type_size || Ny[2] == 1) ? 0x0 : (Sy[1] == type_size || Ny[1]==1) ? 0x1 : 0x2) << 4;
unit |= ((Sz[2] == type_size || Nz[2] == 1) ? 0x0 : (Sz[1] == type_size || Nz[1]==1) ? 0x1 : 0x2) << 0;
/* create appropriate strides for malformed matrices that are row or column
* vectors, or empty matrices.
* In that case, the value of the stride does not really matter, but
* some versions of BLAS insist that:
* - they are not smaller than the number of elements in the array,
* - they are not 0.
*/
int sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[2] + 1);
int sx_2 = (Nx[2] > 1) ? Sx[2]/type_size : (Nx[1] + 1);
int sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[2] + 1);
int sy_2 = (Ny[2] > 1) ? Sy[2]/type_size : (Ny[1] + 1);
int sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[2] + 1);
int sz_2 = (Nz[2] > 1) ? Sz[2]/type_size : (Nz[1] + 1);
dtype* x = (dtype*)PyArray_DATA(xs);
dtype* y = (dtype*)PyArray_DATA(ys);
dtype* z = (dtype*)PyArray_DATA(zs);
dtype a = 1.0;
dtype b = 0.0;
char N = 'N';
char T = 'T';
int Nz1 = Nz[1], Nz2 = Nz[2], Nx2 = Nx[2];
// loop over batch axis
for (int i = 0; i < Nz[0]; i++) {
switch(unit)
{
case 0x000: gemm(&N, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_1, &b, z, &sz_1); break;
case 0x100: gemm(&N, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_2, &b, z, &sz_1); break;
case 0x010: gemm(&T, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_1, &b, z, &sz_1); break;
case 0x110: gemm(&T, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_2, &b, z, &sz_1); break;
case 0x001: gemm(&T, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_1, &b, z, &sz_2); break;
case 0x101: gemm(&N, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_1, &b, z, &sz_2); break;
case 0x011: gemm(&T, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_2, &b, z, &sz_2); break;
case 0x111: gemm(&N, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_2, &b, z, &sz_2); break;
default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); return 1;
};
x += Sx[0] / type_size;
y += Sy[0] / type_size;
z += Sz[0] / type_size;
}
return 0;
}
"""
return blas_header_text() + batch_gemm_defn
def c_libraries(self, **kwargs):
return ldflags()
def c_compile_args(self, **kwargs):
return ldflags(libs=False, flags=True)
def c_lib_dirs(self, **kwargs):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self, **kwargs):
return ldflags(libs=False, include_dir=True)
def c_code_cleanup(self, node, name, inputs, outputs, sub):
return """
// clean up views
Py_XDECREF(xs); xs = 0;
Py_XDECREF(ys); ys = 0;
Py_XDECREF(zs); zs = 0;
"""
def c_code(self, node, name, inp, out, sub):
_x, _y = inp
(_z,) = out
fail = sub["fail"]
# generate contiguity condition
def contiguous(var, ndim):
strides = f"PyArray_STRIDES({var})"
if ndim == 1:
return f"{strides}[0] == type_size"
return " && ".join(
[
" && ".join(
"{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0".format(
strides=strides, i=i
)
for i in range(1, ndim)
),
"(%s)"
% " || ".join(
"{strides}[{i}] == type_size".format(strides=strides, i=i)
for i in range(1, ndim)
),
]
)
x_ndim, y_ndim, z_ndim = (
node.inputs[0].ndim,
node.inputs[1].ndim,
node.outputs[0].ndim,
)
# generate code to allocate output based on runtime input shapes
z_dims = [f"PyArray_DIMS({_x})[0]"]
if x_ndim == 3:
z_dims.append(f"PyArray_DIMS({_x})[1]")
if y_ndim == 3:
z_dims.append(f"PyArray_DIMS({_y})[2]")
assert len(z_dims) == z_ndim
z_shape_correct = " && ".join(
"PyArray_DIMS(%s)[%i] == %s" % (_z, i, dim) for i, dim in enumerate(z_dims)
)
z_shape = ", ".join(z_dims)
z_contiguous = contiguous(_z, z_ndim)
allocate = (
"""
if (NULL == %(_z)s || !(%(z_shape_correct)s) || !(%(z_contiguous)s))
{
npy_intp dims[%(z_ndim)s] = {%(z_shape)s};
Py_XDECREF(%(_z)s);
%(_z)s = (PyArrayObject*)PyArray_SimpleNew(
%(z_ndim)s, dims, PyArray_TYPE(%(_x)s));
if(!%(_z)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc BatchedDot output");
%(fail)s
}
}
"""
% locals()
)
# code to reallocate inputs contiguously if necessary
contiguate = []
for var, ndim in [(_x, x_ndim), (_y, y_ndim)]:
_contiguous = contiguous(var, ndim)
contiguate.append(
"""
if (!(%(_contiguous)s)) {
PyArrayObject * _copy = (PyArrayObject *) PyArray_Copy(%(var)s);
if (!_copy)
%(fail)s
Py_XDECREF(%(var)s);
%(var)s = _copy;
}
"""
% locals()
)
contiguate = "\n".join(contiguate)
def c_dimshuffle(newname, oldname, shape):
_fail = fail
_shape = ", ".join(
"1" if axis is None else "PyArray_DIMS(%s)[%i]" % (oldname, axis)
for axis in shape
)
return (
"""{
npy_intp dims[3] = {%(_shape)s};
PyArray_Dims newshape = {dims, 3};
%(newname)s = (PyArrayObject*)PyArray_Newshape(%(oldname)s, &newshape, NPY_ANYORDER);
if (!%(newname)s)
%(_fail)s
// make sure we didn't accidentally copy
assert(PyArray_DATA(%(oldname)s) == PyArray_DATA(%(newname)s));
}"""
% locals()
)
# create tensor3 views for any of x, y, z that are not tensor3, so that
# we only need to implement the tensor3-tensor3 batched dot product.
# xs, ys and zs will point to these views, or to the original array if
# it was already tensor3.
# in the latter case, we artificially increase the reference count of
# the original array so that the c_code_cleanup method can decref them
# all indiscriminately.
upcast = []
if x_ndim == 3:
upcast.append("xs = %(_x)s; Py_XINCREF(xs);")
elif x_ndim == 2:
upcast.append(c_dimshuffle("xs", _x, (0, None, 1)))
if y_ndim == 3:
upcast.append("ys = %(_y)s; Py_XINCREF(ys);")
elif y_ndim == 2:
upcast.append(c_dimshuffle("ys", _y, (0, 1, None)))
if z_ndim == 3:
upcast.append("zs = %(_z)s; Py_XINCREF(zs);")
else:
upcast.append(
c_dimshuffle(
"zs",
_z,
(0, None if x_ndim == 2 else 1, None if y_ndim == 2 else 1),
)
)
upcast = "\n".join(upcast) % locals()
return (
"""
int type_num = PyArray_DESCR(%(_x)s)->type_num;
int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes
// xs, ys, zs will point to views onto %(_x)s, %(_y)s, %(_z)s
PyArrayObject *xs = 0, *ys = 0, *zs = 0;
if (PyArray_NDIM(%(_x)s) != %(x_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(x) != %(x_ndim)s. rank(x) is %%d.",
PyArray_NDIM(%(_x)s));
%(fail)s;
}
if (PyArray_NDIM(%(_y)s) != %(y_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(y) != %(y_ndim)s. rank(y) is %%d.",
PyArray_NDIM(%(_y)s));
%(fail)s;
}
if (%(_z)s && PyArray_NDIM(%(_z)s) != %(z_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(z) != %(z_ndim)s. rank(z) is %%d.",
PyArray_NDIM(%(_z)s));
%(fail)s;
}
// allocate output
%(allocate)s
// reallocate any noncontiguous arrays or arrays with invalid strides
%(contiguate)s
// add dims to make sure everything is tensor3
%(upcast)s
// from here on, use xs, ys and zs as they are tensor3 and share memory
// with the original %(_x)s, %(_y)s and %(_z)s arrays.
if ((PyArray_DESCR(xs)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(xs)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(ys)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(ys)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(zs)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(zs)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(xs)->type_num != PyArray_DESCR(ys)->type_num)
||(PyArray_DESCR(xs)->type_num != PyArray_DESCR(zs)->type_num))
{ PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }
switch (type_num)
{
case NPY_FLOAT:
if (batch_gemm<float>(sgemm_, type_size, xs, ys, zs)) {
%(fail)s;
}
break;
case NPY_DOUBLE:
if (batch_gemm<double>(dgemm_, type_size, xs, ys, zs)) {
%(fail)s;
}
break;
}
"""
% locals()
)
def c_code_cache_version(self):
from aesara.tensor.blas_headers import blas_header_version
return (4, blas_header_version())
def grad(self, inp, grads):
x, y = inp
(gz,) = grads
xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim
# grad is a vector, so x is a matrix and y is a matrix
if gdim == 1:
xgrad = gz.dimshuffle(0, "x") * y
ygrad = gz.dimshuffle(0, "x") * x
# x is a matrix, y is a tensor3, grad is a matrix
elif xdim == 2 and ydim == 3:
xgrad = batched_dot(gz, y.dimshuffle(0, 2, 1))
ygrad = x.dimshuffle(0, 1, "x") * gz.dimshuffle(0, "x", 1)
# x is a tensor3, y is a matrix, grad is a matrix
elif xdim == 3 and ydim == 2:
xgrad = gz.dimshuffle(0, 1, "x") * y.dimshuffle(0, "x", 1)
ygrad = batched_dot(x.dimshuffle(0, 2, 1), gz)
# x is a tensor3, y is a tensor3, grad is a tensor3
elif xdim == ydim == 3:
xgrad = batched_dot(gz, y.dimshuffle(0, 2, 1))
ygrad = batched_dot(x.dimshuffle(0, 2, 1), gz)
        # If x or y contain broadcastable dimensions but only one of
        # them knows that a matching dimension is broadcastable, the
        # above code doesn't always return the right broadcast pattern.
        # This causes problems down the road. See gh-1461.
if xgrad.broadcastable != x.broadcastable:
xgrad = at.patternbroadcast(xgrad, x.broadcastable)
if ygrad.broadcastable != y.broadcastable:
ygrad = at.patternbroadcast(ygrad, y.broadcastable)
return xgrad, ygrad
def R_op(self, inputs, eval_points):
# R_op for batched_dot(a, b) evaluated at c for a and d for b is
# simply batched_dot(c, b) + batched_dot(a, d)
assert len(inputs) == 2
assert len(eval_points) == 2
if eval_points[0] is None and eval_points[1] is None:
return [None]
test_values_enabled = config.compute_test_value != "off"
if test_values_enabled:
try:
iv0 = aesara.graph.op.get_test_value(inputs[0])
except TestValueError:
aesara.graph.op.missing_test_message(
"first input passed to BatchedDot.R_op has no test value"
)
test_values_enabled = False
try:
iv1 = aesara.graph.op.get_test_value(inputs[1])
except TestValueError:
aesara.graph.op.missing_test_message(
"second input passed to BatchedDot.R_op has no test value"
)
test_values_enabled = False
if eval_points[0]:
try:
ev0 = aesara.graph.op.get_test_value(eval_points[0])
except TestValueError:
aesara.graph.op.missing_test_message(
"first eval point passed to BatchedDot.R_op "
"has no test value"
)
test_values_enabled = False
if eval_points[1]:
try:
ev1 = aesara.graph.op.get_test_value(eval_points[1])
except TestValueError:
aesara.graph.op.missing_test_message(
"second eval point passed to BatchedDot.R_op "
"has no test value"
)
test_values_enabled = False
if test_values_enabled:
input_values = [iv0, iv1]
eval_point_values = [ev0, ev1]
for i in range(2):
if (
eval_point_values[i] is not None
and input_values[i].shape != eval_point_values[i].shape
):
raise ValueError(
"input "
+ str(i)
+ " and eval_point "
+ str(i)
+ " to BatchedDot.R_op should have the same shape, but "
f"their shapes are {input_values[i].shape} and {eval_point_values[i].shape}, respectively"
)
if eval_points[0]:
t1 = self(eval_points[0], inputs[1])
if eval_points[1]:
t2 = self(inputs[0], eval_points[1])
if eval_points[0] and eval_points[1]:
return [t1 + t2]
elif eval_points[0]:
return [t1]
else:
return [t2]
def infer_shape(self, fgraph, node, shapes):
for shape_ in shapes:
if len(shape_) not in (2, 3):
raise NotImplementedError()
xshp, yshp = shapes
return [xshp[:-1] + yshp[2:]]
_batched_dot = BatchedDot()
# from opt import register_specialize, register_canonicalize
# @register_specialize
@local_optimizer([sub, add])
def local_print_as_we_go_along(fgraph, node):
if node.op in (sub, add):
debugprint(node)
def batched_dot(a, b):
"""Compute the batched dot product of two variables.
I.e.:
batched_dot(a, b)[i] = dot(a[i], b[i])
Note that this batched_dot function does one of three things, in the
following sequence:
1. If either a or b is a vector, it returns the batched elementwise
product without calling the Aesara BatchedDot op.
2. If both a and b have either 2 or 3 dimensions, it calls Aesara's
BatchedDot op on a and b.
3. If either a or b has more than 3 dimensions, it calls Aesara's
batched_tensordot function with appropriate axes. The
batched_tensordot function expresses high-dimensional batched
dot products in terms of batched matrix-matrix dot products, so
it may be possible to further optimize for performance.
"""
a, b = at.as_tensor_variable(a), at.as_tensor_variable(b)
if a.ndim == 0:
raise TypeError("a must have at least one (batch) axis")
elif b.ndim == 0:
raise TypeError("b must have at least one (batch) axis")
elif a.ndim == 1:
return a.dimshuffle(*([0] + ["x"] * (b.ndim - 1))) * b
elif b.ndim == 1:
return a * b.dimshuffle(*([0] + ["x"] * (a.ndim - 1)))
elif a.ndim > 3 or b.ndim > 3:
return batched_tensordot(a, b, [[a.ndim - 1], [np.maximum(1, b.ndim - 2)]])
else:
# avoid circular import
return _batched_dot(a, b)
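# A NumPy sketch of the batched_dot semantics described above, assuming two
# tensor3 inputs with matching batch sizes: result[i] = dot(a[i], b[i]). This
# hypothetical helper is not used by the library.
def _batched_dot_semantics_sketch():
    import numpy as np
    a = np.random.rand(5, 2, 3)
    b = np.random.rand(5, 3, 4)
    out = np.empty((5, 2, 4))
    for i in range(a.shape[0]):  # loop over the batch axis
        out[i] = np.dot(a[i], b[i])
    # np.einsum("bij,bjk->bik", a, b) produces the same result in one call.
    return out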
def batched_tensordot(x, y, axes=2):
"""Compute a batched tensordot product.
A hybrid of batched_dot and tensordot, this function computes the
tensordot product between the two tensors, by iterating over the
first dimension to perform a sequence of tensordots.
Parameters
----------
x: TensorVariable
A tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
y: TensorVariable
A tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
axes: int or array-like of length 2
If an integer, the number of axes to sum over.
If an array, it must have two array elements containing the axes to sum
over in each tensor.
If an integer i, it is converted to an array containing
the last i dimensions of the first tensor and the first
i dimensions of the second tensor (excluding the first
(batch) dimension):
            axes = [list(range(a.ndim - i, a.ndim)), list(range(1, i + 1))]
If an array, its two elements must contain compatible axes
of the two tensors. For example, [[1, 2], [2, 4]] means sum
over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.
(Remember axes are zero-indexed!) The 2nd axis of a and the
3rd axis of b must have the same shape; the same is true for
the 3rd axis of a and the 5th axis of b.
Like tensordot, this function uses a series of dimshuffles and
reshapes to reduce the tensor dot product to a matrix or vector
dot product. Finally, it calls batched_dot to compute the result.
"""
from aesara.tensor.math import _tensordot_as_dot
return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)
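# A sketch of the integer `axes` convention from the docstring above, assuming
# axes=1 and 3-D inputs: the last axis of the first tensor is summed against
# the first non-batch axis of the second, which reduces to the batched matrix
# product. This hypothetical helper only restates the convention with NumPy.
def _batched_tensordot_axes_sketch():
    import numpy as np
    x = np.random.rand(5, 2, 3)  # (batch, dim1, dim2)
    y = np.random.rand(5, 3, 4)  # (batch, dim2, dim3)
    i = 1
    axes = [list(range(x.ndim - i, x.ndim)), list(range(1, i + 1))]  # [[2], [1]]
    out = np.empty((5, 2, 4))
    for b in range(x.shape[0]):
        # drop the batch axis when calling the per-sample tensordot
        out[b] = np.tensordot(x[b], y[b], axes=(axes[0][0] - 1, axes[1][0] - 1))
    return out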
|
the-stack_106_14083
|
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.db import models
from django.conf import settings
import uuid
from taggit.managers import TaggableManager
from django_resized import ResizedImageField
from ckeditor.fields import RichTextField
class DateTimeModel(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class MessageModel(DateTimeModel):
# id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
room = models.ForeignKey(Group, verbose_name="room_name", on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='sender', on_delete=models.CASCADE, null=True)
message = models.TextField('body')
recipients = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='recipients')
def __unicode__(self):
return self.id
def __str__(self):
return f'{self.message} sent by {self.user} in Room {self.room}'
class Meta:
ordering = ['-id']
class News(models.Model):
title = models.CharField(max_length=100)
body = RichTextField(blank=True, null=True)
    image = ResizedImageField(size=[500, 500], upload_to="news_post", null=True, blank=True)
room_field = models.CharField(max_length=10, blank=True)
published = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(unique=True, max_length=100)
tags = TaggableManager()
def __str__(self):
return self.title
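# A brief usage sketch for the models above, assuming a configured Django
# project with these apps migrated and the default auth user model; the
# objects created below are placeholders for illustration only, and this
# hypothetical helper is not part of the app.
def _models_usage_sketch():
    room = Group.objects.create(name="general")
    sender = User.objects.create_user(username="alice")
    msg = MessageModel.objects.create(room=room, user=sender, message="hello")
    msg.recipients.add(sender)
    post = News.objects.create(
        title="Welcome",
        body="First post",
        room_field="general",
        slug="welcome",
    )
    post.tags.add("announcement")
    return msg, post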
|
the-stack_106_14085
|
#!/usr/bin/env python
"""
pytest-flask
============
A set of `pytest <https://docs.pytest.org>`_ fixtures to test Flask
extensions and applications.
Features
--------
Plugin provides some fixtures to simplify app testing:
- ``client`` - an instance of ``app.test_client``,
- ``client_class`` - ``client`` fixture for class-based tests,
- ``config`` - the application config,
- ``live_server`` - runs an application in the background (useful for tests
with `Selenium <http://www.seleniumhq.org>`_ and other headless browsers),
- ``request_ctx`` - the request context,
- ``accept_json``, ``accept_jsonp``, ``accept_any`` - accept headers
suitable to use as parameters in ``client``.
To pass options to your application use the ``pytest.mark.options`` marker:
.. code:: python
@pytest.mark.options(debug=False)
def test_app(app):
assert not app.debug, 'Ensure the app not in debug mode'
During tests execution the request context has been pushed, e.g. ``url_for``,
``session`` and other context bound objects are available without context
managers:
.. code:: python
def test_app(client):
assert client.get(url_for('myview')).status_code == 200
Response object has a ``json`` property to test a view that returns
a JSON response:
.. code:: python
@api.route('/ping')
def ping():
return jsonify(ping='pong')
def test_api_ping(client):
res = client.get(url_for('api.ping'))
assert res.json == {'ping': 'pong'}
If you want your tests done via Selenium or other headless browser use
the ``live_server`` fixture. The server’s URL can be retrieved using
the ``url_for`` function:
.. code:: python
from flask import url_for
@pytest.mark.usefixtures('live_server')
class TestLiveServer:
def test_server_is_up_and_running(self):
res = urllib2.urlopen(url_for('index', _external=True))
assert b'OK' in res.read()
assert res.code == 200
Quick Start
-----------
To start using a plugin define your application fixture in ``conftest.py``:
.. code:: python
from myapp import create_app
@pytest.fixture
def app():
app = create_app()
return app
Install the extension with dependencies and run your test suite:
.. code:: bash
$ pip install pytest-flask
$ py.test
Documentation
-------------
The latest documentation is available at
http://pytest-flask.readthedocs.org/en/latest/.
Contributing
------------
Don’t hesitate to create a `GitHub issue
<https://github.com/vitalk/pytest-flask/issues>`_ for any **bug** or
**suggestion**.
"""
import os
from setuptools import find_packages
from setuptools import setup
def read(*parts):
"""Reads the content of the file located at path created from *parts*."""
try:
return open(os.path.join(*parts), "r", encoding="utf-8").read()
except OSError:
return ""
requirements = read("requirements", "main.txt").splitlines()
tests_require = []
extras_require = {
"docs": read("requirements", "docs.txt").splitlines(),
"tests": tests_require,
}
setup(
name="pytest-flask",
# Versions should comply with PEP440, and automatically obtained from tags
# thanks to setuptools_scm
use_scm_version={"write_to": "pytest_flask/_version.py"},
setup_requires=["setuptools-scm"],
author="Vital Kudzelka",
author_email="[email protected]",
url="https://github.com/vitalk/pytest-flask",
project_urls={
"Source": "https://github.com/pytest-dev/pytest-flask",
"Tracker": "https://github.com/pytest-dev/pytest-flask/issues",
},
description="A set of py.test fixtures to test Flask applications.",
long_description=__doc__,
license="MIT",
packages=find_packages(exclude=["docs", "tests"]),
zip_safe=False,
platforms="any",
install_requires=requirements,
tests_require=tests_require,
extras_require=extras_require,
keywords="pytest flask testing",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Plugins",
"Environment :: Web Environment",
"Framework :: Pytest",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Testing",
],
python_requires=">=3.5",
# The following makes the plugin available to pytest
entry_points={
"pytest11": [
"flask = pytest_flask.plugin",
]
},
)
|
the-stack_106_14088
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import functools
import pytest
import logging
from devtools_testutils import recorded_by_proxy, set_custom_default_matcher
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError, ClientAuthenticationError
from azure.core.pipeline.transport import RequestsTransport
from azure.ai.formrecognizer import (
DocumentModelAdministrationClient,
DocumentAnalysisApiVersion,
ModelOperation
)
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
DocumentModelAdministrationClientPreparer = functools.partial(_GlobalClientPreparer, DocumentModelAdministrationClient)
class TestManagement(FormRecognizerTest):
def teardown(self):
self.sleep(4)
@pytest.mark.skip()
@pytest.mark.live_test_only
@FormRecognizerPreparer()
def test_active_directory_auth(self):
token = self.generate_oauth_token()
endpoint = self.get_oauth_endpoint()
client = DocumentModelAdministrationClient(endpoint, token)
info = client.get_account_info()
assert info
@FormRecognizerPreparer()
@recorded_by_proxy
def test_dmac_auth_bad_key(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
client = DocumentModelAdministrationClient(formrecognizer_test_endpoint, AzureKeyCredential("xxxx"))
with pytest.raises(ClientAuthenticationError):
result = client.get_account_info()
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
def test_get_model_empty_model_id(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError):
result = client.get_model("")
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
def test_get_model_none_model_id(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError):
result = client.get_model(None)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
def test_delete_model_none_model_id(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError):
result = client.delete_model(None)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
def test_delete_model_empty_model_id(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError):
result = client.delete_model("")
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_account_info(self, client):
info = client.get_account_info()
assert info.document_model_limit
assert info.document_model_count
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_get_model_prebuilt(self, client, **kwargs):
model = client.get_model("prebuilt-invoice")
assert model.model_id == "prebuilt-invoice"
assert model.description is not None
assert model.created_on
assert model.api_version
assert model.tags is None
for name, doc_type in model.doc_types.items():
assert name
for key, field in doc_type.field_schema.items():
assert key
assert field["type"]
assert doc_type.field_confidence is None
@pytest.mark.skip()
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_mgmt_model(self, client, formrecognizer_storage_container_sas_url, **kwargs):
# this can be reverted to set_bodiless_matcher() after tests are re-recorded and don't contain these headers
set_custom_default_matcher(
compare_bodies=False, excluded_headers="Authorization,Content-Length,x-ms-client-request-id,x-ms-request-id"
)
poller = client.begin_build_model(formrecognizer_storage_container_sas_url, description="mgmt model")
model = poller.result()
model_from_get = client.get_model(model.model_id)
assert model.model_id == model_from_get.model_id
assert model.description == model_from_get.description
assert model.created_on == model_from_get.created_on
for name, doc_type in model.doc_types.items():
assert name in model_from_get.doc_types
for key, field in doc_type.field_schema.items():
assert key in model_from_get.doc_types[name].field_schema
assert field["type"] == model_from_get.doc_types[name].field_schema[key]["type"]
assert doc_type.field_confidence[key] == model_from_get.doc_types[name].field_confidence[key]
models_list = client.list_models()
for model in models_list:
assert model.model_id
assert model.created_on
client.delete_model(model.model_id)
with pytest.raises(ResourceNotFoundError):
client.get_model(model.model_id)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_get_list_operations(self, client, **kwargs):
operations = client.list_operations()
successful_op = None
failed_op = None
for op in operations:
assert op.operation_id
assert op.status
# FIXME check why some operations aren't returned with a percent_completed field
# assert op.percent_completed is not None
assert op.created_on
assert op.last_updated_on
assert op.kind
assert op.resource_location
if op.status == "succeeded":
successful_op = op
if op.status == "failed":
failed_op = op
# check successful op
if successful_op:
op = client.get_operation(successful_op.operation_id)
# TODO not seeing this returned at the operation level
# assert op.api_version
# assert op.tags is None
# test to/from dict
op_dict = op.to_dict()
op = ModelOperation.from_dict(op_dict)
assert op.error is None
model = op.result
assert model.model_id
# operations may or may not have descriptions
if model.description:
assert model.description
assert model.created_on
for name, doc_type in model.doc_types.items():
assert name
for key, field in doc_type.field_schema.items():
assert key
assert field["type"]
assert doc_type.field_confidence[key] is not None
# check failed op
if failed_op:
op = client.get_operation(failed_op.operation_id)
# test to/from dict
op_dict = op.to_dict()
op = ModelOperation.from_dict(op_dict)
assert op.result is None
error = op.error
assert error.code
assert error.message
assert error.details
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
def test_get_operation_bad_model_id(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError):
client.get_operation("")
with pytest.raises(ValueError):
client.get_operation(None)
@FormRecognizerPreparer()
@recorded_by_proxy
def test_get_document_analysis_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
# this can be reverted to set_bodiless_matcher() after tests are re-recorded and don't contain these headers
set_custom_default_matcher(
compare_bodies=False, excluded_headers="Authorization,Content-Length,x-ms-client-request-id,x-ms-request-id"
)
transport = RequestsTransport()
dtc = DocumentModelAdministrationClient(endpoint=formrecognizer_test_endpoint, credential=AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
with dtc:
dtc.get_account_info()
assert transport.session is not None
with dtc.get_document_analysis_client() as dac:
assert transport.session is not None
dac.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg).wait()
assert dac._api_version == DocumentAnalysisApiVersion.V2022_06_30_PREVIEW
dtc.get_account_info()
assert transport.session is not None
|
the-stack_106_14089
|
#!/usr/bin/env python3
#
# Copyright 2015 Signal Processing Devices Sweden AB. All rights reserved.
#
# Description: ADQ14 FWDAQ streaming example
# Documentation:
#
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
import sys
import time
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))+'/..')
from modules.example_helpers import *
# Record settings
number_of_records = 10
samples_per_record = 512
# Plot data if set to True
plot_data = True
# Print metadata in headers
print_headers = True
# DMA transfer buffer settings
transfer_buffer_size = 65536
num_transfer_buffers = 8
# DMA flush timeout in seconds
flush_timeout = 0.5
# Load ADQAPI
ADQAPI = adqapi_load()
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
# Enable error logging from ADQAPI
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
# Exit if no devices were found
if n_of_ADQ < 1:
print('No ADQ connected.')
ADQAPI.DeleteADQControlUnit(adq_cu)
adqapi_unload(ADQAPI)
sys.exit(1)
# Select ADQ
if n_of_ADQ > 1:
adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
adq_num = 1
print_adq_device_revisions(ADQAPI, adq_cu, adq_num)
# Set clock source
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF)
# Maximum number of channels for ADQ14 FWPD is four
max_number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup test pattern
# 0 enables the analog input from the ADCs
# > 0 enables a specific test pattern
# Note: Default is to enable a test pattern (4) and disconnect the
# analog inputs inside the FPGA.
ADQAPI.ADQ_SetTestPatternMode(adq_cu, adq_num, 4)
# Set trig mode
SW_TRIG = 1
EXT_TRIG_1 = 2
EXT_TRIG_2 = 7
EXT_TRIG_3 = 8
LVL_TRIG = 3
INT_TRIG = 4
LVL_FALLING = 0
LVL_RISING = 1
trig_type = SW_TRIG
success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trig_type)
if (success == 0):
print('ADQ_SetTriggerMode failed.')
success = ADQAPI.ADQ_SetLvlTrigLevel(adq_cu, adq_num, 0)
if (success == 0):
print('ADQ_SetLvlTrigLevel failed.')
success = ADQAPI.ADQ_SetTrigLevelResetValue(adq_cu, adq_num, 1000)
if (success == 0):
print('ADQ_SetTrigLevelResetValue failed.')
success = ADQAPI.ADQ_SetLvlTrigChannel(adq_cu, adq_num, 1)
if (success == 0):
print('ADQ_SetLvlTrigChannel failed.')
success = ADQAPI.ADQ_SetLvlTrigEdge(adq_cu, adq_num, LVL_RISING)
if (success == 0):
print('ADQ_SetLvlTrigEdge failed.')
# Setup acquisition
channels_mask = 0xf
ADQAPI.ADQ_TriggeredStreamingSetup(adq_cu, adq_num, number_of_records, samples_per_record, 0, 0, channels_mask)
ADQAPI.ADQ_SetStreamStatus(adq_cu, adq_num, 1)
# Get number of channels from device
number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup size of transfer buffers
print('Setting up streaming...')
ADQAPI.ADQ_SetTransferBuffers(adq_cu, adq_num, num_transfer_buffers, transfer_buffer_size)
# Start streaming
print('Collecting data, please wait...')
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
ADQAPI.ADQ_StartStreaming(adq_cu, adq_num)
# Allocate target buffers for intermediate data storage
target_buffers = (ct.POINTER(ct.c_int16*transfer_buffer_size)*number_of_channels)()
for bufp in target_buffers:
bufp.contents = (ct.c_int16*transfer_buffer_size)()
# Create some buffers for the full records
data_16bit = [np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16)]
# Allocate target buffers for headers
headerbuf_list = [(HEADER*number_of_records)() for ch in range(number_of_channels)]
# Create an C array of pointers to header buffers
headerbufp_list = ((ct.POINTER(HEADER*number_of_records))*number_of_channels)()
# Initiate pointers with allocated header buffers
for ch,headerbufp in enumerate(headerbufp_list):
headerbufp.contents = headerbuf_list[ch]
# Create a second level pointer to each buffer pointer,
# these will only be used to change the bufferp_list pointer values
headerbufvp_list = [ct.cast(ct.pointer(headerbufp_list[ch]), ct.POINTER(ct.c_void_p)) for ch in range(number_of_channels)]
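# Illustrative sketch (added, not part of the original example): the same second-level
# pointer trick on a tiny ctypes array. Writing through pp.contents.value rewrites the
# underlying pointer so that p is advanced by one element, which is exactly how the
# header buffer pointers are advanced further below. Kept in a function so it only runs
# if explicitly called.
def _demo_second_level_pointer():
    demo = (ct.c_int16 * 4)(10, 20, 30, 40)
    p = ct.cast(ct.pointer(demo), ct.POINTER(ct.c_int16))
    pp = ct.cast(ct.pointer(p), ct.POINTER(ct.c_void_p))
    pp.contents.value += ct.sizeof(ct.c_int16)  # advance p by one int16 element
    assert p[0] == 20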
# Allocate length output variable
samples_added = (4*ct.c_uint)()
for ind in range(len(samples_added)):
samples_added[ind] = 0
headers_added = (4*ct.c_uint)()
for ind in range(len(headers_added)):
headers_added[ind] = 0
header_status = (4*ct.c_uint)()
for ind in range(len(header_status)):
header_status[ind] = 0
# Generate triggers if software trig is used
if (trig_type == 1):
for trig in range(number_of_records):
ADQAPI.ADQ_SWTrig(adq_cu, adq_num)
print('Waiting for data...')
# Collect data until all requested records have been received
records_completed = [0, 0, 0, 0]
headers_completed = [0, 0, 0, 0]
records_completed_cnt = 0
ltime = time.time()
buffers_filled = ct.c_uint(0)
# Read out data until records_completed for ch A is number_of_records
while (number_of_records > records_completed[0]):
buffers_filled.value = 0
collect_result = 1
poll_time_diff_prev = time.time()
# Wait for next data buffer
while ((buffers_filled.value == 0) and (collect_result)):
collect_result = ADQAPI.ADQ_GetTransferBufferStatus(adq_cu, adq_num,
ct.byref(buffers_filled))
poll_time_diff = time.time()
if ((poll_time_diff - poll_time_diff_prev) > flush_timeout):
# Force flush
print('No data for {}s, flushing the DMA buffer.'.format(flush_timeout))
            status = ADQAPI.ADQ_FlushDMA(adq_cu, adq_num)
print('ADQAPI.ADQ_FlushDMA returned {}'.format(adq_status(status)))
poll_time_diff_prev = time.time()
# Fetch data and headers into target buffers
status = ADQAPI.ADQ_GetDataStreaming(adq_cu, adq_num,
target_buffers,
headerbufp_list,
channels_mask,
ct.byref(samples_added),
ct.byref(headers_added),
ct.byref(header_status))
if status == 0:
print('GetDataStreaming failed!')
sys.exit()
for ch in range(number_of_channels):
if (headers_added[ch] > 0):
# The last call to GetDataStreaming has generated header data
if (header_status[ch]):
headers_done = headers_added[ch]
else:
# One incomplete header
headers_done = headers_added[ch]-1
# Update counter counting completed records
headers_completed[ch] += headers_done
# Update the number of completed records if at least one header has completed
if (headers_done > 0):
records_completed[ch] = headerbuf_list[ch][headers_completed[ch]-1].RecordNumber + 1
# Update header pointer so that it points to the current header
headerbufvp_list[ch].contents.value += headers_done*ct.sizeof(headerbuf_list[ch]._type_)
if headers_done > 0 and (np.sum(records_completed)-records_completed_cnt) > 1000:
dtime = time.time()-ltime
if (dtime > 0):
print('{:d} {:.2f} MB/s'.format(np.sum(records_completed),
((samples_per_record
*2
*(np.sum(records_completed)-records_completed_cnt))
/(dtime))/(1024*1024)))
sys.stdout.flush()
records_completed_cnt = np.sum(records_completed)
ltime = time.time()
if (samples_added[ch] > 0 and plot_data):
# Copy channel data to continuous buffer
data_buf = np.frombuffer(target_buffers[ch].contents, dtype=np.int16, count=samples_added[ch])
data_16bit[ch] = np.append(data_16bit[ch], data_buf)
print(records_completed[0])
# Stop streaming
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
# Print received headers
if print_headers:
for ch in range(max_number_of_channels):
if number_of_records > 0:
print('------------------')
print('Headers channel {}'.format(ch))
print('------------------')
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
print('RecordStatus: {}'.format(header.RecordStatus))
print('UserID: {}'.format(header.UserID))
print('SerialNumber: {}'.format(header.SerialNumber))
print('Channel: {}'.format(header.Channel))
print('DataFormat: {}'.format(header.DataFormat))
print('RecordNumber: {}'.format(header.RecordNumber))
print('Timestamp: {} ns'.format(header.Timestamp * 0.125))
print('RecordStart: {} ns'.format(header.RecordStart * 0.125))
print('SamplePeriod: {} ns'.format(header.SamplePeriod * 0.125))
print('RecordLength: {} ns'.format(header.RecordLength * (header.SamplePeriod* 0.125)))
print('------------------')
# Plot data
if plot_data:
for ch in range(max_number_of_channels):
if number_of_records > 0:
widths = np.array([], dtype=np.uint32)
record_end_offset = 0
# Extract record lengths from headers
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
widths = np.append(widths, header.RecordLength)
# Get new figure
plt.figure(ch)
plt.clf()
# Plot data
plt.plot(data_16bit[ch].T, '.-')
# Set window title
plt.gcf().canvas.set_window_title('Channel {}'.format(ch))
# Set grid mode
            plt.grid(which='major')
# Mark records in plot
alternate_background(plt.gca(), 0, widths, labels=True)
# Show plot
plt.show()
# Delete ADQ device handle
ADQAPI.ADQControlUnit_DeleteADQ(adq_cu, adq_num)
# Delete ADQControlunit
ADQAPI.DeleteADQControlUnit(adq_cu)
print('Done.')
|
the-stack_106_14090
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from sim_model import sim
ENT_TYPE = sim.ENT_TYPE
DATA_TYPE = sim.DATA_TYPE
# create a graph
sm = sim.SIM()
# create posi
posi = sm.add_posi([1,2,3])
# create a point
point = sm.add_point(posi)
# create a pline
pline = sm.add_pline(
[posi, sm.add_posi([4,5,7]), sm.add_posi([2,2,2])],
True # closed
)
# create pgon
pgon = sm.add_pgon(
[posi, sm.add_posi([2,3,4]), sm.add_posi([8,5,0])]
)
# create a coll
coll = sm.add_coll()
sm.add_coll_ent(coll, point)
sm.add_coll_ent(coll, pline)
sm.add_coll_ent(coll, pgon)
# create attributes
sm.add_attrib(ENT_TYPE.COLLS, "aaa", DATA_TYPE.LIST)
sm.add_attrib(ENT_TYPE.PGONS, "bbb", DATA_TYPE.NUM)
# set entity attrib value
sm.set_attrib_val(coll, "aaa", [1,2,3,4,5,6])
sm.set_attrib_val(pgon, "bbb", 1.2345)
# get the attribute value
val1 = sm.get_attrib_val(coll, "aaa")
val2 = sm.get_attrib_val(pgon, "bbb")
# set model attribute values
sm.set_model_attrib_val("ccc", "This is a test")
val3 = sm.get_model_attrib_val("ccc")
# get the positions from the pgon
val4 = sm.get_ents(ENT_TYPE.POSIS)
val5 = sm.get_ents(ENT_TYPE.VERTS, pgon)
val6 = sm.get_ents(ENT_TYPE.POSIS, pline)
val7 = sm.get_ents(ENT_TYPE.COLLS, posi)
# print("RESULT", val6)
# print attrib vals
print("Attrib values:", val1, val2, val3, val4, val5, val6, val7)
# print info about the graph
print(sm.info())
json = sm.to_json_str()
with open("test.sim", "w") as f:
f.write(json)
print(json)
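# Hedged check (added, not in the original script): the exported string should be plain JSON,
# so it can be re-loaded with the standard library for a quick round-trip inspection.
import json as _json
with open("test.sim") as f:
    reloaded = _json.load(f)
print("test.sim re-loaded, top-level type:", type(reloaded).__name__)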
|
the-stack_106_14093
|
# -*- coding: utf-8 -*-
"""
@author:XuMing<[email protected]>
@description:
"""
import os
from queue import Queue
from threading import Thread
import tensorflow as tf
import text2vec
from text2vec.bert import modeling
from text2vec.bert import tokenization
from text2vec.bert.graph import optimize_graph
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
class BertVector:
def __init__(self, max_seq_len=32, batch_size=32, layer_indexes=[-2], model_dir='', output_dir=''):
"""
init BertVector
        :param batch_size: batch size for inference; default is 32, adjust to fit your memory
"""
self.max_seq_length = max_seq_len
self.layer_indexes = layer_indexes
self.gpu_memory_fraction = 1
self.model_dir = model_dir
vocab_file = os.path.join(model_dir, 'vocab.txt')
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
self.graph_path = optimize_graph(layer_indexes=layer_indexes, config_name=config_name, ckpt_name=ckpt_name,
max_seq_len=max_seq_len, output_dir=output_dir)
self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = self.get_estimator()
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
self.sentence_len = 0
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
def model_fn(features, labels, mode, params):
with tf.gfile.GFile(self.graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
input_names = ['input_ids', 'input_mask', 'input_type_ids']
output = tf.import_graph_def(graph_def,
input_map={k + ':0': features[k] for k in input_names},
return_elements=['final_encodes:0'])
return EstimatorSpec(mode=mode, predictions={
'encodes': output[0]
})
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
config.log_device_placement = False
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
params={'batch_size': self.batch_size})
def predict_from_queue(self):
prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False)
for i in prediction:
self.output_queue.put(i)
def encode(self, sentence):
self.sentence_len = len(sentence)
self.input_queue.put(sentence)
prediction = self.output_queue.get()['encodes']
return prediction
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={'unique_ids': tf.int32,
'input_ids': tf.int32,
'input_mask': tf.int32,
'input_type_ids': tf.int32},
output_shapes={
'unique_ids': (self.sentence_len,),
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'input_type_ids': (None, self.max_seq_length)}).prefetch(10))
def generate_from_queue(self):
while True:
features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer))
yield {
'unique_ids': [f.unique_id for f in features],
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'input_type_ids': [f.input_type_ids for f in features]
}
def input_fn_builder(self, features, seq_length):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
with jit_scope():
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
from tensorflow.python.estimator.model_fn import EstimatorSpec
output_spec = EstimatorSpec(mode=mode, predictions=predictions)
return output_spec
return model_fn
def convert_examples_to_features(self, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
input_masks = []
examples = self._to_example(self.input_queue.get())
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
            # if the sentence's length is more than seq_length, only use the sentence's left part
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
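            # Illustrative example (added, not from the original source), assuming the
            # toy sentence "how are you" and seq_length = 6:
            #   tokens:          [CLS] how are you [SEP]
            #   input_ids:       id(CLS) id(how) id(are) id(you) id(SEP) 0   <- zero-padded below
            #   input_mask:      1 1 1 1 1 0
            #   input_type_ids:  0 0 0 0 0 0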
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
# Where "input_ids" are tokens's index in vocabulary
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
input_masks.append(input_mask)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (example.unique_id))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
yield InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
@staticmethod
    def _to_example(sentences):
        """
        sentences to InputExample
        :param sentences: list of strings
        :return: list of InputExample
        """
        import re
unique_id = 0
for ss in sentences:
line = tokenization.convert_to_unicode(ss)
if not line:
continue
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
unique_id += 1
if __name__ == "__main__":
vector = BertVector(model_dir=os.path.join(text2vec.USER_DATA_DIR, 'chinese_L-12_H-768_A-12'),
output_dir=os.path.join(text2vec.USER_DATA_DIR, 'bert_vector'))
emb = vector.encode(['你好吗朋友', '您好呀小盆友'])
print(str(emb))
print(emb.shape)
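    # Hedged usage sketch (added): assuming `encode` returns a 2-D array of shape
    # (n_sentences, hidden_size), the two sentence vectors can be compared directly.
    import numpy as np
    v1, v2 = np.asarray(emb[0], dtype=float), np.asarray(emb[1], dtype=float)
    cos_sim = float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
    print('cosine similarity between the two sentences:', cos_sim)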
|
the-stack_106_14095
|
"""
Functions for clustering weather data and electricity pricing, and calculations of full-year or Cluster-average values
"""
import numpy as np
class Cluster:
def __init__(self):
self.algorithm = 'affinity-propagation' # Clustering algorithm
self.n_cluster = 40 # Number of clusters
self.Nmaxiter = 200 # Maximum iterations for clustering algorithm
        self.sim_hard_partitions = True     # Use hard partitioning for simulation weighting factors?
        self.mfuzzy = 2.0                   # Fuzziness exponent for "fuzzy" partitioning (used only when sim_hard_partitions = False; 2.0 is an assumed default)
        self.afp_damping = 0.5              # Damping factor for affinity propagation updates (0.5 - 1)
self.afp_preference_mult = 1.0 # Multiplier for default preference values (median of input similarities = negative Euclidean distance b/w points i and k) --> Larger multiplier = fewer clusters
self.afp_Nconverge = 10 # Number of iterations without change in solution for convergence
self.afp_enforce_Ncluster = False # Iterate on afp_preference_mult to create the number of clusters specified in n_cluster
self.afp_enforce_Ncluster_tol = 1 # Tolerance for number of clusters
self.afp_enforce_Ncluster_maxiter = 50 # Maximum number of iterations
def form_clusters(self, data):
clusters = {}
n_group = data.shape[0]
if n_group == 1:
clusters['n_cluster'] = 1
clusters['wcss'] = 0.0
clusters['index'] = np.zeros(n_group, int)
clusters['count'] = np.ones(1, int)
clusters['means'] = np.ones((1, data.shape[1])) * data
clusters['partition_matrix'] = np.ones((1, 1))
clusters['exemplars'] = np.zeros(1, int)
clusters['data_pts'] = np.zeros((1, 1), int)
return clusters
else:
if self.afp_preference_mult == 1.0: # Run with default preference
pref = None
else:
distsqr = []
for g in range(n_group):
dist = ((data[g, :] - data[g:n_group, :]) ** 2).sum(1)
distsqr = np.append(distsqr, -dist)
pref = (np.median(distsqr)) * self.afp_preference_mult
            alg = AffinityPropagation(damping=self.afp_damping, max_iter=self.Nmaxiter, convergence_iter=self.afp_Nconverge, preference=pref)
alg.fit_predict(data)
clusters['index'] = alg.cluster_index
clusters['n_cluster'] = alg.n_clusters
clusters['means'] = alg.cluster_means
clusters['wcss'] = alg.wcss
            clusters['exemplars'] = alg.exemplars
            clusters['converged'] = alg.converged  # Checked by create_clusters() to detect non-convergence
n_cluster = clusters['n_cluster']
clusters['count'] = np.zeros(n_cluster, int) # Number of data points nominally assigned to each Cluster
clusters['partition_matrix'] = np.zeros((n_group, n_cluster))
for k in range(n_cluster):
clusters['count'][k] = np.sum(clusters['index'] == k)
if self.sim_hard_partitions:
inds = np.arange(n_group)
clusters['partition_matrix'][inds, clusters['index'][inds]] = 1.0
else: # Compute "fuzzy" partition matrix
distsqr = np.zeros((n_group, n_cluster))
for k in range(n_cluster):
distsqr[:, k] = ((data - clusters['means'][k, :]) ** 2).sum(
1) # Squared distance between all data points and Cluster mean k
distsqr[distsqr == 0] = 1.e-10
sumval = (distsqr ** (-2. / (self.mfuzzy - 1))).sum(1) # Sum of dik^(-2/m-1) over all clusters k
for k in range(n_cluster):
clusters['partition_matrix'][:, k] = (distsqr[:, k] ** (2. / (self.mfuzzy - 1)) * sumval) ** -1
# Sum of wij over all data points (i) / n_group
clusters['weights'] = clusters['partition_matrix'].sum(0) / n_group
return clusters
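# Hedged usage sketch (added, not part of the original module): clustering a small
# random feature matrix with the wrapper above. Kept in a function so importing this
# module does not execute it. Note that n_cluster is only enforced by create_clusters()
# further below; form_clusters() lets affinity propagation pick the cluster count.
def _demo_form_clusters():
    rng = np.random.RandomState(0)
    data = rng.rand(50, 4)            # 50 day-groups x 4 classification metrics
    clusters = Cluster().form_clusters(data)
    # 'weights' give each exemplar day-group its share of the 50 groups
    return clusters['exemplars'], clusters['weights']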
# ============================================================================
class AffinityPropagation:
# Affinity propagation algorithm
def __init__(self, damping=0.5, max_iter=300, convergence_iter=10, preference=None):
self.damping = damping # Damping factor for update of responsibility and availability matrices (0.5 - 1)
self.max_iter = max_iter # Maximum number of iterations
# Number of iterations without change in clusters or exemplars to define convergence
self.convergence_iter = convergence_iter
# Preference for all data points to serve as exemplar.
# If None, the preference will be set to the median of the input similarities
self.preference = preference
self.random_seed = 123
        # These attributes are filled by fit_predict()
self.n_clusters = None
self.cluster_means = None
self.cluster_index = None
self.wcss = None
self.exemplars = None
self.converged = None
def compute_wcss(self, data, cluster_index, means):
# Computes the within-cluster sum-of-squares
n_clusters = means.shape[0]
self.wcss = 0.0
for k in range(n_clusters):
dist = ((data - means[k, :]) ** 2).sum(1) # Distance to Cluster k centroid
self.wcss += (dist * (cluster_index == k)).sum()
def fit_predict(self, data):
n_obs, n_features = data.shape # Number of observations and features
# Compute similarities between data points (negative of Euclidean distance)
S = np.zeros((n_obs, n_obs))
inds = np.arange(n_obs)
for p in range(n_obs):
# Negative squared Euclidean distance between pt p and all other points
S[p, :] = -((data[p, :] - data[:, :]) ** 2).sum(1)
if self.preference: # Preference is specified
S[inds, inds] = self.preference
else:
pref = np.median(S)
S[inds, inds] = pref
np.random.seed(self.random_seed)
S += 1.e-14 * S * (np.random.random_sample((n_obs, n_obs)) - 0.5)
# Initialize availability and responsibility matrices
A = np.zeros((n_obs, n_obs))
R = np.zeros((n_obs, n_obs))
exemplars = np.zeros(n_obs, bool)
q = 0
count = 0
while (q < self.max_iter) and (count < self.convergence_iter):
exemplars_prev = exemplars
update = np.zeros((n_obs, n_obs))
# Update responsibility
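            # Standard responsibility update (before damping):
            #   r(i,k) = s(i,k) - max_{k' != k} [ a(i,k') + s(i,k') ]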
M = A + S
k = M.argmax(axis=1) # Location of maximum value in each row of M
maxval = M[inds, k] # Maximum values in each row of M
update = S - np.reshape(maxval, (n_obs, 1)) # S - max value in each row
M[inds, k] = -np.inf
k2 = M.argmax(axis=1) # Location of second highest value in each row of M
maxval = M[inds, k2] # Second highest value in each row of M
update[inds, k] = S[inds, k] - maxval
R = self.damping * R + (1. - self.damping) * update
# Update availability
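            # Standard availability update (before damping):
            #   a(i,k) = min(0, r(k,k) + sum_{i' not in {i,k}} max(0, r(i',k)))  for i != k
            #   a(k,k) = sum_{i' != k} max(0, r(i',k))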
posR = R.copy()
posR[posR < 0] = 0.0 # Only positive values of R matrix
sumR = posR.sum(0)
values = sumR - np.diag(posR)
update[:] = values # Sum positive values of R over all rows (i)
update -= posR
update[:, inds] += np.diag(R)
update[update > 0] = 0.0
update[inds, inds] = values
A = self.damping * A + (1. - self.damping) * update
# Identify exemplars
exemplars = (A + R).argmax(1) == inds # Exemplar for point i is value of k that maximizes A[i,k]+R[i,k]
diff = (exemplars != exemplars_prev).sum()
if diff == 0:
count += 1
else:
count = 0
q += 1
if count < self.convergence_iter and q >= self.max_iter:
converged = False
else:
converged = True
exemplars = np.where(exemplars == True)[0]
found_exemplars = exemplars.shape[0]
# Modify final set of clusters to ensure that the chosen exemplars minimize wcss
S[inds, inds] = 0.0 # Replace diagonal entries in S with 0
S[:, :] = -S[:, :] # Revert back to actual distance
clusters = S[:, exemplars].argmin(1) # Assign points to clusters based on distance to the possible exemplars
for k in range(found_exemplars): # Loop over clusters
pts = np.where(clusters == k)[0] # All points in Cluster k
n_pts = len(pts)
dist_sum = np.zeros(n_pts)
for p in range(n_pts):
# Calculate total distance between point p and all other points in Cluster k
dist_sum[p] += (S[pts[p], pts]).sum()
i = dist_sum.argmin()
exemplars[k] = pts[i] # Replace exemplar k with point that minimizes wcss
# Assign points to clusters based on distance to the possible exemplars
cluster_means = data[exemplars, :]
cluster_index = S[:, exemplars].argmin(1) # TODO: this is not being used, this seems to be the same as clusters
cluster_index = clusters
self.compute_wcss(data, cluster_index, cluster_means)
self.n_clusters = found_exemplars
self.cluster_means = cluster_means
self.cluster_index = cluster_index
self.exemplars = exemplars
self.converged = converged
return self
def read_weather(weather_file):
weather = {'year': [],
'month': [],
'day': [],
'hour': [],
'ghi': [],
'dhi': [],
'dni': [],
'tdry': [],
'wspd': []}
zones = {-7: 'MST', -8: 'US/Pacific', 0: 'UTC'}
# Get header info
header = np.genfromtxt(weather_file, dtype=str, delimiter=',', max_rows=1, skip_header=1)
lat = float(header[5])
lon = float(header[6])
z = int(header[7])
alt = float(header[8])
# Read in weather data
labels = {'year': ['Year'],
'month': ['Month'],
'day': ['Day'],
'hour': ['Hour'],
'ghi': ['GHI'],
'dhi': ['DHI'],
'dni': ['DNI'],
'tdry': ['Tdry', 'Temperature'],
'wspd': ['Wspd', 'Wind Speed']}
header = np.genfromtxt(weather_file, dtype=str, delimiter=',', max_rows=1, skip_header=2)
data = np.genfromtxt(weather_file, dtype=float, delimiter=',', skip_header=3)
for k in labels.keys():
found = False
for j in labels[k]:
if j in header:
found = True
c = header.tolist().index(j)
weather[k] = data[:, c]
if not found:
print('Failed to find data for ' + k + ' in weather file')
return weather
def calc_metrics(weather_file, Ndays=2, ppa=None, sfavail=None, user_weights=None, user_divisions=None, normalize=True,
stow_limit=None):
"""
weather_file = file name containing weather data
Ndays = number of simulation days in each group
ppa = ppa multiplier array with same time step as weather file
sfavail = solar field availability with same time step as weather file
user_weights = user-selected metric weights
user_divisions = user-specified # of daily time-domain divisions per metric
normalize = calculate metrics after normalization to the maximum value?
    stow_limit = wind velocity (m/s) for heliostat stow limit.
                 If specified, GHI in hours with wind velocity > stow limit
                 will be set to zero before calculating clustering metrics
"""
weights = {'avgghi': 0.,
'avgghi_prev': 0.,
'avgghi_next': 0.,
# 'clearsky': 0.,
'avgt': 0.,
# 'avg_sfavail': 0.,
'avgwspd': 0.,
'avgwspd_prev': 0.,
'avgwspd_next': 0.,
'avgppa': 0.,
'avgppa_prev': 0,
'avgppa_next': 0} # Weighting factors
divisions = {'avgghi': 1,
'avgghi_prev': 1,
'avgghi_next': 1,
'avgt': 1,
'avgwspd': 1,
'avgwspd_prev': 1,
'avgwspd_next': 1,
'avgppa': 1,
'avgppa_prev': 1,
'avgppa_next': 1} # Integer # of divisions per day
# Hourly calculation boundaries for classification metrics: 'summer_daylight' = daylight hours at summer solstice
bounds = {'avgghi': 'summer_daylight',
'avgghi_prev': 'summer_daylight',
'avgghi_next': 'summer_daylight',
'avgt': 'fullday',
'avgwspd': 'fullday',
'avgwspd_prev': 'fullday',
'avgwspd_next': 'fullday',
'avgppa': 'fullday',
'avgppa_prev': 'fullday',
'avgppa_next': 'fullday'}
if user_weights is not None and user_divisions is not None: # User-specified clustering inputs
for key in weights.keys():
weights[key] = user_weights[key]
for key in divisions.keys():
divisions[key] = int(user_divisions[key])
else: # Default case
# Weighting factors -> if 0, metric will not count in clusters
weights = {'avgghi': 1.,
'avgghi_prev': 1.,
'avgghi_next': 0.,
'avgt': 0.,
'avgwspd': 1.,
'avgwspd_prev': 1.,
'avgwspd_next': 0.,
'avgppa': 1.,
'avgppa_prev': 1.,
'avgppa_next': 0.}
# Integer # of divisions per day
divisions = {'avgghi': 4,
'avgghi_prev': 1,
'avgghi_next': 1,
'avgt': 1,
'avgwspd': 4,
'avgwspd_prev': 1,
'avgwspd_next': 1,
'avgppa': 4,
'avgppa_prev': 1,
'avgppa_next': 1}
# Read in weather data, prices, and solar field availability
hourly_data = read_weather(weather_file)
n_pts = len(hourly_data['ghi'])
n_pts_day = int(n_pts / 365)
# Replace ghi at all points with wind speed > stow limit
if stow_limit:
hourly_data['ghi'][hourly_data['wspd'] > stow_limit] = 0.0
# TODO: This could be used for both PV 1- or 2-axis tracking system and wind turbines
# (however, will need separate inputs)
# Read in PPA price data
hourly_data['ppa'] = np.ones(n_pts)
if ppa is None:
if weights['avgppa'] > 0 or weights['avgppa_prev'] > 0 or weights['avgppa_next'] > 0:
print('Warning: PPA price multipliers were not provided. ' +
'Classification metrics will be calculated with a uniform multiplier.')
else:
if len(ppa) == n_pts:
hourly_data['ppa'] = np.array(ppa)
else:
print('Warning: Specified ppa multiplier array and data in weather file have different lengths. ' +
'Classification metrics will be calculated with a uniform multiplier')
# TODO: REMOVED FOR NOW - May want to add this back for CSP
# read in solar field availability data (could be adapted for wind farm or pv field availability)
# hourly_data['sfavail'] = np.ones((n_pts))
# if sfavail is None:
# if weights['avg_sfavail']>0:
# print('Warning: solar field availability was not provided.
# Weighting factor for average solar field availability will be reset to zero')
# weights['avg_sfavail'] = 0.0
# else:
# if len(sfavail) == n_pts:
# hourly_data['sfavail'] = np.array(sfavail)
# else:
# print('Warning: Specified solar field availability array and data in weather file have different lengths.
# Weighting factor for average solar field availability will be reset to zero')
# weights['avg_sfavail'] = 0.0
# Identify "daylight" hours
daylight_pts = np.zeros((365, 2), int)
for d in range(365):
        # Points in day d with nonzero ghi
nonzero = np.nonzero(hourly_data['ghi'][d * n_pts_day:(d + 1) * n_pts_day])[0]
daylight_pts[d, 0] = nonzero[0] # First morning point with measurable sunlight
daylight_pts[d, 1] = nonzero[-1] + 1 # First evening point without measurable sunlight
# Calculate daily values for selected classification metrics
daily_metrics = {'avgghi': [],
'avgghi_prev': [],
'avgghi_next': [],
'avgt': [],
'avgwspd': [],
'avgwspd_prev': [],
'avgwspd_next': [],
'avgppa': [],
'avgppa_prev': [],
'avgppa_next': []}
datakeys = {'avgghi': 'ghi',
'avgghi_prev': 'ghi',
'avgghi_next': 'ghi',
'avgt': 'tdry',
'avgwspd': 'wspd',
'avgwspd_prev': 'wspd',
'avgwspd_next': 'wspd',
'avgppa': 'ppa',
'avgppa_prev': 'ppa',
'avgppa_next': 'ppa'}
n_metrics = 0
for key in weights.keys():
if weights[key] > 0.0: # Metric weighting factor is non-zero
n_div = divisions[key] # Number of divisions per day
daily_metrics[key] = np.zeros((365, n_div))
if '_prev' in key or '_next' in key:
n_metrics += n_div # TODO: should this *Nnext or *Nprev depending?? (This assumes 1 day for each)
else:
n_metrics += n_div * Ndays
# Determine total number of hours considered in metric calculations
if bounds[key] == 'fullday':
n_pts = n_pts_day
p1 = 0
elif bounds[key] == 'summer_daylight':
n_pts = daylight_pts[172, 1] - daylight_pts[172, 0]
p1 = daylight_pts[172, 0]
# Calculate average value in each division
# (Averages with non-integer number of time points in a division are computed from weighted averages)
n = float(n_pts) / n_div # Number of time points per division
pts = []
wts = []
for i in range(n_div):
pstart = i * n # Start time pt
pend = (i + 1) * n # End time pt
# Number of discrete hourly points included in the time period average
npt = int(pend) - int(pstart) + 1
# Discrete points which are at least partially included in the time period average
pts.append(np.linspace(int(pstart), int(pend), npt, dtype=int))
wts.append(1. / n * np.ones(npt))
wts[i][0] = float(1.0 - (pstart - int(pstart))) / n # Weighting factor for first point
wts[i][npt - 1] = float(pend - int(pend)) / n # Weighting factor for last point
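                # Worked example (added): with n_pts = 14 daylight hours and n_div = 4,
                # n = 3.5 points per division, so division 0 averages hours 0-3 with
                # weights [1, 1, 1, 0.5]/3.5, i.e. (h0 + h1 + h2 + 0.5*h3)/3.5.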
# Calculate metrics for each day and each division
for d in range(365):
for i in range(n_div):
for h in range(len(pts[i])): # Loop over hours which are at least partially contained in division i
if pts[i][h] == n_pts:
# Hour falls outside of allowed number of hours in the day
# (allowed as long as weighting factor is 0)
if wts[i][h] > 0.0:
print('Error calculating weighted average for key ' + key + ' and division ' + str(i))
else:
p = d * n_pts_day + p1 + pts[i][h] # Point in yearly array
daily_metrics[key][d, i] += (hourly_data[datakeys[key]][p] * wts[i][h])
# Normalize daily metrics
if normalize:
max_metric = daily_metrics[key].max()
min_metric = daily_metrics[key].min()
daily_metrics[key] = (daily_metrics[key] - min_metric) / (max_metric - min_metric)
# Create arrays of classification data for simulation days
feature_order = ['avgghi',
'avgghi_prev',
'avgghi_next',
'avgt',
'avgwspd',
'avgwspd_prev',
'avgwspd_next',
'avgppa',
'avgppa_prev',
'avgppa_next'] # Order in which data features will be created #TODO: Does this order matter?
n_group = int((363. / Ndays)) # Number of daily ghi groupings (first and last days of the year are excluded)
data = np.zeros((n_group, int(n_metrics))) # Classification data for clustering level j
for g in range(n_group):
f = 0
for key in feature_order:
if weights[key] > 0.0: # Weighting factor > 0
n_div = divisions[key]
if '_prev' in key:
days = [g * Ndays]
elif '_next' in key:
days = [(g + 1) * Ndays + 1]
else:
days = np.arange(g * Ndays + 1, (g + 1) * Ndays + 1)
for d in days:
data[g, f:f + n_div] = daily_metrics[key][d, :] * weights[key]
f += n_div
# Evaluate subset of classification metrics for days at beginning and end of the year
# (not included as "simulated" days)
data_first = None
data_last = None
if Ndays != 2:
print('Extra classification metrics for first/last days are currently only defined for Ndays = 2')
else:
data_firstlast = np.zeros((2, n_metrics))
for p in range(2): # Metrics for first and last days
d1 = 0
if p == 1:
d1 = 363
f = 0
for key in feature_order:
if weights[key] > 0.0: # Weighting factor > 0
n_div = divisions[key]
days = [d1, d1 + 1]
if key == 'avgghi_prev' or key == 'avgppa_prev': # TODO: Do I need to add avgwspd_prev and _next?
days = [d1 - 1]
elif key == 'avgghi_next' or key == 'avgppa_next':
days = [d1 + 2]
for d in days:
if (d >= 0) and (d < 365):
data_firstlast[p, f:f + n_div] = daily_metrics[key][d, :] * weights[key]
else:
data_firstlast[p, f:f + n_div] = -1.e8 # TODO: Ask Janna why do this?
f += n_div
data_first = data_firstlast[0, :]
data_last = data_firstlast[1, :]
classification_data = {'data': data, 'n_metrics': n_metrics, 'firstday': data_first, 'lastday': data_last}
return classification_data
def create_clusters(data, cluster_inputs, verbose=False):
# Create clusters from classification data.
# Includes iterations of affinity propagation algorithm to create desired number of clusters if specified.
if cluster_inputs.algorithm == 'affinity-propagation' and cluster_inputs.afp_enforce_Ncluster:
maxiter = cluster_inputs.afp_enforce_Ncluster_maxiter
Ntarget = cluster_inputs.n_cluster
tol = cluster_inputs.afp_enforce_Ncluster_tol
urf = 1.0
mult = 1.0
mult_prev = 1.0
Nc_prev = 0
i = 0
finished = False
while i < maxiter and not finished:
cluster_inputs.afp_preference_mult = mult
clusters = cluster_inputs.form_clusters(data)
converged = True
if 'converged' in clusters.keys():
converged = clusters['converged'] # Did affinity propagation algorithm converge?
if verbose:
print('Formed %d clusters with preference multiplier %f' % (clusters['n_cluster'], mult))
if not converged:
print('Affinity propagation algorithm did not converge within the maximum allowable iterations')
# Don't use this solution to create next guess for preference multiplier
# -> increase maximum iterations and damping and try again
if not converged:
cluster_inputs.afp_damping += 0.05
cluster_inputs.afp_damping = min(cluster_inputs.afp_damping, 0.95)
if verbose:
print('Damping factor increased to %f' % (cluster_inputs.afp_damping))
else:
# Algorithm converged -> use this solution
# and revert back to original damping and maximum number of iterations for next guess
Nc = clusters['n_cluster']
if abs(clusters['n_cluster'] - Ntarget) <= tol:
finished = True
else:
if Nc_prev == 0 or Nc == Nc_prev:
# First successful iteration, or no change in clusters with change in preference multiplier
mult_new = mult * float(clusters['n_cluster']) / Ntarget
else:
dNcdmult = float(Nc - Nc_prev) / float(mult - mult_prev)
mult_new = mult - urf * float(Nc - Ntarget) / dNcdmult
if mult_new <= 0:
mult_new = mult * float(clusters['n_cluster']) / Ntarget
mult_prev = mult
Nc_prev = Nc
mult = mult_new
i += 1
if not finished:
print('Maximum number of iterations reached without finding %d clusters. '
'The current number of clusters is %d' % (Ntarget, clusters['n_cluster']))
else:
clusters = cluster_inputs.form_clusters(data)
if verbose:
print(' Created %d clusters' % (clusters['n_cluster']))
# Sort clusters in order of lowest to highest exemplar points
n_group = data.shape[0] # Number of data points
n_cluster = clusters['n_cluster']
inds = clusters['exemplars'].argsort()
clusters_sorted = {}
for key in clusters.keys():
if key in ['n_cluster', 'wcss']:
clusters_sorted[key] = clusters[key]
else:
clusters_sorted[key] = np.empty_like(clusters[key])
for i in range(n_cluster):
k = inds[i]
clusters_sorted['partition_matrix'][:, i] = clusters['partition_matrix'][:, k]
for key in ['count', 'weights', 'exemplars']:
clusters_sorted[key][i] = clusters[key][k]
for key in ['means']:
clusters_sorted[key][i, :] = clusters[key][k, :]
for g in range(n_group):
k = clusters['index'][g]
clusters_sorted['index'][g] = inds.argsort()[k]
return clusters_sorted
def adjust_weighting_firstlast(data, data_first, data_last, clusters, Ndays=2):
"""
Adjust Cluster weighting to account for first/last days of the year
(excluded from original clustering algorithm because these days cannot be used as exemplar points)
data = data for clustering (Npts x Nmetrics)
data_first = data for neglected points at the beginning of the year
data_last = data for neglected points at the end of the year
clusters = clusters formed from original data set
Ndays = # of consecutive simulation days
"""
if Ndays != 2:
        print('Cluster weighting factor adjustment to include first/last days is not currently defined for ' + str(
            Ndays) + ' consecutive simulation days. Cluster weights will not include days excluded from the original clustering algorithm')
clusters['weights_adjusted'] = clusters['weights']
return [clusters, -1, -1]
else:
ngroup, nfeatures = data.shape
n_clusters = clusters['n_cluster']
dist_first = np.zeros(n_clusters)
dist_last = np.zeros(n_clusters)
for k in range(n_clusters):
for f in range(nfeatures):
if data_first[f] > -1.e7: # Data feature f is defined for first set
dist_first[k] += (data_first[f] - clusters['means'][k, f]) ** 2
if data_last[f] > -1.e7:
dist_last[k] += (data_last[f] - clusters['means'][k, f]) ** 2
kfirst = dist_first.argmin() # Cluster which best represents first days
klast = dist_last.argmin() # Cluster which best represents last days
# Recompute Cluster weights
ngroup_adj = ngroup + 1.5 # Adjusted total number of groups
s = clusters['partition_matrix'].sum(0)
s[kfirst] = s[kfirst] + 0.5
s[klast] = s[klast] + 1
clusters['weights_adjusted'] = s / ngroup_adj
return [clusters, kfirst, klast]
def compute_cluster_avg_from_timeseries(hourly, partition_matrix, Ndays, Nprev=1, Nnext=1, adjust_wt=False, k1=None,
k2=None):
"""
# Compute Cluster-average hourly values from full-year hourly array and partition matrix
hourly = full annual array of data
partition_matrix = partition matrix from clustering (rows = data points, columns = clusters)
Ndays = number of simulated days (not including previous/next)
Nprev = number of previous days that will be included in the simulation
Nnext = number of subsequent days that will be included in the simulation
adjust_wt = adjust calculations with first/last days allocated to a Cluster
k1 = Cluster to which first day belongs
k2 = Cluster to which last day belongs
ouput = list of Cluster-average hourly arrays for the (Nprev+Ndays+Nnext) days simulated within the Cluster
"""
Ngroup, Ncluster = partition_matrix.shape
Ndaystot = Ndays + Nprev + Nnext # Number of days that will be included in the simulation (including previous / next days)
Nptshr = int(len(hourly) / 8760)
avg = np.zeros((Ncluster, Ndaystot * 24 * Nptshr))
for g in range(Ngroup):
d = g * Ndays + 1 # First day to be counted in simulation group g
d1 = max(0, d - Nprev) # First day to be included in simulation group g (Nprev days before day d if possible)
Nprev_actual = d - d1 # Actual number of previous days that can be included
Ndaystot_actual = Ndays + Nprev_actual + Nnext
h = d1 * 24 * Nptshr # First time point included in simulation group g
if Nprev == Nprev_actual:
vals = np.array(hourly[
h:h + Ndaystot * 24 * Nptshr]) # Hourly values for only the days included in the simulation for group g
else: # Number of previous days was reduced (only occurs at beginning of the year)
Nvoid = Nprev - Nprev_actual # Number of previous days which don't exist in the data file (only occurs when Nprev >1)
vals = []
for v in range(Nvoid): # Days for which data doesn't exist
vals = np.append(vals, hourly[0:24 * Nptshr]) # Use data from first day
vals = np.append(vals, hourly[h:h + Ndaystot_actual * 24 * Nptshr])
for k in range(Ncluster):
avg[k, :] += vals * partition_matrix[
g, k] # Sum of hourly array * partition_matrix value for Cluster k over all points (g)
for k in range(Ncluster):
avg[k, :] = avg[k, :] / partition_matrix.sum(0)[
k] # Divide by sum of partition matrix over all groups to normalize
if adjust_wt and Ndays == 2: # Adjust averages to include first/last days of the year
avgnew = avg[k1, Nprev * 24 * Nptshr:(Nprev + 1) * 24 * Nptshr] * partition_matrix.sum(0)[
k1] # Revert back to non-normalized values for first simulation day in which results will be counted
avgnew += hourly[0:24 * Nptshr] # Update values to include first day
avg[k1, Nprev * 24 * Nptshr:(Nprev + 1) * 24 * Nptshr] = avgnew / (partition_matrix.sum(0)[
k1] + 1) # Normalize values for first day and insert back into average array
avgnew = avg[k2, 0:(Ndays + Nprev) * 24 * Nptshr] * partition_matrix.sum(0)[
k2] # Revert back to non-normalized values for the previous day and two simulated days
avgnew += hourly[
(363 - Nprev) * 24 * Nptshr:365 * 24 * Nptshr] # Update values to include the last days of the year
avg[k2, 0:(Ndays + Nprev) * 24 * Nptshr] = avgnew / (
partition_matrix.sum(0)[k2] + 1) # Normalize values and insert back into average array
return avg.tolist()
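# Hedged usage sketch (added, not part of the original module): Cluster-averaging a
# synthetic hourly series with a hard partition of 6 two-day groups into 2 clusters.
# Kept in a function so importing this module does not run it.
def _demo_cluster_avg():
    hourly = np.arange(8760, dtype=float)            # stand-in for a full-year hourly series
    partition = np.zeros((6, 2))
    partition[:3, 0] = 1.0                           # groups 0-2 -> cluster 0
    partition[3:, 1] = 1.0                           # groups 3-5 -> cluster 1
    return compute_cluster_avg_from_timeseries(hourly, partition, Ndays=2)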
def setup_clusters(weather_file, ppamult, n_clusters, Ndays=2, Nprev=1, Nnext=1, user_weights=None, user_divisions=None):
# Clustering inputs that have no dependence on independent variables
algorithm = 'affinity-propagation'
hard_partitions = True
afp_enforce_Ncluster = True
# Calculate classification metrics
ret = calc_metrics(weather_file=weather_file, Ndays=Ndays, ppa=ppamult, user_weights=user_weights,
user_divisions=user_divisions, stow_limit=None)
data = ret['data']
data_first = ret['firstday']
data_last = ret['lastday']
# Create clusters
cluster_ins = Cluster()
cluster_ins.algorithm = algorithm
cluster_ins.n_cluster = n_clusters
cluster_ins.sim_hard_partitions = hard_partitions
cluster_ins.afp_enforce_Ncluster = afp_enforce_Ncluster
clusters = create_clusters(data, cluster_ins)
sim_start_days = (1 + clusters['exemplars'] * Ndays).tolist()
# Adjust weighting for first and last days
ret = adjust_weighting_firstlast(data, data_first, data_last, clusters, Ndays)
clusters = ret[0]
firstpt_cluster = ret[1]
lastpt_cluster = ret[2]
# Calculate Cluster-average PPA multipliers and solar field adjustment factors
avg_ppamult = compute_cluster_avg_from_timeseries(ppamult, clusters['partition_matrix'], Ndays=Ndays, Nprev=Nprev,
Nnext=Nnext, adjust_wt=True, k1=firstpt_cluster,
k2=lastpt_cluster)
cluster_inputs = {}
cluster_inputs['exemplars'] = clusters['exemplars']
cluster_inputs['weights'] = clusters['weights_adjusted']
cluster_inputs['day_start'] = sim_start_days
cluster_inputs['partition_matrix'] = clusters['partition_matrix']
cluster_inputs['first_pt_cluster'] = firstpt_cluster
cluster_inputs['last_pt_cluster'] = lastpt_cluster
cluster_inputs['avg_ppamult'] = avg_ppamult
return cluster_inputs
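# Hedged usage sketch (added, not part of the original module): end-to-end call with a
# hypothetical weather file name and flat PPA multipliers; adjust names to your data.
def _demo_setup_clusters(weather_file="my_weather.csv"):   # hypothetical file name
    ppamult = np.ones(8760)                                # flat price multipliers
    inputs = setup_clusters(weather_file, ppamult, n_clusters=20)
    return inputs['day_start'], inputs['weights']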
def create_annual_array_with_cluster_average_values(hourly, cluster_average, start_days, Nsim_days, Nprev=1, Nnext=1,
overwrite_surrounding_days=False):
"""
# Create full year array of hourly data with sections corresponding to Cluster exemplar simulations overwritten
with Cluster-average values
hourly = full year of hourly input data
cluster_average = groups of Cluster-average input data
start_days = list of Cluster start days
Nsim_days = list of number of days simulated within each Cluster
Nprev = number of previous days included in the simulation
    Nnext = number of subsequent days included in the simulation
"""
Ng = len(start_days)
output = hourly
Nptshr = int(len(hourly) / 8760)
Nptsday = Nptshr * 24
count_days = []
for g in range(Ng):
for d in range(Nsim_days[g]):
count_days.append(start_days[g] + d)
for g in range(Ng): # Number of simulation groupings
Nday = Nsim_days[g] # Number of days counted in simulation group g
Nsim = Nsim_days[g] + Nprev + Nnext # Number of simulated days in group g
for d in range(Nsim): # Days included in simulation for group g
day_of_year = (start_days[g] - Nprev) + d
if d >= Nprev and d < Nprev + Nday: # Days that will be counted in results
for h in range(Nptsday):
output[day_of_year * Nptsday + h] = cluster_average[g][d * Nptsday + h]
else: # Days that will not be counted in results
if overwrite_surrounding_days:
if day_of_year not in count_days and day_of_year >= 0 and day_of_year < 365:
for h in range(Nptsday):
output[day_of_year * Nptsday + h] = cluster_average[g][d * Nptsday + h]
return output
def compute_annual_array_from_clusters(exemplardata, clusters, Ndays, adjust_wt=False, k1=None, k2=None, dtype=float):
"""
# Create full year hourly array from hourly array containing only data at exemplar points
exemplardata = full-year hourly array with data existing only at days within exemplar groupings
clusters = Cluster information
Ndays = number of consecutive simulation days within each group
adjust_wt = adjust calculations with first/last days allocated to a Cluster
k1 = Cluster to which first day belongs
k2 = Cluster to which last day belongs
"""
npts = len(exemplardata)
fulldata = np.zeros((npts))
ngroup, ncluster = clusters['partition_matrix'].shape
nptshr = int(npts / 8760)
nptsday = nptshr * 24
data = np.zeros((nptsday * Ndays, ncluster)) # Hourly data for each Cluster exemplar
for k in range(ncluster):
d = clusters['exemplars'][k] * Ndays + 1 # Starting days for each exemplar grouping
data[:, k] = exemplardata[d * nptsday:(d + Ndays) * nptsday]
for g in range(ngroup):
d = g * Ndays + 1 # Starting day for data group g
avg = (clusters['partition_matrix'][g, :] * data).sum(
1) # Sum of partition matrix x exemplar data points for each hour
fulldata[d * nptsday:(d + Ndays) * nptsday] = avg
# Fill in first/last days
if adjust_wt and k1 >= 0 and k2 >= 0 and Ndays == 2:
d = (clusters['exemplars'][k1]) * Ndays + 1 # Starting day for group to which day 0 is assigned
fulldata[0:nptsday] = fulldata[d * nptsday:(d + 1) * nptsday]
d = (clusters['exemplars'][k2]) * Ndays + 1 # Starting day for group to which days 363 and 364 are assigned
fulldata[363 * nptsday:(363 + Ndays) * nptsday] = fulldata[d * nptsday:(d + Ndays) * nptsday]
else:
navg = 5
if max(fulldata[0:24]) == 0: # No data for first day of year
print(
'First day of the year was not assigned to a Cluster and will be assigned average generation profile from the next ' + str(
navg) + ' days.')
hourly_avg = np.zeros((nptsday))
for d in range(1, navg + 1):
for h in range(24 * nptshr):
hourly_avg[h] += fulldata[d * nptsday + h] / navg
fulldata[0:nptsday] = hourly_avg
nexclude = 364 - ngroup * Ndays # Number of excluded days at the end of the year
if nexclude > 0:
h1 = 8760 * nptshr - nexclude * nptsday # First excluded hour at the end of the year
if max(fulldata[h1: h1 + nexclude * nptsday]) == 0:
print('Last ' + str(
nexclude) + ' days were not assigned to a Cluster and will be assigned average generation profile from prior ' + str(
navg) + ' days.')
hourly_avg = np.zeros((nexclude * nptsday))
d1 = 365 - nexclude - navg # First day to include in average
for d in range(d1, d1 + navg):
for h in range(nptsday):
hourly_avg[h] += fulldata[d * nptsday + h] / navg
fulldata[h1: h1 + nexclude * nptsday] = hourly_avg
if dtype is bool:
fulldata = np.array(fulldata, dtype=bool)
return fulldata.tolist()
def combine_consecutive_exemplars(days, weights, avg_ppamult, avg_sfadjust, Ndays=2, Nprev=1, Nnext=1):
"""
Combine consecutive exemplars into a single simulation
days = starting days for simulations (not including previous days)
weights = Cluster weights
avg_ppamult = average hourly ppa multipliers for each Cluster (note: arrays include all previous and subsequent days)
avg_sfadjust = average hourly solar field adjustment factors for each Cluster (note: arrays include all previous and subsequent days)
Ndays = number of consecutive days for which results will be counted
Nprev = number of previous days which are included before simulation days
Nnext = number of subsequent days which are included after simulation days
"""
Ncombine = sum(np.diff(
days) == Ndays) # Number of simulation groupings that can be combined (starting days represent consecutive groups)
Nsim = len(days) - Ncombine # Number of simulation grouping after combination
Nptshr = int(len(avg_ppamult[0]) / ((Ndays + Nprev + Nnext) * 24)) # Number of points per hour in input arrays
group_index = np.zeros((len(days)))
start_days = np.zeros((Nsim), int)
sim_days = np.zeros((Nsim), int)
g = -1
for i in range(len(days)):
if i == 0 or days[i] - days[i - 1] != Ndays: # Day i starts new simulation grouping
g += 1
start_days[g] = days[i]
sim_days[g] += Ndays
group_index[i] = g
group_weight = []
group_avgppa = []
group_avgsfadj = []
h1 = Nprev * 24 * Nptshr # First hour of "simulation" day in any Cluster
h2 = (Ndays + Nprev) * 24 * Nptshr # Last hour of "simulation" days in any Cluster
hend = (Ndays + Nprev + Nnext) * 24 * Nptshr # Last hour of "next" day in any Cluster
for i in range(len(days)):
g = group_index[i]
if i == 0 or g != group_index[i - 1]: # Start of new group
wt = [float(weights[i])]
avgppa = avg_ppamult[i][0:h2]
avgsfadj = avg_sfadjust[i][0:h2]
else: # Continuation of previous group
wt.append(weights[i])
avgppa = np.append(avgppa, avg_ppamult[i][h1:h2])
avgsfadj = np.append(avgsfadj, avg_sfadjust[i][h1:h2])
if i == len(days) - 1 or g != group_index[i + 1]: # End of group
avgppa = np.append(avgppa, avg_ppamult[i][h2:hend])
avgsfadj = np.append(avgsfadj, avg_sfadjust[i][h2:hend])
group_weight.append(wt)
group_avgppa.append(avgppa.tolist())
group_avgsfadj.append(avgsfadj.tolist())
combined = {}
combined['start_days'] = start_days.tolist()
combined['Nsim_days'] = sim_days.tolist()
combined['avg_ppa'] = group_avgppa
combined['avg_sfadj'] = group_avgsfadj
combined['weights'] = group_weight
return combined
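# Illustrative usage (not part of the original module; the values below are hypothetical and
# only show the expected shapes): with Ndays=2, Nprev=1, Nnext=1 and hourly data (Nptshr=1),
# each avg_ppamult/avg_sfadjust entry spans (2+1+1)*24 = 96 points. Consecutive starting days
# (e.g. 10 and 12) are merged into one simulation group:
#   days = [10, 12, 40]
#   weights = [0.05, 0.07, 0.10]
#   avg_ppamult = [[1.0] * 96, [1.1] * 96, [0.9] * 96]
#   avg_sfadjust = [[1.0] * 96, [1.0] * 96, [1.0] * 96]
#   combined = combine_consecutive_exemplars(days, weights, avg_ppamult, avg_sfadjust)
#   # combined['start_days'] -> [10, 40]
#   # combined['Nsim_days']  -> [4, 2]
#   # combined['weights']    -> [[0.05, 0.07], [0.10]]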
|
the-stack_106_14098
|
########################################
# Changes compared to 30_11_dDQN_light_tweak71.py
# 01.
# lr_optimizer = 7.3e-4
# 02.
# discount_rate = 0.98
# 03.
# max_replay_len = 30_000
########################################
import sys
import numpy as np
#import pandas as pd
import datetime
import json
from array import *
import os
import math
from random import randrange
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
import tensorflow.keras as keras
#import tensorflow.compat.v1 as tf
#from tensorflow.compat.v1.keras import backend as K
#tf.disable_v2_behavior()
import tensorflow as tf
from tensorflow.keras import backend as K
import constants
import non_RL_agent
import non_RL_agent02
import non_RL_agent03
import non_RL_agent04
import non_RL_agent05
import non_RL_agent06
n_episodes = 500_000
#n_epsilon_decay = int(n_episodes*.6)
#n_epsilon_decay = int(n_episodes*.805)
#n_epsilon_decay = 10**6 / 0.99
n_epsilon_decay = int(n_episodes // 50)
n_episodes_buf_fill = 10_000
batch_size = 32
discount_rate = 0.98
#lr_optimizer = 2.5e-4
lr_optimizer = 7.3e-4
#loss_fn = keras.losses.mean_squared_error
loss_fn = keras.losses.Huber()
max_replay_len = 30_000
#Classes in GAME_SOCKET_DUMMY.py
class ObstacleInfo:
# initial energy for obstacles: Land (key = 0): -1, Forest(key = -1): 0 (random), Trap(key = -2): -10, Swamp (key = -3): -5
types = {0: -1, -1: 0, -2: -10, -3: -5}
def __init__(self):
self.type = 0
self.posx = 0
self.posy = 0
self.value = 0
class GoldInfo:
def __init__(self):
self.posx = 0
self.posy = 0
self.amount = 0
def loads(self, data):
golds = []
for gd in data:
g = GoldInfo()
g.posx = gd["posx"]
g.posy = gd["posy"]
g.amount = gd["amount"]
golds.append(g)
return golds
class PlayerInfo:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self, id):
self.playerId = id
self.score = 0
self.energy = 0
self.posx = 0
self.posy = 0
self.lastAction = -1
self.status = PlayerInfo.STATUS_PLAYING
self.freeCount = 0
class GameInfo:
def __init__(self):
self.numberOfPlayers = 1
self.width = 0
self.height = 0
self.steps = 100
self.golds = []
self.obstacles = []
def loads(self, data):
m = GameInfo()
m.width = data["width"]
m.height = data["height"]
m.golds = GoldInfo().loads(data["golds"])
m.obstacles = data["obstacles"]
m.numberOfPlayers = data["numberOfPlayers"]
m.steps = data["steps"]
return m
class UserMatch:
def __init__(self):
self.playerId = 1
self.posx = 0
self.posy = 0
self.energy = 50
self.gameinfo = GameInfo()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
class StepState:
def __init__(self):
self.players = []
self.golds = []
self.changedObstacles = []
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
#Main class in GAME_SOCKET_DUMMY.py
class GameSocket:
bog_energy_chain = {-5: -20, -20: -40, -40: -100, -100: -100}
def __init__(self):
self.stepCount = 0
self.maxStep = 0
self.mapdir = "Maps" # where to load all pre-defined maps
self.mapid = ""
self.userMatch = UserMatch()
self.user = PlayerInfo(1)
self.stepState = StepState()
self.maps = {} # key: map file name, value: file content
self.map = [] # running map info: 0->Land, -1->Forest, -2->Trap, -3:Swamp, >0:Gold
self.energyOnMap = [] # self.energyOnMap[x][y]: <0, amount of energy which player will consume if it move into (x,y)
self.E = 50
self.resetFlag = True
self.craftUsers = [] # players that craft at current step - for calculating amount of gold
self.bots = []
self.craftMap = {} # cells that players craft at current step, key: x_y, value: number of players that craft at (x,y)
def init_bots(self):
self.bots = [Bot1(2), Bot2(3), Bot3(4)] # use bot1(id=2), bot2(id=3), bot3(id=4)
#for (bot) in self.bots: # at the beginning, all bots will have same position, energy as player
for bot in self.bots: # at the beginning, all bots will have same position, energy as player
bot.info.posx = self.user.posx
bot.info.posy = self.user.posy
bot.info.energy = self.user.energy
bot.info.lastAction = -1
bot.info.status = PlayerInfo.STATUS_PLAYING
bot.info.score = 0
self.stepState.players.append(bot.info)
self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players)
#print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers)
def reset(self, requests): # load new game by given request: [map id (filename), posx, posy, initial energy]
# load new map
self.reset_map(requests[0])
self.userMatch.posx = int(requests[1])
self.userMatch.posy = int(requests[2])
self.userMatch.energy = int(requests[3])
self.userMatch.gameinfo.steps = int(requests[4])
self.maxStep = self.userMatch.gameinfo.steps
# init data for players
self.user.posx = self.userMatch.posx # in
self.user.posy = self.userMatch.posy
self.user.energy = self.userMatch.energy
self.user.status = PlayerInfo.STATUS_PLAYING
self.user.score = 0
self.stepState.players = [self.user]
self.E = self.userMatch.energy
self.resetFlag = True
self.init_bots()
self.stepCount = 0
def reset_map(self, id): # load map info
self.mapId = id
self.map = json.loads(self.maps[self.mapId])
self.userMatch = self.map_info(self.map)
self.stepState.golds = self.userMatch.gameinfo.golds
self.map = json.loads(self.maps[self.mapId])
self.energyOnMap = json.loads(self.maps[self.mapId])
for x in range(len(self.map)):
for y in range(len(self.map[x])):
if self.map[x][y] > 0: # gold
self.energyOnMap[x][y] = -4
else: # obstacles
self.energyOnMap[x][y] = ObstacleInfo.types[self.map[x][y]]
def connect(self): # simulate player's connect request
print("Connected to server.")
for mapid in range(len(Maps)):
filename = "map" + str(mapid)
print("Found: " + filename)
self.maps[filename] = str(Maps[mapid])
def map_info(self, map): # get map info
# print(map)
userMatch = UserMatch()
userMatch.gameinfo.height = len(map)
userMatch.gameinfo.width = len(map[0])
i = 0
while i < len(map):
j = 0
while j < len(map[i]):
if map[i][j] > 0: # gold
g = GoldInfo()
g.posx = j
g.posy = i
g.amount = map[i][j]
userMatch.gameinfo.golds.append(g)
else: # obstacles
o = ObstacleInfo()
o.posx = j
o.posy = i
o.type = -map[i][j]
o.value = ObstacleInfo.types[map[i][j]]
userMatch.gameinfo.obstacles.append(o)
j += 1
i += 1
return userMatch
def receive(self): # send data to player (simulate player's receive request)
if self.resetFlag: # for the first time -> send game info
self.resetFlag = False
data = self.userMatch.to_json()
for bot in self.bots:
bot.new_game(data)
# print(data)
return data
else: # send step state
self.stepCount = self.stepCount + 1
if self.stepCount >= self.maxStep:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_END_STEP
data = self.stepState.to_json()
#for (bot) in self.bots: # update bots' state
for bot in self.bots: # update bots' state
bot.new_state(data)
# print(data)
return data
def send(self, message): # receive message from player (simulate send request from player)
if message.isnumeric(): # player send action
self.resetFlag = False
self.stepState.changedObstacles = []
action = int(message)
# print("Action = ", action)
self.user.lastAction = action
self.craftUsers = []
self.step_action(self.user, action)
for bot in self.bots:
if bot.info.status == PlayerInfo.STATUS_PLAYING:
action = bot.next_action()
bot.info.lastAction = action
# print("Bot Action: ", action)
self.step_action(bot.info, action)
self.action_5_craft()
for c in self.stepState.changedObstacles:
self.map[c["posy"]][c["posx"]] = -c["type"]
self.energyOnMap[c["posy"]][c["posx"]] = c["value"]
else: # reset game
requests = message.split(",")
#print("Reset game: ", requests[:3], end='')
self.reset(requests)
def step_action(self, user, action):
switcher = {
0: self.action_0_left,
1: self.action_1_right,
2: self.action_2_up,
3: self.action_3_down,
4: self.action_4_free,
5: self.action_5_craft_pre
}
func = switcher.get(action, self.invalidAction)
func(user)
def action_5_craft_pre(self, user): # collect players who craft at current step
user.freeCount = 0
if self.map[user.posy][user.posx] <= 0: # craft at the non-gold cell
user.energy -= 10
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
else:
user.energy -= 5
if user.energy > 0:
self.craftUsers.append(user)
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
count = self.craftMap[key]
self.craftMap[key] = count + 1
else:
self.craftMap[key] = 1
else:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def action_0_left(self, user): # user go left
user.freeCount = 0
user.posx = user.posx - 1
if user.posx < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_1_right(self, user): # user go right
user.freeCount = 0
user.posx = user.posx + 1
if user.posx >= self.userMatch.gameinfo.width:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_2_up(self, user): # user go up
user.freeCount = 0
user.posy = user.posy - 1
if user.posy < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_3_down(self, user): # user go down
user.freeCount = 0
user.posy = user.posy + 1
if user.posy >= self.userMatch.gameinfo.height:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_4_free(self, user): # user free
user.freeCount += 1
if user.freeCount == 1:
user.energy += int(self.E / 4)
elif user.freeCount == 2:
user.energy += int(self.E / 3)
elif user.freeCount == 3:
user.energy += int(self.E / 2)
else:
user.energy = self.E
if user.energy > self.E:
user.energy = self.E
def action_5_craft(self):
craftCount = len(self.craftUsers)
# print ("craftCount",craftCount)
if (craftCount > 0):
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
c = self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
user.score += m
# print ("user", user.playerId, m)
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
c = self.craftMap[key]
del self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
self.map[y][x] -= m * c
if self.map[y][x] < 0:
self.map[y][x] = 0
self.energyOnMap[y][x] = ObstacleInfo.types[0]
for g in self.stepState.golds:
if g.posx == x and g.posy == y:
g.amount = self.map[y][x]
if g.amount == 0:
self.stepState.golds.remove(g)
self.add_changed_obstacle(x, y, 0, ObstacleInfo.types[0])
if len(self.stepState.golds) == 0:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_EMPTY_GOLD
break
self.craftMap = {}
def invalidAction(self, user):
user.status = PlayerInfo.STATUS_ELIMINATED_INVALID_ACTION
user.lastAction = 6 #eliminated
def go_to_pos(self, user): # player move to cell(x,y)
if self.map[user.posy][user.posx] == -1:
user.energy -= randrange(16) + 5
elif self.map[user.posy][user.posx] == 0:
user.energy += self.energyOnMap[user.posy][user.posx]
elif self.map[user.posy][user.posx] == -2:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 0, ObstacleInfo.types[0])
elif self.map[user.posy][user.posx] == -3:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 3,
self.bog_energy_chain[self.energyOnMap[user.posy][user.posx]])
else:
user.energy -= 4
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def add_changed_obstacle(self, x, y, t, v):
added = False
for o in self.stepState.changedObstacles:
if o["posx"] == x and o["posy"] == y:
added = True
break
if added == False:
o = {}
o["posx"] = x
o["posy"] = y
o["type"] = t
o["value"] = v
self.stepState.changedObstacles.append(o)
def close(self):
print("Close socket.")
class Bot1:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
#return int(greedy_policy(s))
return int(non_RL_agent.greedy_policy(s))
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
class Bot2:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
#return int(non_RL_agent03.greedy_policy(s))
return int(non_RL_agent.greedy_policy(s, how_gold=non_RL_agent.find_worthiest_gold))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = np.random.randint(0, 4)
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
class Bot3:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
return int(non_RL_agent02.greedy_policy(s))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = self.ACTION_GO_LEFT
# if self.info.posx % 2 == 0:
# if self.info.posy < self.state.mapInfo.max_y:
# action = self.ACTION_GO_DOWN
# else:
# if self.info.posy > 0:
# action = self.ACTION_GO_UP
# else:
# action = self.ACTION_GO_RIGHT
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
#MinerState.py
def str_2_json(json_str):
return json.loads(json_str)
class MapInfo:
def __init__(self):
self.max_x = 0 #Width of the map
self.max_y = 0 #Height of the map
self.golds = [] #List of the golds in the map
self.obstacles = []
self.numberOfPlayers = 0
self.maxStep = 0 #The maximum number of step is set for this map
def init_map(self, gameInfo):
#Initialize the map at the begining of each episode
self.max_x = gameInfo["width"] - 1
self.max_y = gameInfo["height"] - 1
self.golds = gameInfo["golds"]
self.obstacles = gameInfo["obstacles"]
self.maxStep = gameInfo["steps"]
self.numberOfPlayers = gameInfo["numberOfPlayers"]
def update(self, golds, changedObstacles):
#Update the map after every step
self.golds = golds
for cob in changedObstacles:
newOb = True
for ob in self.obstacles:
if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]:
newOb = False
#print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ",
# cob["type"], " / value: ", ob["value"], " -> ", cob["value"])
ob["type"] = cob["type"]
ob["value"] = cob["value"]
break
if newOb:
self.obstacles.append(cob)
#print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ",
# cob["value"])
def get_min_x(self):
return min([cell["posx"] for cell in self.golds])
def get_max_x(self):
return max([cell["posx"] for cell in self.golds])
def get_min_y(self):
return min([cell["posy"] for cell in self.golds])
def get_max_y(self):
return max([cell["posy"] for cell in self.golds])
def is_row_has_gold(self, y):
return y in [cell["posy"] for cell in self.golds]
def is_column_has_gold(self, x):
return x in [cell["posx"] for cell in self.golds]
def gold_amount(self, x, y): #Get the amount of golds at cell (x,y)
for cell in self.golds:
if x == cell["posx"] and y == cell["posy"]:
return cell["amount"]
return 0
def get_obstacle(self, x, y): # Get the kind of the obstacle at cell(x,y)
for cell in self.obstacles:
if x == cell["posx"] and y == cell["posy"]:
return cell["type"]
return -1 # No obstacle at the cell (x,y)
class State:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self):
self.end = False
self.score = 0
self.lastAction = None
self.id = 0
self.x = 0
self.y = 0
self.energy = 0
self.energy_pre = 0
self.mapInfo = MapInfo()
self.players = []
self.stepCount = 0
self.status = State.STATUS_PLAYING
def init_state(self, data): #parse data from server into object
game_info = str_2_json(data)
self.end = False
self.score = 0
self.lastAction = None
self.id = game_info["playerId"]
self.x = game_info["posx"]
self.y = game_info["posy"]
self.energy = game_info["energy"]
self.mapInfo.init_map(game_info["gameinfo"])
self.stepCount = 0
self.status = State.STATUS_PLAYING
self.players = [{"playerId": 2, "posx": self.x, "posy": self.y},
{"playerId": 3, "posx": self.x, "posy": self.y},
{"playerId": 4, "posx": self.x, "posy": self.y}]
def update_state(self, data):
new_state = str_2_json(data)
for player in new_state["players"]:
if player["playerId"] == self.id:
self.x = player["posx"]
self.y = player["posy"]
self.energy_pre = self.energy
self.energy = player["energy"]
self.score = player["score"]
self.lastAction = player["lastAction"]
self.status = player["status"]
self.mapInfo.update(new_state["golds"], new_state["changedObstacles"])
self.players = new_state["players"]
for i in range(len(self.players), 4, 1):
self.players.append({"playerId": i, "posx": self.x, "posy": self.y})
self.stepCount = self.stepCount + 1
#MinerEnv.py
TreeID = 1
TrapID = 2
SwampID = 3
class MinerEnv:
def __init__(self):
self.socket = GameSocket()
self.state = State()
self.score_pre = self.state.score
def start(self): #connect to server
self.socket.connect()
def end(self): #disconnect server
self.socket.close()
def send_map_info(self, request):#tell server which map to run
self.socket.send(request)
def reset(self): #start new game
try:
message = self.socket.receive() #receive game info from server
self.state.init_state(message) #init state
except Exception as e:
import traceback
traceback.print_exc()
def step(self, action): #step process
self.socket.send(action) #send action to server
try:
message = self.socket.receive() #receive new state from server
self.state.update_state(message) #update to local state
except Exception as e:
import traceback
traceback.print_exc()
def get_state(self):
"""
Fuse `view` and `energyOnMap` into a single matrix to have a simple and concise state/observation.
We want a matrix showing the following:
`gold`: The amount of gold
`all the others`: The energy that each type of terrain is going to take if being stepped into, e.g.
`land` => -1, `trap` => -10, etc.
"""
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
energyOnMap = np.array(self.socket.energyOnMap)
# `view` will contribute only to the type of terrain of `gold`
view[view <= 0] = -9999 # Just a dummy large negative number to be got rid of later
# `energyOnMap` will contribute to the types of terrain of `land`, `trap`, `forest` and `swamp`.
# Recall that `forest` was designated by BTC to the value of 0, meaning a random integer in [5..20].
energyOnMap[energyOnMap == 0] = - constants.forest_energy
channel0 = np.maximum(view, energyOnMap)
# Finish channel 0
# Channel 1 will contain the position of the agent
channel1 = np.zeros_like(channel0)
x_agent_out_of_map = self.state.x < 0 or self.state.x >= constants.width
y_agent_out_of_map = self.state.y < 0 or self.state.y >= constants.height
if x_agent_out_of_map or y_agent_out_of_map:
pass
else:
channel1[self.state.y, self.state.x] = self.state.energy
state = np.stack((channel0, channel1), axis=-1)
return state
def get_reward(self):
# Initialize reward
reward = 0
if self.state.status == constants.agent_state_str2id["out_of_MAP"]:
#if self.state.stepCount < 50:
# reward += -5*(50 - self.state.stepCount)
reward -= 1000
#elif self.state.status == constants.agent_state_str2id["no_more_STEP"]:
# #reward += (self.state.score/total_gold) * 100
# pass
elif self.state.status == constants.agent_state_str2id["no_more_ENERGY"]:
reward -= 300
#elif self.state.status == constants.agent_state_str2id["no_more_GOLD"]:
# pass
#elif self.state.status == constants.agent_state_str2id["INVALID_action"]:
# pass
else: # Here below: we are almost sure that agent is not out of map
s = self.get_state()
try:
terrain_now = s[self.state.y, self.state.x, 0]
except Exception as e:
print(f"{e}")
print(f"self.state.x, self.state.y = {self.state.x}, {self.state.y} ")
raise e
pos_now = np.array([self.state.x, self.state.y])
reverse_mv = constants.action_id2ndarray[constants.reverse_action_id[self.state.lastAction]]
pos_pre = pos_now + reverse_mv
x_pre, y_pre = pos_pre
terrain_pre = s[y_pre, x_pre, 0]
# Punish `dig on obstacle`
if self.state.lastAction == constants.dig:
if terrain_now < 0:
reward -= 100
elif terrain_now > 0:
score_action = self.state.score - self.score_pre
reward += score_action
self.score_pre = self.state.score
if self.state.lastAction in (constants.up, constants.down, constants.left, constants.right,): # i.e. if agent moved
if terrain_pre > 100: # punish leaving gold
reward -= terrain_pre
if terrain_now > 0: # entering gold
if self.state.energy > constants.punishments["gold"]:
reward += 50
else:
reward -= 100
if terrain_now < 0: # punish according to terrain_now
reward += terrain_now
if terrain_now == -100: # i.e. fatal swamp
reward -= 500
if self.state.lastAction == constants.rest:
if self.state.energy_pre >= 40:
reward -= 200
if self.state.energy_pre <= 5:
reward += 20
if self.state.status == constants.agent_state_str2id["PLAYing"]:
reward += 1
return reward
def check_terminate(self):
#Checking the status of the game
#it indicates the game ends or is playing
return self.state.status != State.STATUS_PLAYING
Maps = [constants.maps[i] for i in range(1, 6)]
env = MinerEnv() # Creating a communication environment between the DQN model and the game environment
env.start() # Connect to the game
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
tf.random.set_seed(42)
np.random.seed(42)
#input_shape = [constants.height, constants.width, 1+4]
input_shape = [constants.height, constants.width, 1+1]
n_outputs = 6
model = keras.models.Sequential([
Conv2D(4, 3, activation="relu", padding="same", input_shape=input_shape),
#MaxPooling2D(2),
Conv2D(8, 3, activation="relu", padding="same"),
#Conv2D(128, 3, activation="relu", padding="same"),
#MaxPooling2D(2),
Flatten(),
#Dense(128, activation="elu"),
Dense(128, activation="elu"),
Dense(64, activation="elu"),
Dense(32, activation="elu"),
Dense(n_outputs)
])
#h5 = "models/30_11_dDQN_light_tweak14/avg-1785.00-episode-11155-30_11_dDQN_light_tweak14-gold-1800-step-100-20200827-0903.h5"
#model = keras.models.load_model(h5)
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
from collections import deque
replay_memory = deque(maxlen=max_replay_len)
def sample_experiences(batch_size):
indices = np.random.randint(len(replay_memory), size=batch_size)
batch = [replay_memory[index] for index in indices]
states, actions, rewards, next_states, dones = [
np.array([experience[field_index] for experience in batch])
for field_index in range(5)]
return states, actions, rewards, next_states, dones
def epsilon_greedy_policy(state, epsilon=0, n_actions=6):
if np.random.rand() < epsilon:
return np.random.randint(n_actions)
else:
#pictorial = pictorial_state(state)
#Q_values = model.predict(pictorial[np.newaxis])
Q_values = model.predict(state[np.newaxis])
return np.argmax(Q_values[0])
def play_one_step(env, state, epsilon):
action = epsilon_greedy_policy(state, epsilon)
#next_state, reward, done, info = env.step(action)
env.step(str(action))
next_state = env.get_state()
reward = env.get_reward()
done = env.check_terminate()
replay_memory.append((state, action, reward, next_state, done))
return next_state, reward, done
#optimizer = keras.optimizers.Adam(lr=1e-3)
#optimizer = keras.optimizers.Adam(lr=2.5e-4)
optimizer = keras.optimizers.Adam(lr=lr_optimizer)
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
#pictorials = np.array([pictorial_state(s) for s in states])
#next_pictorials = np.array([pictorial_state(next_s) for next_s in next_states])
#next_Q_values = model.predict(next_pictorials)
next_Q_values = model.predict(next_states)
#max_next_Q_values = np.max(next_Q_values, axis=1)
best_next_actions = np.argmax(next_Q_values, axis=1)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
#target_Q_values = rewards + (1 - dones) * discount_rate * max_next_Q_values
target_Q_values = rewards + (1 - dones) * discount_rate * next_best_Q_values
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
#all_Q_values = model(pictorials)
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
np.random.seed(42)
tf.random.set_seed(42)
from constants import n_allowed_steps
now = datetime.datetime.now()
now_str = now.strftime("%Y%m%d-%H%M")
script_name = __file__.split('.')[0]
save_path = os.path.join("models", script_name)
os.makedirs(save_path, exist_ok=True)
scores = []
scores_avg = []
best_score = 0
k = 10
scores_k_most_recent = deque([0]*k, maxlen=k)
best_score_avg = 1400
with open(os.path.join(save_path, f"log-{now_str}.txt"), 'w') as log:
for episode in range(n_episodes):
mapID = np.random.randint(0, 5)
posID_x = np.random.randint(constants.width)
posID_y = np.random.randint(constants.height)
request = "map{},{},{},50,100".format(mapID, posID_x, posID_y)
env.send_map_info(request)
env.reset()
obs = env.get_state()
undiscounted_return = 0
for step in range(n_allowed_steps):
epsilon = max(1 - episode / n_epsilon_decay, 0.01)
obs, reward, done = play_one_step(env, obs, epsilon)
undiscounted_return += reward
if done:
break
score = env.state.score
scores.append(score)
scores_k_most_recent.append(score)
#score_avg = np.mean(scores_k_most_recent)
score_avg = round(np.mean(scores_k_most_recent), 1)
scores_avg.append(score_avg)
#if score > best_score:
if score_avg > best_score_avg:
#best_weights = model.get_weights()
best_score_avg = score_avg
#best_score = score
#model.save(os.path.join(save_path, f"episode-{episode+1}-gold-{env.state.score}-avg-{score_avg:4.2f}-step-{step+1}-{now_str}.h5"))
model.save(os.path.join(save_path, f"avg-{score_avg:07.2f}-episode-{episode+1}-{__file__.split('.')[0]}-gold-{env.state.score}-step-{step+1}-{now_str}.h5"))
#message = "(Episode {: 5d}/{}) Gold {: 4d} avg {: 8.2f} undisc_return {: 6d} step {: 3d} eps {:.2f} ({})\n".format(episode+1, n_episodes, env.state.score, score_avg, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
message = "(Episode {: 5d}/{}) Gold {: 4d} avg {: 8.1f} undisc_return {: 6d} step {: 3d} eps: {:.2f} ({})\n".format(episode+1, n_episodes, env.state.score, score_avg, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
##############################################
#score = env.state.score*(n_allowed_steps - step)
#score = env.state.score
#scores.append(score)
#if score > best_score:
# #best_weights = model.get_weights()
# best_score = score
# model.save(os.path.join(save_path, f"episode-{episode+1}-gold-{env.state.score}-step-{step+1}-{now_str}.h5"))
#message = "(Episode {: 5d}/{}) Gold: {: 4d} undiscounted_return: {: 6d} Steps: {: 3d} eps: {:.3f} ({})\n".format(episode+1, n_episodes, env.state.score, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
print(message, end='')
log.write(message)
#if episode > 500:
if episode > n_episodes_buf_fill:
training_step(batch_size)
if episode % n_episodes_buf_fill == 0:
target.set_weights(model.get_weights())
#np.save(f"scores-{now_str}", np.array(scores))
#np.save(f"scores-N-scores_avg-{now_str}", np.array([scores, scores_avg]))
np.save(f"scores-N-scores_avg-{__file__.split('.')[0]}-{now_str}", np.array([scores, scores_avg]))
|
the-stack_106_14099
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
UNDERLINE_CHARS = ['-', '`', ':', '~', '^', '_', '*', '+', '#', '<', '>'] # Characters that can underline title
SDC_USR_GUIDE_HEADING_STR = 'Intel Scalable Dataframe Compiler User Guide'
SDC_USER_GUIDE_PANDAS_STR = 'Pandas API:'
SDC_DEV_GUIDE_HEADING_STR = 'Intel Scalable Dataframe Compiler Developer Guide'
def get_indent(text):
"""
Returns indentation for a given ``text``.
:param text: String, can be multi-line. Only first non-empty line is used to determine the indentation
:return: Indentation (the number of whitespace characters)
"""
lines = text.split('\n')
while len(lines) > 0 and lines[0] == '':
lines.pop(0)
if len(lines) == 0:
return 0 # Text was empty, indentation for empty text is 0
n_stripped = len(lines[0].lstrip()) # Length of the string after stripping whitespaces on the left
return len(lines[0]) - n_stripped
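# Illustrative usage (sketch added for clarity, not in the original module): only the first
# non-empty line determines the indentation.
#   get_indent("    x = 1\n        y = 2")  -> 4
#   get_indent("\n\n")                      -> 0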
def reindent(old_text, new_indent):
"""
Perform re-indentation of the text ``old_text`` with new indent ``new_indent``.
:param old_text: Multi-line string for which re-indentation is performed
:param new_indent: New indent
:return: New multi-line text
"""
if old_text == '':
return ' '*new_indent
old_indent = get_indent(old_text)
lines = old_text.split('\n')
new_text = ''
for line in lines:
if line.strip() == '':
new_text += '\n'
else:
line = line[old_indent:]
new_text += ' '*new_indent + line + '\n'
# If ``old_text`` has no ``'\n'`` in the end, remove it too from the ``new_text``
if old_text[-1] != '\n':
new_text = new_text[:-1]
return new_text
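# Illustrative usage (sketch, not in the original module): the indent of the first non-empty
# line is replaced by the new indent on every line.
#   reindent("    a\n    b", 2)  -> "  a\n  b"
#   reindent("", 4)              -> "    "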
def create_heading_str(title, underlying_symbol='-'):
"""
Creates heading string for a given ``title``. Second line under title is decorated with ``underlying_symbol``
Heading is created taking into account of ``title`` indentation.
:param title:
:param underlying_symbol:
:return: resulting heading string
"""
indent = get_indent(title)
n = len(title.strip())
return title + '\n' + ' '*indent + underlying_symbol*n
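# Illustrative usage (sketch, not in the original module): the underline matches the stripped
# title length and repeats the title's indentation.
#   create_heading_str("  Returns", '-')  -> "  Returns\n  -------"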
def get_docstring(obj):
"""
Returns docstring for a given object, or an empty string if no object is provided or there is no docstring for it.
:param obj: Object for which the docstring to be provided
:return: Docstring
"""
if obj is None:
return ''
doc = obj.__doc__
if doc is None:
return ''
else:
return doc
def is_section_title(line, underline):
"""
Checks whether line and consecutive underline form valid section title.
.. note::
Function expects leading and trailing whitespaces removed for both strings prior to the call.
:param line: String, title text
:param underline: String, underlying characters
:return: True if line and underline form valid section title
"""
if line is None:
return False
if underline is None:
return False
if line == '':
return False
if underline == '':
return False
n = len(line)
for c in UNDERLINE_CHARS:
s = c * n
if underline == s:
return True
return False
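# Illustrative usage (sketch, not in the original module): the underline must be a single
# UNDERLINE_CHARS character repeated to exactly the title length.
#   is_section_title("Returns", "-------")  -> True
#   is_section_title("Returns", "----")     -> False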
def is_sdc_user_guide_header(sdc_header):
"""
Checks whether a given title-text tuple forms valid Intel SDC header for User Guide.
The header is expected to be 4 lines long, where the first three lines are of the form:
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: <pandas API name>
The fourth line must be empty
:param sdc_header: Tuple (title, text)
:return: True if sdc_header forms valid Intel SDC User Guide docstring header
"""
title, text = sdc_header
return title.strip() == SDC_USR_GUIDE_HEADING_STR and text.strip().startswith(SDC_USER_GUIDE_PANDAS_STR)
def is_sdc_dev_guide_header(sdc_header):
"""
Checks whether a given title-text tuple forms valid Intel SDC header for Developer Guide.
The header is expected to be 3 lines long, where the first two lines are of the form:
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
The third line must be empty
:param sdc_header: Tuple (title, text)
:return: True if sdc_header forms valid Intel SDC Developer Guide docstring header
"""
title, text = sdc_header
return title.strip() == SDC_DEV_GUIDE_HEADING_STR
def extract_pandas_name_from(text):
"""
Extracts Pandas API from ``text``.
This function is used in conjunction with :func:`split_title`, which returns the tuple (title, text).
The ``title`` must contain valid Intel SDC header. The ``text`` is expected to be in the form
``Pandas API: *fully qualified Pandas name*``
:param text:
:return: Pandas API name as a string
"""
line = text.strip().split('\n', 1)[0] # Pandas API is in the first line. Ignore whitespaces
return line.replace(SDC_USER_GUIDE_PANDAS_STR, '').strip() # Name begins right after ``Pandas API:``
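# Illustrative usage (sketch, not in the original module): the Pandas API name is taken from
# the first line of the header text.
#   extract_pandas_name_from("Pandas API: pandas.Series.map\n\n...")  -> "pandas.Series.map"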
def split_title(section):
"""
Split section into title and remaining text.
:param section: String, documented section
:return: Tuple (title, text)
"""
if section is None:
return '', ''
section = section.lstrip('\n') # Remove leading empty lines
lines = section.split('\n', 2)
if len(lines) > 1:
# Only sections with number of lines >= 2 can be a title
if is_section_title(lines[0].strip(), lines[1].strip()):
if len(lines) > 2:
return lines[0], lines[2] # First line is title, second is underline, remaining is text
else:
return lines[0], '' # First line is title, second line is underline, but the text is empty string
else:
return '', section # First two lines do not form valid heading
else:
return '', section # When section is less than 3 lines we consider it having no title
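# Illustrative usage (sketch, not in the original module):
#   split_title("Returns\n-------\nint -- the result")  -> ("Returns", "int -- the result")
#   split_title("just a paragraph")                     -> ("", "just a paragraph")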
def _merge_paragraphs_within_section(sections):
"""
Internal utility function that merges paragraphs into a single section.
This function call is required after initial splitting of the docstring into sections. The initial split
is based on the presence of ``'\n\n'``, which separates sections and paragraphs. The difference between
section and paragraph is that section starts with the title of the form:
This is title
-------------
This is the first paragraph. It may be multi-line.
This is the second line of the paragraph.
This is another multi-line paragraph.
This is the second line of the paragraph.
Special treatment is required for the Intel SDC header section and the following description section. The Intel SDC
header section must be the first one in the docstring. It consists of exactly 3 lines:
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: *pandas_api_fully_qualified_name*
The description section (if any) follows right after the Intel SDC header section. It generally consists of two
or more paragraphs. The first paragraph is the short description, which is typically a single line.
The following paragraphs provide full description. In rare cases documentation does not have description section,
and this must be treated accordingly.
:param sections: List of tuples ``(title, text)``.
:return: Reformatted list of tuples ``(title, text)`, where paragraphs belonging to one section are merged in
single ``text`` item.
"""
if len(sections) == 0:
return sections
merged_sections = []
# Check if the very first section is Intel SDC header
section_title, section_text = sections[0]
if is_sdc_user_guide_header((section_title, section_text)):
merged_sections.append(sections[0])
sections.pop(0)
# Check if the next section is the short description
section_title, section_text = sections[0]
if section_title.strip() == '':
merged_sections.append(sections[0])
sections.pop(0)
if len(sections) == 0:
return merged_sections
# Merge next sections with empty title into a single section representing full description
section_title, section_text = sections[0]
if section_title.strip() == '':
sections.pop(0)
while len(sections) > 0:
title, text = sections[0]
if title.strip() == '':
section_text += '\n\n' + text
sections.pop(0)
else:
break
merged_sections.append((section_title, section_text))
# Now merge paragraphs of remaining titled sections
while len(sections) > 0:
section_title, section_text = sections[0]
sections.pop(0)
while len(sections) > 0:
title, text = sections[0]
if title.strip() == '':
section_text += '\n\n' + text
sections.pop(0)
else:
break
merged_sections.append((section_title, section_text))
return merged_sections
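# Illustrative behaviour (sketch, not in the original module): untitled paragraphs that follow
# a titled section are folded back into that section's text.
#   _merge_paragraphs_within_section([("Examples", "first"), ("", "second")])
#   -> [('Examples', 'first\n\nsecond')]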
def split_in_sections(doc):
"""
Splits the doc string into sections
Each section is separated by empty line. Sections can start with headers or without. Each header follows NumPy
style:
Section Title
-------------
Other permitted characters can be used to underline section title
:param doc: Docstring to be split into sections
:return: List, sections of the doc. Each section is a tuple of strings (title, text)
:seealso: NumPy style `example
<https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html#example-numpy>`_
"""
doc = reindent(doc, 0)
sections = doc.split('\n\n') # Sections are separated by empty lines
titled_sections = []
while len(sections) > 0:
title, text = split_title(sections[0])
sections.pop(0)
titled_sections.append((title, text))
return _merge_paragraphs_within_section(titled_sections)
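# Illustrative usage (sketch, not in the original module):
#   split_in_sections("Short description.\n\nReturns\n-------\nint -- result")
#   -> [('', 'Short description.'), ('Returns', 'int -- result')]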
def get_short_description(obj, sdc_header_flag=False):
"""
Returns short description for a given object obj
:param obj: Object for which short description needs to be returned
:param sdc_header_flag: Flag indicating that the first three lines must be considered as Intel SDC header
:return: String, short description
:raises: NameError, when ``sdc_header_flag==True`` and no Intel SDC header section found.
The header is expected to be 4 lines long, where the first three lines are of the form:
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: <pandas API name>
The fourth line must be empty
"""
doc = get_docstring(obj)
if doc == '':
return doc
sections = split_in_sections(doc) # tuple (title, text)
if sdc_header_flag:
if len(sections) > 1: # There must be at least one more section after Intel SDC header section
if not is_sdc_user_guide_header(sections[0]):
raise NameError('No Intel SDC header section found')
sections.pop(0) # Ignore Intel SDC header section
if len(sections) == 0:
return '' # Docstring has no sections, i.e. short description is absent
title, text = sections[0] # Short description is the first section of the docstring
text = text.strip()
lines = text.split('\n')
lines = [line.strip() for line in lines]
lines = ' '.join(lines)
return lines
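# Illustrative usage (sketch, not in the original module; `sample` is a hypothetical function):
#   def sample():
#       """Compute the answer.
#
#       Longer description of what it does.
#       """
#   get_short_description(sample)  -> 'Compute the answer.'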
def cut_sdc_dev_guide(doc):
"""
Removes Intel SDC Developer Guide related sections from the docstring.
It is assumed that Developer Guide docstring follows the User Guide related sections of the docstring.
Everything after section the titled *Intel Scalable Dataframe Compiler Developer Guide* is cut
:param doc: Docstring that includes User Guide and the following Developer Guide sections
:return: Docstring with the cut Developer Guide sections
"""
sections = split_in_sections(doc) # tuple (title, text)
trimmed_sections = []
while len(sections) > 0:
if is_sdc_dev_guide_header(sections[0]):
break
trimmed_sections.append(sections[0])
sections.pop(0)
return trimmed_sections
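# Illustrative behaviour (sketch, not in the original module): everything from the
# 'Intel Scalable Dataframe Compiler Developer Guide' heading onwards is dropped and the
# remaining User Guide sections are returned as (title, text) tuples.
#   user_guide_sections = cut_sdc_dev_guide(docstring_with_both_guides)  # hypothetical input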
|
the-stack_106_14101
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
'''
Script to visualize permeability data in the O2/N2, CO2/CH4, CO2/N2, and H2/CO2 Robeson spaces.
Data must be a csv file with 6 columns in the order of ['He','H2','O2','N2','CO2','CH4'] permeabilities.
Update the filelist list with paths to each of the .csv files that are desired.
Works with the .csv outputs of screen.py and train.py.
'''
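# Assumed input layout (added note, not in the original script): each row of the .csv holds
# log10 permeabilities in the column order He, H2, O2, N2, CO2, CH4, e.g.
#   1.2,2.3,0.8,0.1,1.5,0.4
# The log10 assumption follows from selectivity being computed below as a difference of columns.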
#change the paths to the files that are desired to be plotted
filelist= ['models/DNN_BLR_fing/Y_pred_datasetBX_fing.csv', 'models/DNN_BLR_fing/Y_pred_datasetCX_fing_0.csv', 'models/DNN_BLR_fing2/Y_pred_datasetDX_fing.csv', 'models/RF_BLR_fing/Y_train.csv']
sns.set_palette("colorblind")
#plot permeability values on a Robeson plot
fig = plt.figure(figsize=(12,8))
for i, dataset in enumerate(filelist[:-1]):
#read in the permeability data for each dataset
Y_pred = pd.read_csv(dataset, header=None)
Y_pred = np.array(Y_pred)
#O2/N2 separations
plt.subplot(2,2,1)
permeability = Y_pred[:,-4]
selectivity = Y_pred[:,-4] - Y_pred[:,-3]
plt.plot(permeability, selectivity, '.', alpha = 0.2)
#CO2/CH4 separations
plt.subplot(2,2,2)
permeability = Y_pred[:,-2]
selectivity = Y_pred[:,-2] - Y_pred[:,-1]
plt.plot(permeability, selectivity, '.', alpha=0.2)
#CO2/N2 separations
plt.subplot(2,2,3)
permeability = Y_pred[:,-2]
selectivity = Y_pred[:,-2] - Y_pred[:,-3]
plt.plot(permeability, selectivity, '.', alpha=0.2)
#H2/CO2 separations
plt.subplot(2,2,4)
permeability = Y_pred[:,-5]
selectivity = Y_pred[:,-5] - Y_pred[:,-2]
plt.plot(permeability, selectivity, '.', alpha=0.2)
#read in the permeability data for each dataset
Y_pred = pd.read_csv(filelist[-1], header=None)
Y_pred = np.array(Y_pred)
#O2/N2 separations
plt.subplot(2,2,1)
permeability = Y_pred[:,-4]
selectivity = Y_pred[:,-4] - Y_pred[:,-3]
plt.plot(permeability, selectivity, '.', markersize=3, color='k')
#CO2/CH4 separations
plt.subplot(2,2,2)
permeability = Y_pred[:,-2]
selectivity = Y_pred[:,-2] - Y_pred[:,-1]
plt.plot(permeability, selectivity, '.', markersize=3, color='k')
#CO2/N2 separations
plt.subplot(2,2,3)
permeability = Y_pred[:,-2]
selectivity = Y_pred[:,-2] - Y_pred[:,-3]
plt.plot(permeability, selectivity, '.', markersize=3, color='k')
#H2/CO2 separations
plt.subplot(2,2,4)
permeability = Y_pred[:,-5]
selectivity = Y_pred[:,-5] - Y_pred[:,-2]
plt.plot(permeability, selectivity, '.', markersize=3, color='k')
#format the plot and add Robeson upper bounds
plt.subplot(2,2,1)
xmin = -4
xmax = 7
plt.xlim([xmin, xmax])
plt.ylim([-1, 2])
plt.plot([xmin, xmax], [np.log10(9.2008)-0.1724*xmin, np.log10(9.2008)-0.1724*xmax], '-k') #1991 upper bound
plt.plot([xmin, xmax], [np.log10(12.148)-0.1765*xmin, np.log10(12.148)-0.1765*xmax], '--k') #2008 upper bound
plt.plot([xmin, xmax], [np.log10(18.50)-0.1754*xmin, np.log10(18.50)-0.1754*xmax], ':k') #2015 upper bound
plt.title("O2/N2 Separations")
#plt.legend(filelist)
plt.subplot(2,2,2)
xmin = -2
xmax =7
plt.xlim([xmin, xmax])
plt.ylim([-2, 4])
plt.plot([xmin, xmax], [np.log10(197.81)-0.3807*xmin, np.log10(197.81)-0.3807*xmax], '-k') #1991 upper bound
plt.plot([xmin, xmax], [np.log10(357.33)-0.3794*xmin, np.log10(357.33)-0.3794*xmax], '--k') #2008 upper bound
plt.plot([xmin, xmax], [np.log10(1155.60)-0.4165*xmin, np.log10(1155.60)-0.4165*xmax], ':k') #2019 upper bound
plt.title('CO2/CH4 Separations')
plt.subplot(2,2,3)
xmin = -2
xmax =7
plt.xlim([xmin, xmax])
plt.ylim([-1, 3])
plt.plot([xmin, xmax], -1/2.888*np.array([-np.log10(30967000)+xmin, -np.log10(30967000)+xmax]), '--k') #2008 upper bound
plt.plot([xmin, xmax], -1/3.409*np.array([-np.log10(755.58e6)+xmin, -np.log10(755.58e6)+xmax]), ':k') #2019 upper bound
plt.title('CO2/N2 Separations')
plt.subplot(2,2,4)
xmin = -2
xmax =7
plt.xlim([xmin, xmax])
plt.ylim([-1.5, 2])
plt.plot([xmin, xmax], -1/1.9363*np.array([-np.log10(1200)+xmin, -np.log10(1200)+xmax]), '-k') #1991 upper bound
plt.plot([xmin, xmax], -1/2.302*np.array([-np.log10(4515)+xmin, -np.log10(4515)+xmax]), '--k') #2008 upper bound
plt.title('H2/CO2 Separations')
plt.show()
|
the-stack_106_14102
|
import discord
from discord.ext import commands
# Music category
class Music(commands.Cog):
def __init__(self, nep):
self.nep = nep
self.util = nep.get_cog('Utils')
# Queue command
@commands.command(
name='queue',
aliases=['q'],
description='Shows and manipulates the server\'s queue'
)
async def queue(self, c, *, args):
# List of acceptable flags
acceptable_flags = {
'list': ['sq', 'li', 'show'],
'remove': ['rm']
}
correct_flag = self.util.get_queue_flags(args, acceptable_flags)
# Check for correct flags
if correct_flag is None:
return self.util.error(c, 'Invalid arguments', f'Acceptable flags are: {acceptable_flags}')
await c.send(f'{args}\n{correct_flag}')
def setup(nep):
nep.add_cog(Music(nep))
|
the-stack_106_14103
|
import inspect
import rubrik_cdm
import os
import sys
import reprlib
def print_doc_string(doc_string, section):
if section == 'arguments':
for line in doc_string:
line = line.replace(' -- ', '').strip().split('}', 1)
value_type = line[0].split('{', 1)
if value_type[0] != '':
function_name = value_type[0]
python_type = value_type[1]
description = line[1]
# Name | Type | Description | Choices |
if '(choices: {' in description:
choices = description.split("(choices: {")
choices = choices[1].replace("})", "").strip()
description = description.split("(choices: {")
description = description[0]
else:
choices = ''
markdown.write('| {} | {} | {} | {} |\n'.format(
function_name, python_type, description, choices))
elif section == 'keyword_arguments':
for line in doc_string:
line = line.replace(' -- ', '').strip().split('}', 1)
value_type = line[0].split('{', 1)
if value_type[0] != '':
name = value_type[0]
python_type = value_type[1]
description = line[1]
# Name | Type | Description | Choices | Default
# (default: {'latest'})
if '(default: {' in description:
default = description.split("(default: {")
default = default[1].replace("})", "").strip()
if "(choices:" in default:
default = default.split('(choices')
default = default[0]
default = default.replace("'", "").replace('"', '')
else:
default = ''
if '(choices: {' in description:
choices = description.split("(choices: {")
choices = choices[1].replace("})", "").strip()
choices = choices.replace("'", "").replace('"', '')
else:
choices = ''
if '(default: {' in description:
description = description.split("(default: {")
description = description[0]
markdown.write('| {} | {} | {} | {} | {} |\n'.format(
name, python_type, description, choices, default))
elif section == 'description':
for line in doc_string:
markdown.write(line)
markdown.write('\n')
elif section == 'returns':
markdown.write(
'| Type | Return Value |\n')
markdown.write(
'|------|-----------------------------------------------------------------------------------------------|\n')
for line in doc_string:
line = line.strip().split(' -- ', 1)
if line[0] != '':
markdown.write('| {} | {} |\n'.format(line[0], line[1]))
def doc_string_description(doc_string):
for index, line in enumerate(doc_string):
if not line.strip():
return index
def doc_string_ending(doc_string):
ending_lines = []
for index, line in enumerate(doc_string):
if not line.strip():
ending_lines.append(index)
return ending_lines
def doc_string_end_block(ending_lines, starting_line):
for index in ending_lines:
if index > starting_line:
return index
def example_code(function_name):
print()
connect_functions = inspect.getmembers(rubrik_cdm.Connect, inspect.isfunction)
bootstrap_functions_all = inspect.getmembers(rubrik_cdm.Bootstrap, inspect.isfunction)
bootstrap_functions = []
for function in bootstrap_functions_all:
if 'setup_cluster' in function or 'status' in function and 'job_status' not in function:
bootstrap_functions.append(function)
rubrk_sdk_functions = connect_functions + bootstrap_functions
# rubrk_sdk_functions = connect_functions + bootstrap_functions
# rubrk_sdk_functions = inspect.getmembers(rubrik.Connect, inspect.isfunction)
function_examples = {}
function_documentation = {}
for function in rubrk_sdk_functions:
function_documentation[function[0]] = function[1].__doc__
function_code = inspect.getsource(function[1])
if '@staticmethod' in function_code:
function_code = function_code.replace('@staticmethod\n', '')
function_code = function_code.splitlines()[0].replace(
'self, ', '').replace('self', '').replace(':', '').strip()
else:
function_code = function_code.splitlines()[0].replace(
'self, ', '').replace('self', '').replace(':', '').strip()
function_examples[function[0]] = function_code
for function_name, function_doc_string in function_documentation.items():
# print(function_name)
if 'init' not in function_name:
arguments_start = None
keyword_argument_start = None
return_start = None
arguments_present = False
markdown = open('{}.md'.format(function_name), 'w')
markdown.write('# {}\n\n'.format(function_name))
doc_string = function_documentation[function_name]
doc_string = doc_string.splitlines()
description_start = doc_string_description(doc_string)
ending_lines = doc_string_ending(doc_string)
for index, line in enumerate(doc_string):
if 'Arguments' in line and 'Keyword' not in line:
arguments_start = index
if 'Keyword Arguments' in line:
keyword_argument_start = index
if 'Returns' in line:
return_start = index
description = []
arguments = []
keyword_arguments = []
returns = []
# Doc String Returns
for index, line in enumerate(doc_string):
# Parse the function description
if index < description_start:
if len(line) != 0:
description.append(line.replace('\n', ''))
try:
if arguments_start:
argument_ending = doc_string_end_block(ending_lines, arguments_start)
if arguments_start + 1 <= index <= argument_ending - 1:
if len(line) != 0:
arguments.append(line)
arguments_present = True
except NameError:
pass
try:
if keyword_argument_start:
keyword_argument_ending = doc_string_end_block(ending_lines, keyword_argument_start)
if keyword_argument_start + 1 <= index <= keyword_argument_ending - 1:
if len(line) != 0:
keyword_arguments.append(line)
arguments_present = True
except NameError:
pass
try:
if return_start:
return_ending = doc_string_end_block(ending_lines, return_start)
if return_start + 1 <= index <= return_ending - 1:
if len(line) != 0:
returns.append(line)
except NameError:
pass
if len(description) > 1:
# Combine the multi-line description into a single line
parse_description = ''.join(description).split()
parse_description = ' '.join(parse_description)
description = []
description.append(parse_description)
print_doc_string(description, 'description')
for function, function_code_example in function_examples.items():
            if function_name == function:
markdown.write('```py\n')
markdown.write(function_code_example)
markdown.write('\n```\n\n')
if arguments:
markdown.write('## Arguments\n')
markdown.write(
'| Name | Type | Description | Choices |\n')
markdown.write(
'|-------------|------|-----------------------------------------------------------------------------|---------|\n')
print_doc_string(arguments, 'arguments')
if keyword_arguments:
markdown.write('## Keyword Arguments\n')
markdown.write(
'| Name | Type | Description | Choices | Default |\n')
markdown.write(
'|-------------|------|-----------------------------------------------------------------------------|---------|---------|\n')
print_doc_string(keyword_arguments, 'keyword_arguments')
if returns:
markdown.write('\n## Returns\n')
print_doc_string(returns, 'returns')
        if function_name[0] != '_':
with open("../sample/{}.py".format(function_name)) as code:
example_code = code.read()
markdown.write('## Example\n')
markdown.write("```py\n")
markdown.write(example_code)
markdown.write("```")
markdown.close()
base_api_functions_search = inspect.getmembers(rubrik_cdm.api.Api, inspect.isfunction)
base_api_functions = []
for function in base_api_functions_search:
# If first character of the function name...
base_api_functions.append(function[0])
del base_api_functions[0]
cluster_functions_search = inspect.getmembers(rubrik_cdm.cluster.Cluster, inspect.isfunction)
cluster_functions = []
for function in cluster_functions_search:
if function[0] not in base_api_functions:
cluster_functions.append(function[0])
cluster_functions = [function for function in cluster_functions if not function.startswith('_')]
data_management_search = inspect.getmembers(rubrik_cdm.data_management.Data_Management, inspect.isfunction)
data_management_functions = []
for function in data_management_search:
if function[0] not in base_api_functions:
data_management_functions.append(function[0])
data_management_functions = [function for function in data_management_functions if not function.startswith('_')]
physical_search = inspect.getmembers(rubrik_cdm.physical.Physical, inspect.isfunction)
physical_functions = []
for function in physical_search:
if function[0] not in base_api_functions:
physical_functions.append(function[0])
physical_functions = [function for function in physical_functions if not function.startswith('_')]
cloud_search = inspect.getmembers(rubrik_cdm.cloud.Cloud, inspect.isfunction)
cloud_functions = []
for function in cloud_search:
if function[0] not in base_api_functions:
cloud_functions.append(function[0])
cloud_functions = [function for function in cloud_functions if not function.startswith('_')]
combined_function_list = base_api_functions + cluster_functions + \
data_management_functions + physical_functions + cloud_functions
connect_functions_search = inspect.getmembers(rubrik_cdm.rubrik_cdm.Connect, inspect.isfunction)
connect_functions = []
for function in connect_functions_search:
if function[0] not in combined_function_list:
connect_functions.append(function[0])
del connect_functions[0]
bootstrap_functions_search = inspect.getmembers(rubrik_cdm.rubrik_cdm.Bootstrap, inspect.isfunction)
bootstrap_functions = []
for function in bootstrap_functions_search:
if function[0] not in combined_function_list:
bootstrap_functions.append(function[0])
del bootstrap_functions[0]
del bootstrap_functions[2]
# Create the SUMMARY (side navigation) Document
markdown = open('SUMMARY.md', 'w')
markdown.write('# Summary\n\n')
markdown.write('### Getting Started\n\n')
markdown.write('* [Quick Start](README.md)\n\n')
markdown.write('### Base API Calls\n')
for function in base_api_functions:
    if function[0] != '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Bootstrap Functions\n')
for function in bootstrap_functions:
    if function[0] != '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Cluster Functions\n')
for function in cluster_functions:
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Cloud Functions\n')
for function in cloud_functions:
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Data Management Functions\n')
for function in data_management_functions:
    if function[0] != '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Physical Host Functions\n')
for function in physical_functions:
    if function[0] != '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### SDK Helper Functions\n')
for function in connect_functions:
    if function[0] != '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.write('\n### Internal Functions\n')
for function in connect_functions:
    if function[0] == '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
for function in combined_function_list:
    if function[0] == '_':
markdown.write('* [{}]({}.md)\n'.format(function, function))
markdown.close()
|
the-stack_106_14104
|
from adafruit_circuitplayground.express import circuit
import board
import neopixel
import time
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10)
while True:
temp = circuit.temperature
if temp >= 26.00:
pixels.fill((255,0,0))
elif temp < 26.00:
pixels.fill((0,0,255))
pixels.write()
time.sleep(0.5)
pixels.fill((0,0,0))
print("Temperatura: %.2f °C" %temp)
time.sleep(1)
|
the-stack_106_14105
|
#!/usr/bin/env python
from bacpypes.primitivedata import Date
year_group = ('04', '75', '1929', '255', '*')
month_group = ('1', '12', 'odd', 'even', '255', '*')
day_group = ('1', '22', 'last', 'odd', 'even', '255', '*')
dow_group = ('1', 'mon', '255', '*')
patterns = [
"%(year)s-%(month)s-%(day)s %(day_of_week)s",
"%(month)s/%(day)s/%(year)s %(day_of_week)s",
"%(day)s/%(month)s/%(year)s %(day_of_week)s",
]
def permutation(**kwargs):
for pattern in patterns:
test_string = pattern % kwargs
try:
test_date = Date(test_string)
test_value = test_date.value
except Exception as why:
test_value = str(why)
print(test_string + '\t' + str(test_value))
print("")
for year in year_group:
for month in month_group:
for day in day_group:
for day_of_week in dow_group:
permutation(
year=year, month=month, day=day, day_of_week=day_of_week,
)
|
the-stack_106_14107
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GatewayRouteListResult(Model):
"""List of virtual network gateway routes.
:param value: List of gateway routes
:type value: list[~azure.mgmt.network.v2017_10_01.models.GatewayRoute]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GatewayRoute]'},
}
def __init__(self, **kwargs):
super(GatewayRouteListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
|
the-stack_106_14108
|
from .tool_io import printWeights as pw
import numpy as np
def computeAverage(model, ast):
#print("computing ASTs' weights with the average weights of their words")
words=model.wv.index2word
#words=d2v_model.wv.index2word
#print(words)
size=len(model.wv.get_vector(words[0]))
myarray=np.zeros(size)
for i in range(len(ast)):
w=model.wv.get_vector(ast[i])
myarray=myarray+w
myarray=myarray/len(ast)
return myarray
def computeAverageWithBest10(model, ast):
#print("computing ASTs' weights with the average weights of their words")
words=model.wv.index2word
#words=d2v_model.wv.index2word
#print(words)
size=len(model.wv.get_vector(words[0]))
myarray=np.zeros(size)
counter=0
for i in range(len(ast)):
if ((ast[i] in words[:10]) == False):
continue
w=model.wv.get_vector(ast[i])
myarray=myarray+w
counter=counter+1
myarray=myarray/counter
return myarray
def computeWeights(d2v_model, filename, outputDir, astFile, bugs, metrics, mheader, kind):
print("Computing weights with alternative methods")
if (kind == 1):
pw.printWeights(d2v_model, filename, outputDir, astFile, bugs, metrics, mheader, computeAverage)
elif (kind==2):
pw.printWeights(d2v_model, filename, outputDir, astFile, bugs, metrics, mheader, computeAverageWithBest10)
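# Illustrative usage sketch (not part of the original module); it assumes a gensim 3.x
# style model that exposes model.wv.index2word and model.wv.get_vector, as used above:
#
#     from gensim.models import Word2Vec
#     w2v = Word2Vec([['if', 'for', 'return']], size=50, min_count=1)
#     vec = computeAverage(w2v, ['if', 'return'])   # numpy array of length 50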
|
the-stack_106_14109
|
import src.LPReader as LPReader
from src.ModelRealizer import realize
def main(filename):
tmp_model = LPReader.read(filename)
model = realize(tmp_model)
model.Solve()
# for var in model.variables():
# print(var.name, var.solution_value())
print('Objective value =', model.Objective().Value())
assert model.Objective().Value() == 924
if __name__ == '__main__':
main("resources/lp_examples/10teams.lp")
|
the-stack_106_14110
|
import os
from flask import render_template, request, redirect, url_for, send_from_directory
from ctrp_rest_client import app, api_client
from ctrp_rest_client.forms import TrialSearchForm
# home page - redirecting straight to search form
@app.route('/')
def home():
# Render template
return render_template('home.html')
@app.route('/live_search')
def test():
# Render template
return render_template('live_search.html')
# tree
@app.route('/tree')
def tree():
# Render template
return render_template('tree.html')
# favicon serving
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static/img'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
# display information for the trial identified by the nct_id (or nci id)
@app.route('/find_trial', methods=['POST'])
def find_trial():
# parse form parameter
trial_id = request.form["trial_id"]
# Render template
return redirect(url_for('display_trial', trial_id=trial_id))
# display information for the trial identified by the nct_id (or nci id)
@app.route('/display_trial/<trial_id>', methods=['GET'])
def display_trial(trial_id):
# retrieving trial as dictionary from the CTRP API client
trial_dict = api_client.get_trial_by_nct_id(trial_id)
# Render template
return render_template('display_trial.html', trial=trial_dict)
# display search form
@app.route('/search', methods=('GET', 'POST'))
def search():
form = TrialSearchForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
search_params = _parse_search_params(form)
terms = _parse_terms(form)
# calling the API
result = api_client.find_all_trials(search_params)
return render_template('display_results.html', search_params=search_params, result=result, terms=terms)
# Render template
return render_template('search_form.html', form=form)
# parse list of names for terminology based search fields
# these are for display only as the search is using the codes (concept ids)
def _parse_terms(form):
terms = {}
if form.disease_names.data:
terms['disease_names'] = form.disease_names.data.split(', ')
if form.biomarker_names.data:
terms['biomarker_names'] = form.biomarker_names.data.split(', ')
return terms
# extract values from form fields and populate the query for the
# search request to the API
def _parse_search_params(form):
search_params = api_client.add_included_fields({})
if form.accepts_healthy_volunteers_indicator.data != 'NA':
search_params["accepts_healthy_volunteers_indicator"] = form.accepts_healthy_volunteers_indicator.data
if form.gender.data != 'Any':
search_params["eligibility.structured.gender"] = form.gender.data
_conditional_add_value(search_params, '_fulltext', form.fulltext.data)
_conditional_add_value(search_params, 'eligibility.structured.min_age_in_years_gte', form.min_age_number.data)
_conditional_add_value(search_params, 'eligibility.structured.max_age_in_years_lte', form.max_age_number.data)
disease_codes = _parse_list_values(form.disease_codes)
_conditional_add_value(search_params, 'diseases.nci_thesaurus_concept_id', disease_codes)
biomarker_codes = _parse_list_values(form.biomarker_codes)
_conditional_add_value(search_params, 'biomarkers.nci_thesaurus_concept_id', biomarker_codes)
assay_purposes = _parse_biomarker_assay_purpose(form)
_conditional_add_value(search_params, 'biomarkers.assay_purpose', assay_purposes)
phases = _parse_phase(form)
_conditional_add_value(search_params, 'phase.phase', phases)
return search_params
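# Illustrative example (not from the original source) of the kind of query dict
# _parse_search_params builds; the key names come from the assignments above,
# the values are hypothetical:
#
#     {
#         'eligibility.structured.gender': 'FEMALE',
#         'eligibility.structured.min_age_in_years_gte': 18,
#         'diseases.nci_thesaurus_concept_id': ['C4872'],
#         'phase.phase': ['II', 'II_III'],
#     }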
def _conditional_add_value(target_dict, key, value):
if value:
target_dict[key] = value
def _parse_list_values(formfield):
items = []
if formfield.data:
items = [x.strip() for x in formfield.data.split(',')]
return items
def _parse_biomarker_assay_purpose(form):
assay_purposes = []
_conditional_add_to_list(assay_purposes, ['Eligibility Criterion - Inclusion'],
form.biomarker_assay_purpose_inclusion.data)
_conditional_add_to_list(assay_purposes, ['Eligibility Criterion - Exclusion'],
form.biomarker_assay_purpose_exclusion.data)
return assay_purposes
def _parse_phase(form):
phases = []
_conditional_add_to_list(phases, ['NA'], form.phasena.data)
_conditional_add_to_list(phases, ['I', 'I_II'], form.phase1.data)
_conditional_add_to_list(phases, ['I_II', 'II', 'II_III'], form.phase2.data)
_conditional_add_to_list(phases, ['II_III', 'III'], form.phase3.data)
_conditional_add_to_list(phases, ['IV'], form.phase4.data)
return phases
def _conditional_add_to_list(target_list, values, form_value):
if form_value:
for value in values:
if value not in target_list:
target_list.append(value)
|
the-stack_106_14111
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# fmt: off
'''
Every template contains an ordered list of TemplateObjects.
TemplateObject is defined in template_objects.py
Resume templates are written for an action name and represents the intent
for the action: Resume. This action represents resuming a given or last action.
Examples:
[Human, ResumeSingle]
- resume
- start again
[Human, Resume, ActionBuild]
- resume building
- continue the build action
'''
from droidlet.dialog.ttad.generation_dialogues.template_objects import *
RESUME_TEMPLATES = [
[Human, ResumeSingle],
## Resume action name ##
[Human, Resume, ActionBuild],
[Human, Resume, ActionDestroy],
[Human, Resume, ActionTag],
[Human, Resume, ActionFill],
[Human, Resume, ActionDig],
[Human, Resume, ActionMove],
]
|
the-stack_106_14113
|
"""Provides the worker thread needed for processing streams."""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator, Mapping
from io import BytesIO
import logging
from threading import Event
from typing import Any, Callable, cast
import av
from . import redact_credentials
from .const import (
AUDIO_CODECS,
MAX_MISSING_DTS,
MAX_TIMESTAMP_GAP,
MIN_SEGMENT_DURATION,
PACKETS_TO_WAIT_FOR_AUDIO,
SEGMENT_CONTAINER_FORMAT,
SOURCE_TIMEOUT,
TARGET_PART_DURATION,
)
from .core import Part, Segment, StreamOutput
_LOGGER = logging.getLogger(__name__)
class SegmentBuffer:
"""Buffer for writing a sequence of packets to the output as a segment."""
def __init__(
self, outputs_callback: Callable[[], Mapping[str, StreamOutput]]
) -> None:
"""Initialize SegmentBuffer."""
self._stream_id: int = 0
self._outputs_callback: Callable[
[], Mapping[str, StreamOutput]
] = outputs_callback
# sequence gets incremented before the first segment so the first segment
# has a sequence number of 0.
self._sequence = -1
self._segment_start_dts: int = cast(int, None)
self._memory_file: BytesIO = cast(BytesIO, None)
self._av_output: av.container.OutputContainer = None
self._input_video_stream: av.video.VideoStream = None
self._input_audio_stream: Any | None = None # av.audio.AudioStream | None
self._output_video_stream: av.video.VideoStream = None
self._output_audio_stream: Any | None = None # av.audio.AudioStream | None
self._segment: Segment | None = None
# the following 3 member variables are used for Part formation
self._memory_file_pos: int = cast(int, None)
self._part_start_dts: int = cast(int, None)
self._part_has_keyframe = False
@staticmethod
def make_new_av(
memory_file: BytesIO, sequence: int, input_vstream: av.video.VideoStream
) -> av.container.OutputContainer:
"""Make a new av OutputContainer."""
return av.open(
memory_file,
mode="w",
format=SEGMENT_CONTAINER_FORMAT,
container_options={
# Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
# "cmaf" flag replaces several of the movflags used, but too recent to use for now
"movflags": "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
# Sometimes the first segment begins with negative timestamps, and this setting just
# adjusts the timestamps in the output from that segment to start from 0. Helps from
# having to make some adjustments in test_durations
"avoid_negative_ts": "make_non_negative",
"fragment_index": str(sequence + 1),
"video_track_timescale": str(int(1 / input_vstream.time_base)),
# Create a fragments every TARGET_PART_DURATION. The data from each fragment is stored in
# a "Part" that can be combined with the data from all the other "Part"s, plus an init
# section, to reconstitute the data in a "Segment".
"frag_duration": str(int(TARGET_PART_DURATION * 1e6)),
},
)
def set_streams(
self,
video_stream: av.video.VideoStream,
audio_stream: Any,
# no type hint for audio_stream until https://github.com/PyAV-Org/PyAV/pull/775 is merged
) -> None:
"""Initialize output buffer with streams from container."""
self._input_video_stream = video_stream
self._input_audio_stream = audio_stream
def reset(self, video_dts: int) -> None:
"""Initialize a new stream segment."""
# Keep track of the number of segments we've processed
self._sequence += 1
self._segment_start_dts = video_dts
self._segment = None
self._memory_file = BytesIO()
self._memory_file_pos = 0
self._av_output = self.make_new_av(
memory_file=self._memory_file,
sequence=self._sequence,
input_vstream=self._input_video_stream,
)
self._output_video_stream = self._av_output.add_stream(
template=self._input_video_stream
)
# Check if audio is requested
self._output_audio_stream = None
if self._input_audio_stream and self._input_audio_stream.name in AUDIO_CODECS:
self._output_audio_stream = self._av_output.add_stream(
template=self._input_audio_stream
)
def mux_packet(self, packet: av.Packet) -> None:
"""Mux a packet to the appropriate output stream."""
# Check for end of segment
if packet.stream == self._input_video_stream:
if (
packet.is_keyframe
and (packet.dts - self._segment_start_dts) * packet.time_base
>= MIN_SEGMENT_DURATION
):
# Flush segment (also flushes the stub part segment)
self.flush(packet, last_part=True)
# Reinitialize
self.reset(packet.dts)
# Mux the packet
packet.stream = self._output_video_stream
self._av_output.mux(packet)
self.check_flush_part(packet)
self._part_has_keyframe |= packet.is_keyframe
elif packet.stream == self._input_audio_stream:
packet.stream = self._output_audio_stream
self._av_output.mux(packet)
def check_flush_part(self, packet: av.Packet) -> None:
"""Check for and mark a part segment boundary and record its duration."""
if self._memory_file_pos == self._memory_file.tell():
return
if self._segment is None:
# We have our first non-zero byte position. This means the init has just
# been written. Create a Segment and put it to the queue of each output.
self._segment = Segment(
sequence=self._sequence,
stream_id=self._stream_id,
init=self._memory_file.getvalue(),
)
self._memory_file_pos = self._memory_file.tell()
self._part_start_dts = self._segment_start_dts
# Fetch the latest StreamOutputs, which may have changed since the
# worker started.
for stream_output in self._outputs_callback().values():
stream_output.put(self._segment)
else: # These are the ends of the part segments
self.flush(packet, last_part=False)
def flush(self, packet: av.Packet, last_part: bool) -> None:
"""Output a part from the most recent bytes in the memory_file.
If last_part is True, also close the segment, give it a duration,
and clean up the av_output and memory_file.
"""
if last_part:
# Closing the av_output will write the remaining buffered data to the
# memory_file as a new moof/mdat.
self._av_output.close()
assert self._segment
self._memory_file.seek(self._memory_file_pos)
self._segment.parts.append(
Part(
duration=float((packet.dts - self._part_start_dts) * packet.time_base),
has_keyframe=self._part_has_keyframe,
data=self._memory_file.read(),
)
)
if last_part:
self._segment.duration = float(
(packet.dts - self._segment_start_dts) * packet.time_base
)
self._memory_file.close() # We don't need the BytesIO object anymore
else:
self._memory_file_pos = self._memory_file.tell()
self._part_start_dts = packet.dts
self._part_has_keyframe = False
def discontinuity(self) -> None:
"""Mark the stream as having been restarted."""
# Preserving sequence and stream_id here keep the HLS playlist logic
# simple to check for discontinuity at output time, and to determine
# the discontinuity sequence number.
self._stream_id += 1
def close(self) -> None:
"""Close stream buffer."""
self._av_output.close()
self._memory_file.close()
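# Rough call sequence for SegmentBuffer (mirrors how stream_worker below drives it):
#
#     buffer.set_streams(video_stream, audio_stream)
#     buffer.reset(first_video_keyframe_dts)
#     buffer.mux_packet(packet)   # repeatedly; parts/segments are flushed internally
#     buffer.close()              # on shutdown or error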
def stream_worker( # noqa: C901
source: str,
options: dict[str, str],
segment_buffer: SegmentBuffer,
quit_event: Event,
) -> None:
"""Handle consuming streams."""
try:
container = av.open(source, options=options, timeout=SOURCE_TIMEOUT)
except av.AVError:
_LOGGER.error("Error opening stream %s", redact_credentials(str(source)))
return
try:
video_stream = container.streams.video[0]
except (KeyError, IndexError):
_LOGGER.error("Stream has no video")
container.close()
return
try:
audio_stream = container.streams.audio[0]
except (KeyError, IndexError):
audio_stream = None
# These formats need aac_adtstoasc bitstream filter, but auto_bsf not
# compatible with empty_moov and manual bitstream filters not in PyAV
if container.format.name in {"hls", "mpegts"}:
audio_stream = None
# Some audio streams do not have a profile and throw errors when remuxing
if audio_stream and audio_stream.profile is None:
audio_stream = None
# Iterator for demuxing
container_packets: Iterator[av.Packet]
# The decoder timestamps of the latest packet in each stream we processed
last_dts = {video_stream: float("-inf"), audio_stream: float("-inf")}
# Keep track of consecutive packets without a dts to detect end of stream.
missing_dts = 0
# The video dts at the beginning of the segment
segment_start_dts: int | None = None
# Because of problems 1 and 2 below, we need to store the first few packets and replay them
initial_packets: deque[av.Packet] = deque()
# Have to work around two problems with RTSP feeds in ffmpeg
# 1 - first frame has bad pts/dts https://trac.ffmpeg.org/ticket/5018
# 2 - seeking can be problematic https://trac.ffmpeg.org/ticket/7815
def peek_first_dts() -> bool:
"""Initialize by peeking into the first few packets of the stream.
Deal with problem #1 above (bad first packet pts/dts) by recalculating using pts/dts from second packet.
Also load the first video keyframe dts into segment_start_dts and check if the audio stream really exists.
"""
nonlocal segment_start_dts, audio_stream, container_packets
missing_dts = 0
found_audio = False
try:
container_packets = container.demux((video_stream, audio_stream))
first_packet: av.Packet | None = None
# Get to first video keyframe
while first_packet is None:
packet = next(container_packets)
if (
packet.dts is None
): # Allow MAX_MISSING_DTS packets with no dts, raise error on the next one
if missing_dts >= MAX_MISSING_DTS:
raise StopIteration(
f"Invalid data - got {MAX_MISSING_DTS+1} packets with missing DTS while initializing"
)
missing_dts += 1
continue
if packet.stream == audio_stream:
found_audio = True
elif packet.is_keyframe: # video_keyframe
first_packet = packet
initial_packets.append(packet)
# Get first_dts from subsequent frame to first keyframe
while segment_start_dts is None or (
audio_stream
and not found_audio
and len(initial_packets) < PACKETS_TO_WAIT_FOR_AUDIO
):
packet = next(container_packets)
if (
packet.dts is None
): # Allow MAX_MISSING_DTS packet with no dts, raise error on the next one
if missing_dts >= MAX_MISSING_DTS:
raise StopIteration(
f"Invalid data - got {MAX_MISSING_DTS+1} packets with missing DTS while initializing"
)
missing_dts += 1
continue
if packet.stream == audio_stream:
# detect ADTS AAC and disable audio
if audio_stream.codec.name == "aac" and packet.size > 2:
with memoryview(packet) as packet_view:
if packet_view[0] == 0xFF and packet_view[1] & 0xF0 == 0xF0:
_LOGGER.warning(
"ADTS AAC detected - disabling audio stream"
)
container_packets = container.demux(video_stream)
audio_stream = None
continue
found_audio = True
elif (
segment_start_dts is None
): # This is the second video frame to calculate first_dts from
segment_start_dts = packet.dts - packet.duration
first_packet.pts = first_packet.dts = segment_start_dts
initial_packets.append(packet)
if audio_stream and not found_audio:
_LOGGER.warning(
"Audio stream not found"
) # Some streams declare an audio stream and never send any packets
except (av.AVError, StopIteration) as ex:
_LOGGER.error(
"Error demuxing stream while finding first packet: %s", str(ex)
)
return False
return True
if not peek_first_dts():
container.close()
return
segment_buffer.set_streams(video_stream, audio_stream)
assert isinstance(segment_start_dts, int)
segment_buffer.reset(segment_start_dts)
while not quit_event.is_set():
try:
if len(initial_packets) > 0:
packet = initial_packets.popleft()
else:
packet = next(container_packets)
if packet.dts is None:
# Allow MAX_MISSING_DTS consecutive packets without dts. Terminate the stream on the next one.
if missing_dts >= MAX_MISSING_DTS:
raise StopIteration(
f"No dts in {MAX_MISSING_DTS+1} consecutive packets"
)
missing_dts += 1
continue
missing_dts = 0
except (av.AVError, StopIteration) as ex:
_LOGGER.error("Error demuxing stream: %s", str(ex))
break
# Discard packet if dts is not monotonic
if packet.dts <= last_dts[packet.stream]:
if (
packet.time_base * (last_dts[packet.stream] - packet.dts)
> MAX_TIMESTAMP_GAP
):
_LOGGER.warning(
"Timestamp overflow detected: last dts %s, dts = %s, resetting stream",
last_dts[packet.stream],
packet.dts,
)
break
continue
# Update last_dts processed
last_dts[packet.stream] = packet.dts
# Mux packets, and possibly write a segment to the output stream.
# This mutates packet timestamps and stream
segment_buffer.mux_packet(packet)
# Close stream
segment_buffer.close()
container.close()
|
the-stack_106_14114
|
import sys
import linecache
import copy
option = sys.argv
ifinfo = []
value_name_key = ['set', 'policy', 'id', 'policy_id', 'name', 'value_name', 'from', 'src_zone', 'to',
'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'nat', 'src', 'dst', 'ip', 'dst_nat_ip', 'expect', 'log']
value_name_keyex = ['set', 'policy', 'id', 'policy_id', 'name', 'value_name',
'from', 'src_zone', 'to', 'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'expect']
value_noname_key = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone',
'to', 'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'expect', 'log']
value_noname_key1 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone',
'to', 'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'nat', 'src', 'expect', 'log']
value_noname_key2 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone', 'to', 'dst_zone',
'src_ip', 'dst_ip', 'protocol', 'nat', 'src', 'dip_id', 'dip_num', 'expect', 'log']
value_noname_key3 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone', 'to',
'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'nat', 'src', 'ip', 'src_nat_ip', 'expect', 'log']
value_noname_key4 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone', 'to',
'dst_zone', 'src_ip', 'dst_ip', 'protocol', 'nat', 'dst', 'ip', 'dst_nat_ip', 'expect', 'log']
value_noname_key5 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone', 'to', 'dst_zone',
'src_ip', 'dst_ip', 'protocol', 'nat', 'src', 'dst', 'ip', 'dst_nat_ip', 'expect', 'log']
value_noname_key6 = ['set', 'policy', 'id', 'policy_id', 'from', 'src_zone', 'to', 'dst_zone', 'src_ip',
'dst_ip', 'protocol', 'nat', 'dst', 'ip', 'dst_nat_ip', 'port', 'dst_nat_port', 'expect', 'log']
service_key = ['set', 'service', 'service_name', 'protocol', 'protocol_name',
'src-port', 'src_port_num', 'dst_port', 'dst_port_num', 'timeout', 'timeouttime']
route_key = ['set', 'route', 'network_address', 'interface',
'if_name', 'gateway', 'gateway_ip', 'metric', 'metric_num']
address_key = ['set', 'address', 'zone_name',
'address_name', 'ip_address', 'subnet_mask']
group_address_key = ['set', 'group', 'address',
'zone_name', 'group_name', 'add', 'address_name']
group_service_key = ['set', 'group', 'service',
'group_service_name', 'add', 'service_name']
vip_key = ['set', 'interface', 'if_name', 'vip',
'global_ip', 'port_num', 'service_name', 'private_ip']
vip_keys = ['set', 'interface', 'if_name', 'vip', 'global_ip',
'+', 'port_num', 'service_name', 'private_ip']
mip_key = ['set', 'interface', 'if_name', 'mip', 'private_ip',
'host', 'global_ip', 'netmask', 'subnet_mask', 'vr', 'vr_name']
dip_key = ['set', 'interface', 'if_name',
'dip', 'dip_num', 'start_ip', 'fish_ip']
dip_ext_key = ['set', 'interface', 'if_name', 'ext', 'ip', 'global_ip',
'subnet_mask', 'dip', 'dip_num', 'start_ip', 'fish_ip']
if_ip_key = ['set', 'interface', 'if_name', 'ip', 'ip_address']
if_nat_key = ['set', 'interface', 'if_name', 'nat']
if_mip_key = ['set', 'interface', 'if_name', 'mip', 'ip_address',
              'host', 'global_ip', 'netmask', 'subnet_mask', 'vr', 'vr_name']
if_zone_key = ['set', 'interface', 'if_name', 'zone', 'zone_name']
if_zonev_key = ['set', 'interface', 'if_name',
'tag', 'vlan_num', 'zone', 'zone_name']
zone_block_key = ['set', 'zone', 'zone_name', 'block']
disable_policy_key = ['set', 'policy', 'id', 'policy_id', 'disable']
policy_dict = []
service_dict = []
route_dict = []
address_dict = []
group_address_dict = []
group_service_dict = []
vip_dict = []
mip_dict = []
dip_dict = []
if_ip_dict = []
if_nat_dict = []
if_zone_dict = []
zone_block_dict = []
disable_policy_dict = []
def convert_list_to_dict(key, value, dictionary):
d = {k: v for k, v in zip(
key, value)}
dictionary.append(d)
def policy_multicell(file_name, target_line_no):
i = 0
policy_element = []
policy_value_list = []
target_line = []
base_policy = []
while True:
        # Get the value of the line that the caller of this function was processing
        config_file = open(file_name, 'r')
        target_line = config_file.readlines()[target_line_no - 1]
        config_file.close()
        # Collect the values from the "set policy id **" line through "exit"
if 'exit' in target_line:
break
else:
policy_element = target_line.strip().split()
if 'set' in policy_element and 'policy' in policy_element and 'from' in policy_element :
                # First line of the policy definition (the base policy)
base_policy = policy_element
policy_value_list.append(base_policy)
elif 'set' in policy_element and 'src-address' in policy_element :
policy_src_address = copy.copy(base_policy)
                # Named policy: put the multi-cell element into the 11th item (src-address) of the base_policy list
                # Unnamed policy: put the multi-cell element into the 9th item (src-address) of the base_policy list
if base_policy[4] == "name" :
policy_src_address[10] = policy_element[2]
else:
policy_src_address[8] = policy_element[2]
policy_value_list.append(policy_src_address)
elif 'set' in policy_element and 'dst-address' in policy_element :
policy_dst_address = copy.copy(base_policy)
                # Named policy: put the multi-cell element into the 12th item (dst-address) of the base_policy list
                # Unnamed policy: put the multi-cell element into the 10th item (dst-address) of the base_policy list
if base_policy[4] == "name" :
policy_dst_address[11] = policy_element[2]
else:
policy_dst_address[9] = policy_element[2]
policy_value_list.append(policy_dst_address)
elif 'set' in policy_element and 'service' in policy_element :
policy_service = copy.copy(base_policy)
                # Named policy: put the multi-cell element into the 13th item (service) of the base_policy list
                # Unnamed policy: put the multi-cell element into the 11th item (service) of the base_policy list
if base_policy[4] == "name" :
policy_service[12] = policy_element[2]
else:
policy_service[10] = policy_element[2]
policy_value_list.append(policy_service)
target_line_no += 1
i += 1
return policy_value_list
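# Illustrative (hypothetical) multi-cell policy block of the shape this parser expects:
# a "set policy id ... from ..." base line, followed by src-address / dst-address /
# service lines, terminated by "exit":
#
#     set policy id 20 from "Trust" to "Untrust" "any" "any" "HTTP" permit log
#     set policy id 20
#     set src-address "host-10.0.0.1"
#     set dst-address "server-net"
#     set service "HTTPS"
#     exit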
def takeout_policy_value(key, value, dictionary):
    i = 0
    # Take each policy entry out of the value list and pass it to convert_list_to_dict
for v in value :
convert_list_to_dict(key, value[i], dictionary)
i += 1
def append_noname_to_policy_dict(value):
i = 0
dictionary = policy_dict
    # Check each policy entry in the value list
for v in value :
if len(value[i]) == 13 and "log" in value[i] or len(value[i]) == 12 and "log" not in value[i]:
key = value_noname_key
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 15 and "log" in value[i] or len(value[i]) == 14 and "log" not in value[i]:
key = value_noname_key1
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 17 and "src" in value[i] and "dip-id" in value[i] and "log" in value[i] or len(value[i]) == 16 and "src" in value[i] and "dip-id" in value[i] and "log" not in value[i]:
key = value_noname_key2
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 17 and "src" in value[i] and "log" in value[i] or len(value[i]) == 16 and "src" in value[i] and "log" not in value[i]:
key = value_noname_key3
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 17 and "dst" in value[i] and "log" in value[i] or len(value[i]) == 16 and "dst" in value[i] and "log" not in value[i]:
key = value_noname_key4
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 18 and "log" in value[i] or len(value[i]) == 17 and "log" not in value[i]:
key = value_noname_key5
convert_list_to_dict(key, value[i], dictionary)
elif len(value[i]) == 19 and "log" in value[i] or len(value[i]) == 18 and "log" not in value[i]:
key = value_noname_key6
convert_list_to_dict(key, value[i], dictionary)
i += 1
def append_if_zone_to_zone_dict(value):
if_zone = value
if '"bri0/0"' in if_zone:
pass
elif "tag" in if_zone:
d = {k: v for k, v in zip(if_zonev_key, if_zone)}
if_zone_dict.append(d)
else:
d = {k: v for k, v in zip(if_zone_key, if_zone)}
if_zone_dict.append(d)
def create_ifinfo():
global ifinfo
for if_zone_c in if_zone_dict:
flag = False
for if_ip_c in if_ip_dict:
if if_zone_c['if_name'].replace('"', '') in if_ip_c['if_name']:
flag = True
d = {'IF_Name': if_zone_c['if_name'].replace('"', ''), 'Zone': if_zone_c['zone_name'], 'IP': if_ip_c.get('ip_address')}
ifinfo.append(d)
else:
if not flag:
d = {'IF_Name': if_zone_c['if_name'], 'Zone': if_zone_c['zone_name'], 'IP': 'None'}
ifinfo.append(d)
def absorb_config():
line_count = 0
with open(file_name) as f:
for line in f:
line_count += 1
value = line.strip().split()
if "manageable" in line or "manage-ip" in line or "bypass" in line or "proxy-arp-entry" in line or "mtu" in line or "unset" in line or "sharable" in line:
continue
if "set policy id" in line and "name" in line and "from" in line:
dictionary = policy_dict
if len(value) == 20 and "log" in value or len(value) == 19 and "log" not in value:
key = value_name_key
value = policy_multicell(file_name, line_count)
takeout_policy_value(key, value, dictionary)
elif len(value) == 15 and "log" in value or len(value) == 14 and "log" not in value:
key = value_name_keyex
value = policy_multicell(file_name, line_count)
takeout_policy_value(key, value, dictionary)
elif "set policy id" in line and not "name" in line and "from" in line:
value = policy_multicell(file_name, line_count)
append_noname_to_policy_dict(value)
elif "set group service" in line:
group_service = value
dictionary = group_service_dict
if len(group_service) == 6:
key = group_service_key
convert_list_to_dict(key, value, dictionary)
elif "set service" in line:
service = value
if len(service) >= 9:
d = {k: v for k, v in zip(service_key, service)}
service_dict.append(d)
elif "set route" in line and "interface" in line:
route = value
d = {k: v for k, v in zip(route_key, route)}
route_dict.append(d)
elif "set group address" in line and "comment" not in line:
group_address = value
if len(group_address) == 7:
d = {k: v for k, v in zip(
group_address_key, group_address)}
group_address_dict.append(d)
elif "set address" in line:
address = value
d = {k: v for k, v in zip(address_key, address)}
address_dict.append(d)
elif "set interface" in line and "vip" in line:
vip = value
if "+" in vip:
d = {k: v for k, v in zip(vip_keys, vip)}
vip_dict.append(d)
else:
d = {k: v for k, v in zip(vip_key, vip)}
vip_dict.append(d)
elif "set interface" in line and "dip" in line:
dip = value
if "ext" in dip:
d = {k: v for k, v in zip(dip_ext_key, dip)}
dip_dict.append(d)
else:
d = {k: v for k, v in zip(dip_key, dip)}
dip_dict.append(d)
elif "set interface" in line and "mip" in line:
mip = value
d = {k: v for k, v in zip(mip_key, mip)}
mip_dict.append(d)
elif "set interface" in line and "ip" in line:
if_ip = value
d = {k: v for k, v in zip(if_ip_key, if_ip)}
if_ip_dict.append(d)
elif "set interface" in line and "nat" in line:
if_nat = value
d = {k: v for k, v in zip(if_nat_key, if_nat)}
if_nat_dict.append(d)
elif "set interface" in line and "zone" in line:
append_if_zone_to_zone_dict(value)
elif "set zone" in line and "block" in line:
dictionary = zone_block_dict
key = zone_block_key
convert_list_to_dict(key, value, dictionary)
elif "disable" in line:
dictionary = disable_policy_dict
key = disable_policy_key
convert_list_to_dict(key, value, dictionary)
else:
continue
def exclude_disable_policy():
for policy in policy_dict:
for disable_policy in disable_policy_dict:
if policy['policy_id'] == disable_policy['policy_id']:
policy_dict.remove(policy)
break
else:
continue
def handle_disable_policy_output():
if disable_policy_output == 'y':
        print('Policies that are not enabled will also be output')
absorb_config()
elif disable_policy_output == 'n':
        print('Policies that are not enabled will not be output')
absorb_config()
exclude_disable_policy()
else:
        print('When providing the second argument, please enter y or n')
exit()
def confirm_file():
global file_name
try:
file_name = option[1]
except IndexError:
        print('Please provide the config file name')
exit()
def confirm_disable_policy_output():
global disable_policy_output
if len(option) == 2:
        print('The output option for policies that are not enabled was not provided')
disable_policy_output = 'n'
else:
disable_policy_output = option[2]
handle_disable_policy_output()
confirm_file()
confirm_disable_policy_output()
create_ifinfo()
|
the-stack_106_14115
|
import os
import warnings
import torch
import numpy as np
import soundfile as sf
try:
from typing import Protocol
except ImportError: # noqa
# Python < 3.8
class Protocol:
pass
from .dsp.overlap_add import LambdaOverlapAdd
from .utils import get_device
class Separatable(Protocol):
"""Things that are separatable."""
def forward_wav(self, wav, **kwargs):
"""
Args:
wav (torch.Tensor): waveform tensor.
Shape: 1D, 2D or 3D tensor, time last.
**kwargs: Keyword arguments from `separate`.
Returns:
torch.Tensor: the estimated sources.
Shape: [batch, n_src, time] or [n_src, time] if the input `wav`
did not have a batch dim.
"""
...
@property
def sample_rate(self):
"""Operating sample rate of the model (float)."""
...
def separate(
model: Separatable, wav, output_dir=None, force_overwrite=False, resample=False, **kwargs
):
"""Infer separated sources from input waveforms.
Also supports filenames.
Args:
model (Separatable, for example asteroid.models.BaseModel): Model to use.
wav (Union[torch.Tensor, numpy.ndarray, str]): waveform array/tensor.
Shape: 1D, 2D or 3D tensor, time last.
output_dir (str): path to save all the wav files. If None,
estimated sources will be saved next to the original ones.
force_overwrite (bool): whether to overwrite existing files
(when separating from file).
resample (bool): Whether to resample input files with wrong sample rate
(when separating from file).
**kwargs: keyword arguments to be passed to `forward_wav`.
Returns:
Union[torch.Tensor, numpy.ndarray, None], the estimated sources.
(batch, n_src, time) or (n_src, time) w/o batch dim.
.. note::
`separate` calls `model.forward_wav` which calls `forward` by default.
For models whose `forward` doesn't have waveform tensors as input/ouput,
overwrite their `forward_wav` method to separate from waveform to waveform.
"""
if isinstance(wav, str):
file_separate(
model,
wav,
output_dir=output_dir,
force_overwrite=force_overwrite,
resample=resample,
**kwargs,
)
elif isinstance(wav, np.ndarray):
return numpy_separate(model, wav, **kwargs)
elif isinstance(wav, torch.Tensor):
return torch_separate(model, wav, **kwargs)
else:
raise ValueError(
f"Only support filenames, numpy arrays and torch tensors, received {type(wav)}"
)
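# Illustrative usage sketch (not part of the original module); `model` stands for any
# asteroid model exposing `forward_wav` and `sample_rate`:
#
#     separate(model, "mixture.wav", output_dir="estimates", resample=True)  # writes mixture_est1.wav, ...
#     est_sources = separate(model, mixture_tensor)  # tensor of shape (batch, n_src, time)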
@torch.no_grad()
def torch_separate(model: Separatable, wav: torch.Tensor, **kwargs) -> torch.Tensor:
"""Core logic of `separate`."""
# Handle device placement
input_device = get_device(wav, default="cpu")
model_device = get_device(model, default="cpu")
wav = wav.to(model_device)
# Forward
separate_func = getattr(model, "forward_wav", model)
out_wavs = separate_func(wav, **kwargs)
# FIXME: for now this is the best we can do.
out_wavs *= wav.abs().sum() / (out_wavs.abs().sum())
# Back to input device (and numpy if necessary)
out_wavs = out_wavs.to(input_device)
return out_wavs
def numpy_separate(model: Separatable, wav: np.ndarray, **kwargs) -> np.ndarray:
"""Numpy interface to `separate`."""
wav = torch.from_numpy(wav)
out_wavs = torch_separate(model, wav, **kwargs)
out_wavs = out_wavs.data.numpy()
return out_wavs
def file_separate(
model: Separatable,
filename: str,
output_dir=None,
force_overwrite=False,
resample=False,
**kwargs,
) -> None:
"""Filename interface to `separate`."""
if not hasattr(model, "sample_rate"):
raise TypeError(
f"This function requires your model ({type(model).__name__}) to have a "
"'sample_rate' attribute. See `BaseModel.sample_rate` for details."
)
# Estimates will be saved as filename_est1.wav etc...
base, _ = os.path.splitext(filename)
if output_dir is not None:
base = os.path.join(output_dir, os.path.basename(base))
save_name_template = base + "_est{}.wav"
# Bail out early if an estimate file already exists and we shall not overwrite.
est1_filename = save_name_template.format(1)
if os.path.isfile(est1_filename) and not force_overwrite:
warnings.warn(
f"File {est1_filename} already exists, pass `force_overwrite=True` to overwrite it",
UserWarning,
)
return
# SoundFile wav shape: [time, n_chan]
wav, fs = _load_audio(filename)
if wav.shape[-1] > 1:
warnings.warn(
f"Received multichannel signal with {wav.shape[-1]} signals, "
f"using the first channel only."
)
# FIXME: support only single-channel files for now.
if resample:
wav = _resample(wav[:, 0], orig_sr=fs, target_sr=int(model.sample_rate))[:, None]
elif fs != model.sample_rate:
raise RuntimeError(
f"Received a signal with a sampling rate of {fs}Hz for a model "
f"of {model.sample_rate}Hz. You can pass `resample=True` to resample automatically."
)
# Pass wav as [batch, n_chan, time]; here: [1, 1, time]
wav = wav[:, 0][None, None]
(est_srcs,) = numpy_separate(model, wav, **kwargs)
# Resample to original sr
est_srcs = [
_resample(est_src, orig_sr=int(model.sample_rate), target_sr=fs) for est_src in est_srcs
]
# Save wav files to filename_est1.wav etc...
for src_idx, est_src in enumerate(est_srcs, 1):
sf.write(save_name_template.format(src_idx), est_src, fs)
def _resample(wav, orig_sr, target_sr, _resamplers={}):
from julius import ResampleFrac
if orig_sr == target_sr:
return wav
# Cache ResampleFrac instance to speed up resampling if we're repeatedly
# resampling between the same two sample rates.
try:
resampler = _resamplers[(orig_sr, target_sr)]
except KeyError:
resampler = _resamplers[(orig_sr, target_sr)] = ResampleFrac(orig_sr, target_sr)
return resampler(torch.from_numpy(wav)).numpy()
def _load_audio(filename):
try:
return sf.read(filename, dtype="float32", always_2d=True)
except Exception as sf_err:
# If soundfile fails to load the file, try with librosa next, which uses
# the 'audioread' library to support a wide range of audio formats.
# We try with soundfile first because librosa takes a long time to import.
try:
import librosa
except ModuleNotFoundError:
raise RuntimeError(
f"Could not load file {filename!r} with soundfile. "
"Install 'librosa' to be able to load more file types."
) from sf_err
wav, sr = librosa.load(filename, dtype="float32", sr=None)
# Always return wav of shape [time, n_chan]
if wav.ndim == 1:
return wav[:, None], sr
else:
return wav.T, sr
|
the-stack_106_14116
|
"""IHC component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ihc/
"""
import logging
import os.path
import xml.etree.ElementTree
import voluptuous as vol
from homeassistant.components.ihc.const import (
ATTR_IHC_ID, ATTR_VALUE, CONF_INFO, CONF_AUTOSETUP,
CONF_BINARY_SENSOR, CONF_LIGHT, CONF_SENSOR, CONF_SWITCH,
CONF_XPATH, CONF_NODE, CONF_DIMMABLE, CONF_INVERTING,
SERVICE_SET_RUNTIME_VALUE_BOOL, SERVICE_SET_RUNTIME_VALUE_INT,
SERVICE_SET_RUNTIME_VALUE_FLOAT)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
CONF_URL, CONF_USERNAME, CONF_PASSWORD, CONF_ID, CONF_NAME,
CONF_UNIT_OF_MEASUREMENT, CONF_TYPE, TEMP_CELSIUS)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
REQUIREMENTS = ['ihcsdk==2.1.1']
DOMAIN = 'ihc'
IHC_DATA = 'ihc'
IHC_CONTROLLER = 'controller'
IHC_INFO = 'info'
AUTO_SETUP_YAML = 'ihc_auto_setup.yaml'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_AUTOSETUP, default=True): cv.boolean,
vol.Optional(CONF_INFO, default=True): cv.boolean
}),
}, extra=vol.ALLOW_EXTRA)
AUTO_SETUP_SCHEMA = vol.Schema({
vol.Optional(CONF_BINARY_SENSOR, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_XPATH): cv.string,
vol.Required(CONF_NODE): cv.string,
vol.Optional(CONF_TYPE): cv.string,
vol.Optional(CONF_INVERTING, default=False): cv.boolean,
})
]),
vol.Optional(CONF_LIGHT, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_XPATH): cv.string,
vol.Required(CONF_NODE): cv.string,
vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
})
]),
vol.Optional(CONF_SENSOR, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_XPATH): cv.string,
vol.Required(CONF_NODE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=TEMP_CELSIUS): cv.string,
})
]),
vol.Optional(CONF_SWITCH, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_XPATH): cv.string,
vol.Required(CONF_NODE): cv.string,
})
]),
})
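# Illustrative shape of an ihc_auto_setup.yaml matching AUTO_SETUP_SCHEMA above; the
# xpath and node values here are hypothetical, not taken from the bundled file:
#
#     binary_sensor:
#       - xpath: './/product_dataline[@product_identifier="_0x2109"]'
#         node: 'dataline_input'
#         inverting: false
#     light:
#       - xpath: './/product_airlink[@product_identifier="_0x4406"]'
#         node: 'airlink_dimming'
#         dimmable: true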
SET_RUNTIME_VALUE_BOOL_SCHEMA = vol.Schema({
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): cv.boolean
})
SET_RUNTIME_VALUE_INT_SCHEMA = vol.Schema({
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): int
})
SET_RUNTIME_VALUE_FLOAT_SCHEMA = vol.Schema({
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Coerce(float)
})
_LOGGER = logging.getLogger(__name__)
IHC_PLATFORMS = ('binary_sensor', 'light', 'sensor', 'switch')
def setup(hass, config):
"""Setup the IHC component."""
from ihcsdk.ihccontroller import IHCController
conf = config[DOMAIN]
url = conf[CONF_URL]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
ihc_controller = IHCController(url, username, password)
if not ihc_controller.authenticate():
_LOGGER.error("Unable to authenticate on ihc controller.")
return False
if (conf[CONF_AUTOSETUP] and
not autosetup_ihc_products(hass, config, ihc_controller)):
return False
hass.data[IHC_DATA] = {
IHC_CONTROLLER: ihc_controller,
IHC_INFO: conf[CONF_INFO]}
setup_service_functions(hass, ihc_controller)
return True
def autosetup_ihc_products(hass: HomeAssistantType, config, ihc_controller):
"""Auto setup of IHC products from the ihc project file."""
project_xml = ihc_controller.get_project()
if not project_xml:
_LOGGER.error("Unable to read project from ihc controller.")
return False
project = xml.etree.ElementTree.fromstring(project_xml)
# if an auto setup file exist in the configuration it will override
yaml_path = hass.config.path(AUTO_SETUP_YAML)
if not os.path.isfile(yaml_path):
yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
yaml = load_yaml_config_file(yaml_path)
try:
auto_setup_conf = AUTO_SETUP_SCHEMA(yaml)
except vol.Invalid as exception:
_LOGGER.error("Invalid IHC auto setup data: %s", exception)
return False
groups = project.findall('.//group')
for component in IHC_PLATFORMS:
component_setup = auto_setup_conf[component]
discovery_info = get_discovery_info(component_setup, groups)
if discovery_info:
discovery.load_platform(hass, component, DOMAIN, discovery_info,
config)
return True
def get_discovery_info(component_setup, groups):
"""Get discovery info for specified component."""
discovery_data = {}
for group in groups:
groupname = group.attrib['name']
for product_cfg in component_setup:
products = group.findall(product_cfg[CONF_XPATH])
for product in products:
nodes = product.findall(product_cfg[CONF_NODE])
for node in nodes:
if ('setting' in node.attrib
and node.attrib['setting'] == 'yes'):
continue
ihc_id = int(node.attrib['id'].strip('_'), 0)
name = '{}_{}'.format(groupname, ihc_id)
device = {
'ihc_id': ihc_id,
'product': product,
'product_cfg': product_cfg}
discovery_data[name] = device
return discovery_data
def setup_service_functions(hass: HomeAssistantType, ihc_controller):
"""Setup the ihc service functions."""
def set_runtime_value_bool(call):
"""Set a IHC runtime bool value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_bool(ihc_id, value)
def set_runtime_value_int(call):
"""Set a IHC runtime integer value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_int(ihc_id, value)
def set_runtime_value_float(call):
"""Set a IHC runtime float value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_float(ihc_id, value)
hass.services.register(DOMAIN, SERVICE_SET_RUNTIME_VALUE_BOOL,
set_runtime_value_bool,
schema=SET_RUNTIME_VALUE_BOOL_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_RUNTIME_VALUE_INT,
set_runtime_value_int,
schema=SET_RUNTIME_VALUE_INT_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_RUNTIME_VALUE_FLOAT,
set_runtime_value_float,
schema=SET_RUNTIME_VALUE_FLOAT_SCHEMA)
def validate_name(config):
"""Validate device name."""
if CONF_NAME in config:
return config
ihcid = config[CONF_ID]
name = 'ihc_{}'.format(ihcid)
config[CONF_NAME] = name
return config
|
the-stack_106_14117
|
import os
import sys
import subprocess
sys.path.append('/home/cyakaboski/src/python/projects/bkb-pathway-provider/core')
from query import Query
DRIVER_PATH = '/home/cyakaboski/src/python/projects/bkb-pathway-provider/core'
#-- Setup Query
for i in range(2):
query0 = Query(name='query{}'.format(i),
evidence=dict(),
targets=list(),
meta_evidence=[('Age_of_Diagnosis', '<=', 9706)],
meta_targets=[('Survival_Time', '>=', 943)])
#-- Save the query.
pickle_file, json_file = query0.save(os.getcwd())
#-- Build system command
command = ['python3', os.path.join(DRIVER_PATH, 'driver.py'),
'--config_file', os.path.join(DRIVER_PATH, 'driver.config'),
'--headless',
'--query_file', pickle_file,
'--save_dir', os.getcwd()]
subprocess.run(command)
|
the-stack_106_14119
|
'''
Function:
Implementation of LRASPPNet
Author:
Zhenchao Jin
'''
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...backbones import *
from ..base import BaseModel
'''LRASPPNet'''
class LRASPPNet(BaseModel):
def __init__(self, cfg, **kwargs):
super(LRASPPNet, self).__init__(cfg, **kwargs)
align_corners, norm_cfg, act_cfg = self.align_corners, self.norm_cfg, self.act_cfg
# build aspp
aspp_cfg = cfg['aspp']
self.branch_convs, self.branch_ups = nn.Sequential(), nn.Sequential()
for idx, branch_channels in enumerate(aspp_cfg['branch_channels_list']):
self.branch_convs.add_module(
f'conv{idx}',
nn.Conv2d(aspp_cfg['in_channels_list'][idx], branch_channels, kernel_size=1, stride=1, padding=0, bias=False)
)
self.branch_ups.add_module(
f'conv{idx}',
nn.Sequential(
nn.Conv2d(aspp_cfg['out_channels'] + branch_channels, aspp_cfg['out_channels'], kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (aspp_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
)
self.aspp_conv = nn.Sequential(
nn.Conv2d(aspp_cfg['in_channels_list'][-1], aspp_cfg['out_channels'], kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (aspp_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
self.image_pool = nn.Sequential(
nn.AvgPool2d(kernel_size=49, stride=(16, 20)),
nn.Conv2d(aspp_cfg['in_channels_list'][-1], aspp_cfg['out_channels'], kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (aspp_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation('sigmoid', **{}),
)
self.bottleneck = nn.Conv2d(aspp_cfg['out_channels'], aspp_cfg['out_channels'], kernel_size=1, stride=1, padding=0, bias=False)
# build decoder
decoder_cfg = cfg['decoder']
self.decoder = nn.Sequential(
nn.Dropout2d(decoder_cfg['dropout']),
nn.Conv2d(decoder_cfg['in_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
)
# freeze normalization layer if necessary
if cfg.get('is_freeze_norm', False): self.freezenormalization()
'''forward'''
def forward(self, x, targets=None, losses_cfg=None):
h, w = x.size(2), x.size(3)
# feed to backbone network
outputs = self.transforminputs(self.backbone_net(x), selected_indices=self.cfg['backbone'].get('selected_indices'))
# feed to aspp
feats = self.aspp_conv(outputs[-1]) * F.interpolate(self.image_pool(outputs[-1]), size=outputs[-1].size()[2:], mode='bilinear', align_corners=self.align_corners)
feats = self.bottleneck(feats)
for idx in range(len(self.cfg['aspp']['branch_channels_list']) - 1, -1, -1):
feats = F.interpolate(feats, size=outputs[idx].size()[2:], mode='bilinear', align_corners=self.align_corners)
feats = torch.cat([feats, self.branch_convs[idx](outputs[idx])], dim=1)
feats = self.branch_ups[idx](feats)
# feed to decoder
preds = self.decoder(feats)
# feed to auxiliary decoder and return according to the mode
if self.mode == 'TRAIN':
preds = F.interpolate(preds, size=(h, w), mode='bilinear', align_corners=self.align_corners)
return self.calculatelosses(
predictions={'loss_cls': preds},
targets=targets,
losses_cfg=losses_cfg
)
return preds
'''return all layers'''
def alllayers(self):
return {
'backbone_net': self.backbone_net,
'branch_convs': self.branch_convs,
'branch_ups': self.branch_ups,
'aspp_conv': self.aspp_conv,
'image_pool': self.image_pool,
'bottleneck': self.bottleneck,
'decoder': self.decoder,
}
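# Sketch (assumption, not taken from the original repo) of the cfg keys this module
# reads; BaseModel will typically require further entries (backbone, norm_cfg, act_cfg,
# losses, ...):
#
#     cfg = {
#         'num_classes': 19,
#         'aspp': {'in_channels_list': [16, 24, 960],
#                  'branch_channels_list': [32, 64],
#                  'out_channels': 128},
#         'decoder': {'in_channels': 128, 'dropout': 0.1},
#     }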
|
the-stack_106_14120
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pkgutil
import importlib.util
import time
import threading
from typing import NamedTuple, Any, Union, TYPE_CHECKING, Optional
from .i18n import _
from .util import (profiler, DaemonThread, UserCancelled, ThreadJob)
from . import bip32
from . import plugins
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .plugins.hw_wallet import HW_PluginBase
_logger = get_logger(__name__)
plugin_loaders = {}
hook_names = set()
hooks = {}
class Plugins(DaemonThread):
LOGGING_SHORTCUT = 'p'
@profiler
def __init__(self, config: SimpleConfig, gui_name):
DaemonThread.__init__(self)
self.setName('Plugins')
self.pkgpath = os.path.dirname(plugins.__file__)
self.config = config
self.hw_wallets = {}
self.plugins = {}
self.gui_name = gui_name
self.descriptions = {}
self.device_manager = DeviceMgr(config)
self.load_plugins()
self.add_jobs(self.device_manager.thread_jobs())
self.start()
def load_plugins(self):
for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
full_name = f'lynx_code.plugins.{name}'
spec = importlib.util.find_spec(full_name)
if spec is None: # pkgutil found it but importlib can't ?!
raise Exception(f"Error pre-loading {full_name}: no spec")
try:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
except Exception as e:
raise Exception(f"Error pre-loading {full_name}: {repr(e)}") from e
d = module.__dict__
gui_good = self.gui_name in d.get('available_for', [])
if not gui_good:
continue
details = d.get('registers_wallet_type')
if details:
self.register_wallet_type(name, gui_good, details)
details = d.get('registers_keystore')
if details:
self.register_keystore(name, gui_good, details)
self.descriptions[name] = d
if not d.get('requires_wallet_type') and self.config.get('use_' + name):
try:
self.load_plugin(name)
except BaseException as e:
self.logger.exception(f"cannot initialize plugin {name}: {e}")
def get(self, name):
return self.plugins.get(name)
def count(self):
return len(self.plugins)
def load_plugin(self, name):
if name in self.plugins:
return self.plugins[name]
full_name = f'lynx.plugins.{name}.{self.gui_name}'
spec = importlib.util.find_spec(full_name)
if spec is None:
raise RuntimeError("%s implementation for %s plugin not found"
% (self.gui_name, name))
try:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
plugin = module.Plugin(self, self.config, name)
except Exception as e:
raise Exception(f"Error loading {name} plugin: {repr(e)}") from e
self.add_jobs(plugin.thread_jobs())
self.plugins[name] = plugin
self.logger.info(f"loaded {name}")
return plugin
def close_plugin(self, plugin):
self.remove_jobs(plugin.thread_jobs())
def enable(self, name):
self.config.set_key('use_' + name, True, True)
p = self.get(name)
if p:
return p
return self.load_plugin(name)
def disable(self, name):
self.config.set_key('use_' + name, False, True)
p = self.get(name)
if not p:
return
self.plugins.pop(name)
p.close()
self.logger.info(f"closed {name}")
def toggle(self, name):
p = self.get(name)
return self.disable(name) if p else self.enable(name)
def is_available(self, name, w):
d = self.descriptions.get(name)
if not d:
return False
deps = d.get('requires', [])
for dep, s in deps:
try:
__import__(dep)
except ImportError as e:
self.logger.warning(f'Plugin {name} unavailable: {repr(e)}')
return False
requires = d.get('requires_wallet_type', [])
return not requires or w.wallet_type in requires
def get_hardware_support(self):
out = []
for name, (gui_good, details) in self.hw_wallets.items():
if gui_good:
try:
p = self.get_plugin(name)
if p.is_enabled():
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=p,
exception=None))
except Exception as e:
self.logger.exception(f"cannot load plugin for: {name}")
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=None,
exception=e))
return out
def register_wallet_type(self, name, gui_good, wallet_type):
from .wallet import register_wallet_type, register_constructor
self.logger.info(f"registering wallet type {(wallet_type, name)}")
def loader():
plugin = self.get_plugin(name)
register_constructor(wallet_type, plugin.wallet_class)
register_wallet_type(wallet_type)
plugin_loaders[wallet_type] = loader
def register_keystore(self, name, gui_good, details):
from .keystore import register_keystore
def dynamic_constructor(d):
return self.get_plugin(name).keystore_class(d)
if details[0] == 'hardware':
self.hw_wallets[name] = (gui_good, details)
self.logger.info(f"registering hardware {name}: {details}")
register_keystore(details[1], dynamic_constructor)
def get_plugin(self, name):
if name not in self.plugins:
self.load_plugin(name)
return self.plugins[name]
def run(self):
while self.is_running():
time.sleep(0.1)
self.run_jobs()
self.on_stop()
def hook(func):
hook_names.add(func.__name__)
return func
def run_hook(name, *args):
results = []
f_list = hooks.get(name, [])
for p, f in f_list:
if p.is_enabled():
try:
r = f(*args)
except Exception:
_logger.exception(f"Plugin error. plugin: {p}, hook: {name}")
r = False
if r:
results.append(r)
if results:
assert len(results) == 1, results
return results[0]
class BasePlugin(Logger):
def __init__(self, parent, config, name):
self.parent = parent # The plugins object
self.name = name
self.config = config
self.wallet = None
Logger.__init__(self)
# add self to hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.append((self, getattr(self, k)))
hooks[k] = l
def __str__(self):
return self.name
def close(self):
# remove self from hooks
for attr_name in dir(self):
if attr_name in hook_names:
# found attribute in self that is also the name of a hook
l = hooks.get(attr_name, [])
try:
l.remove((self, getattr(self, attr_name)))
except ValueError:
# maybe attr name just collided with hook name and was not hook
continue
hooks[attr_name] = l
self.parent.close_plugin(self)
self.on_close()
def on_close(self):
pass
def requires_settings(self):
return False
def thread_jobs(self):
return []
def is_enabled(self):
return self.is_available() and self.config.get('use_'+self.name) is True
def is_available(self):
return True
def can_user_disable(self):
return True
def settings_dialog(self):
pass
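# Illustrative sketch (added; not part of the original module). A plugin exposes a hook by
# decorating a method with @hook; once an enabled instance exists, the method is registered
# in `hooks` and can be fired from anywhere via run_hook(). The class and hook name below
# are hypothetical.
class _DemoHookPlugin(BasePlugin):
    @hook
    def demo_ready(self, value):
        # On an enabled instance, run_hook('demo_ready', 21) would return 42.
        return value * 2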
class DeviceNotFoundError(Exception): pass
class DeviceUnpairableError(Exception): pass
class Device(NamedTuple):
path: Union[str, bytes]
interface_number: int
id_: str
product_key: Any # when using hid, often Tuple[int, int]
usage_page: int
transport_ui_string: str
class DeviceInfo(NamedTuple):
device: Device
label: Optional[str] = None
initialized: Optional[bool] = None
exception: Optional[Exception] = None
class HardwarePluginToScan(NamedTuple):
name: str
description: str
plugin: Optional['HW_PluginBase']
exception: Optional[Exception]
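# Illustrative sketch (added; not part of the original module): how a hid-enumerated entry
# maps onto Device/DeviceInfo. The vendor/product ids and serial below are placeholders.
def _demo_device_info() -> DeviceInfo:
    dev = Device(path=b'0001:0004:00', interface_number=0, id_='serial123',
                 product_key=(0x1234, 0x0001), usage_page=0xffa0,
                 transport_ui_string='hid')
    return DeviceInfo(device=dev, label='demo wallet', initialized=True)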
class DeviceMgr(ThreadJob):
'''Manages hardware clients. A client communicates over a hardware
channel with the device.
In addition to tracking device HID IDs, the device manager tracks
hardware wallets and manages wallet pairing. A HID ID may be
paired with a wallet when it is confirmed that the hardware device
matches the wallet, i.e. they have the same master public key. A
HID ID can be unpaired if e.g. it is wiped.
Because of hotplugging, a wallet must request its client
dynamically each time it is required, rather than caching it
itself.
The device manager is shared across plugins, so just one place
does hardware scans when needed. By tracking HID IDs, if a device
is plugged into a different port the wallet is automatically
re-paired.
Wallets are informed on connect / disconnect events. It must
implement connected(), disconnected() callbacks. Being connected
implies a pairing. Callbacks can happen in any thread context,
and we do them without holding the lock.
Confusingly, the HID ID (serial number) reported by the HID system
doesn't match the device ID reported by the device itself. We use
the HID IDs.
This plugin is thread-safe. Currently only devices supported by
hidapi are implemented.'''
def __init__(self, config):
ThreadJob.__init__(self)
# Keyed by xpub. The value is the device id
# has been paired, and None otherwise.
self.xpub_ids = {}
# A list of clients. The key is the client, the value is
# a (path, id_) pair.
self.clients = {}
# What we recognise. Each entry is a (vendor_id, product_id)
# pair.
self.recognised_hardware = set()
# Custom enumerate functions for devices we don't know about.
self.enumerate_func = set()
# For synchronization
self.lock = threading.RLock()
self.hid_lock = threading.RLock()
self.config = config
def thread_jobs(self):
# Thread job to handle device timeouts
return [self]
def run(self):
'''Handle device timeouts. Runs in the context of the Plugins
thread.'''
with self.lock:
clients = list(self.clients.keys())
cutoff = time.time() - self.config.get_session_timeout()
for client in clients:
client.timeout(cutoff)
def register_devices(self, device_pairs):
for pair in device_pairs:
self.recognised_hardware.add(pair)
def register_enumerate_func(self, func):
self.enumerate_func.add(func)
def create_client(self, device, handler, plugin):
# Get from cache first
client = self.client_lookup(device.id_)
if client:
return client
client = plugin.create_client(device, handler)
if client:
self.logger.info(f"Registering {client}")
with self.lock:
self.clients[client] = (device.path, device.id_)
return client
def xpub_id(self, xpub):
with self.lock:
return self.xpub_ids.get(xpub)
def xpub_by_id(self, id_):
with self.lock:
for xpub, xpub_id in self.xpub_ids.items():
if xpub_id == id_:
return xpub
return None
def unpair_xpub(self, xpub):
with self.lock:
if xpub not in self.xpub_ids:
return
_id = self.xpub_ids.pop(xpub)
self._close_client(_id)
def unpair_id(self, id_):
xpub = self.xpub_by_id(id_)
if xpub:
self.unpair_xpub(xpub)
else:
self._close_client(id_)
def _close_client(self, id_):
client = self.client_lookup(id_)
self.clients.pop(client, None)
if client:
client.close()
def pair_xpub(self, xpub, id_):
with self.lock:
self.xpub_ids[xpub] = id_
def client_lookup(self, id_):
with self.lock:
for client, (path, client_id) in self.clients.items():
if client_id == id_:
return client
return None
def client_by_id(self, id_):
'''Returns a client for the device ID if one is registered. If
a device is wiped or in bootloader mode pairing is impossible;
in such cases we communicate by device ID and not wallet.'''
self.scan_devices()
return self.client_lookup(id_)
def client_for_keystore(self, plugin, handler, keystore, force_pair):
self.logger.info("getting client for keystore")
if handler is None:
raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing."))
handler.update_status(False)
devices = self.scan_devices()
xpub = keystore.xpub
derivation = keystore.get_derivation()
client = self.client_by_xpub(plugin, xpub, handler, devices)
if client is None and force_pair:
info = self.select_device(plugin, handler, keystore, devices)
client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices)
if client:
handler.update_status(True)
self.logger.info("end client for keystore")
return client
def client_by_xpub(self, plugin, xpub, handler, devices):
_id = self.xpub_id(xpub)
client = self.client_lookup(_id)
if client:
# An unpaired client might have another wallet's handler
# from a prior scan. Replace to fix dialog parenting.
client.handler = handler
return client
for device in devices:
if device.id_ == _id:
return self.create_client(device, handler, plugin)
def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices):
# The wallet has not been previously paired, so let the user
# choose an unpaired device and compare its first address.
xtype = bip32.xpub_type(xpub)
client = self.client_lookup(info.device.id_)
if client and client.is_pairable():
# See comment above for same code
client.handler = handler
# This will trigger a PIN/passphrase entry request
try:
client_xpub = client.get_xpub(derivation, xtype)
except (UserCancelled, RuntimeError):
# Bad / cancelled PIN / passphrase
client_xpub = None
if client_xpub == xpub:
self.pair_xpub(xpub, info.device.id_)
return client
# The user input has wrong PIN or passphrase, or cancelled input,
# or it is not pairable
raise DeviceUnpairableError(
_('lynx cannot pair with your {}.\n\n'
'Before you request AUDAX coins to be sent to addresses in this '
'wallet, ensure you can pair with your device, or that you have '
'its seed (and passphrase, if any). Otherwise all coins you '
'receive will be unspendable.').format(plugin.device))
def unpaired_device_infos(self, handler, plugin: 'HW_PluginBase', devices=None,
include_failing_clients=False):
'''Returns a list of DeviceInfo objects: one for each connected,
unpaired device accepted by the plugin.'''
if not plugin.libraries_available:
message = plugin.get_library_not_available_message()
raise Exception(message)
if devices is None:
devices = self.scan_devices()
devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)]
infos = []
for device in devices:
if device.product_key not in plugin.DEVICE_IDS:
continue
try:
client = self.create_client(device, handler, plugin)
except Exception as e:
self.logger.error(f'failed to create client for {plugin.name} at {device.path}: {repr(e)}')
if include_failing_clients:
infos.append(DeviceInfo(device=device, exception=e))
continue
if not client:
continue
infos.append(DeviceInfo(device=device,
label=client.label(),
initialized=client.is_initialized()))
return infos
def select_device(self, plugin, handler, keystore, devices=None):
'''Ask the user to select a device to use if there is more than one,
and return the DeviceInfo for the device.'''
while True:
infos = self.unpaired_device_infos(handler, plugin, devices)
if infos:
break
msg = _('Please insert your {}').format(plugin.device)
if keystore.label:
msg += ' ({})'.format(keystore.label)
msg += '. {}\n\n{}'.format(
_('Verify the cable is connected and that '
'no other application is using it.'),
_('Try to connect again?')
)
if not handler.yes_no_question(msg):
raise UserCancelled()
devices = None
if len(infos) == 1:
return infos[0]
# select device by label
for info in infos:
if info.label == keystore.label:
return info
msg = _("Please select which {} device to use:").format(plugin.device)
descriptions = [str(info.label) + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos]
c = handler.query_choice(msg, descriptions)
if c is None:
raise UserCancelled()
info = infos[c]
# save new label
keystore.set_label(info.label)
if handler.win.wallet is not None:
handler.win.wallet.save_keystore()
return info
def _scan_devices_with_hid(self):
try:
import hid
except ImportError:
return []
with self.hid_lock:
hid_list = hid.enumerate(0, 0)
devices = []
for d in hid_list:
product_key = (d['vendor_id'], d['product_id'])
if product_key in self.recognised_hardware:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
devices.append(Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid'))
return devices
def scan_devices(self):
self.logger.info("scanning devices...")
# First see what's connected that we know about
devices = self._scan_devices_with_hid()
# Let plugin handlers enumerate devices we don't know about
for f in self.enumerate_func:
try:
new_devices = f()
except BaseException as e:
self.logger.error('custom device enum failed. func {}, error {}'
.format(str(f), str(e)))
else:
devices.extend(new_devices)
# find out what was disconnected
pairs = [(dev.path, dev.id_) for dev in devices]
disconnected_ids = []
with self.lock:
connected = {}
for client, pair in self.clients.items():
if pair in pairs and client.has_usable_connection_with_device():
connected[client] = pair
else:
disconnected_ids.append(pair[1])
self.clients = connected
# Unpair disconnected devices
for id_ in disconnected_ids:
self.unpair_id(id_)
return devices
|
the-stack_106_14121
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from xml.sax.handler import ContentHandler,EntityResolver
from cairis.core.AssetParameters import AssetParameters
import cairis.core.AssetParametersFactory
from cairis.core.AttackerParameters import AttackerParameters
from cairis.core.AttackerEnvironmentProperties import AttackerEnvironmentProperties
from cairis.core.VulnerabilityParameters import VulnerabilityParameters
from cairis.core.VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from cairis.core.ThreatParameters import ThreatParameters
from cairis.core.ThreatEnvironmentProperties import ThreatEnvironmentProperties
from cairis.core.MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from cairis.core.MisuseCase import MisuseCase
from cairis.core.RiskParameters import RiskParameters
from cairis.core.TemplateObstacleParameters import TemplateObstacleParameters
from cairis.core.ObstacleParameters import ObstacleParameters
from cairis.core.ObstacleEnvironmentProperties import ObstacleEnvironmentProperties
from cairis.core.GoalAssociationParameters import GoalAssociationParameters
from cairis.core.Borg import Borg
__author__ = 'Shamal Faily'
def a2i(spLabel):
if spLabel == 'Low':
return 1
elif spLabel == 'Medium':
return 2
elif spLabel == 'High':
return 3
else:
return 0
def it2Id(itLabel):
if itLabel == 'required':
return 1
else:
return 0
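# Minimal self-check sketch (added; not part of the original module): a2i maps the
# qualitative security property labels used in attack pattern XML onto the 0-3 scale,
# it2Id maps goal importance onto 0/1; any unrecognised label falls through to 0.
assert a2i('High') == 3 and a2i('Unknown') == 0
assert it2Id('required') == 1 and it2Id('optional') == 0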
class AttackPatternContentHandler(ContentHandler,EntityResolver):
def __init__(self,session_id = None):
self.thePatternName = ''
self.theLikelihood = ''
self.theSeverity = ''
self.theObstacles = []
self.theObstacleAssociations = []
self.inIntent = 0
self.theIntent = ''
self.theMotivations = []
self.theEnvironment = ''
self.theAttack = ''
self.theExploit = ''
self.theAttackObstacle = ''
self.theExploitObstacle = ''
self.theParticipants = []
self.theTargets = []
self.theExploits = []
self.inConsequences = 0
self.theConsequences = ''
self.inImplementation = 0
self.theImplementation = ''
self.inKnownUses = 0
self.theKnownUses = ''
self.inRelatedPatterns = 0
self.theRelatedPatterns = ''
b = Borg()
self.configDir = b.configDir
self.dbProxy = b.get_dbproxy(session_id)
self.theObstacleParameters = []
self.theObstacleAssociationParameters = []
self.theAssetParameters = []
self.theAttackerParameters = []
self.theVulnerabilityParameters = None
self.theThreatParameters = None
self.theRiskParameters = None
self.resetObstacleElements()
self.resetObstacleAssociationElements()
self.resetMotivationElements()
self.resetParticipantElements()
def obstacles(self): return self.theObstacleParameters
def obstacleAssociations(self): return self.theObstacleAssociationParameters
def assets(self): return self.theAssetParameters
def attackers(self): return self.theAttackerParameters
def vulnerability(self): return self.theVulnerabilityParameters
def threat(self): return self.theThreatParameters
def risk(self): return self.theRiskParameters
def resetObstacleElements(self):
self.theObstacleName = ''
self.theObstacleCategory = ''
self.inDefinition = 0
self.theDefinition = ''
self.theConcerns = []
self.theResponsibilities = []
self.resetProbabilityElements()
def resetProbabilityElements(self):
self.theProbability = 0.0
self.inRationale = 0
self.theRationale = ''
def resetObstacleAssociationElements(self):
self.theObstacleName = ''
self.theRefType = ''
self.theSubObstacleName = ''
self.inRationale = 0
self.theRationale = ''
def resetMotivationElements(self):
self.theGoal = ''
self.theValue = 'None'
self.inDescription = 0
self.theDescription = ''
def resetParticipantElements(self):
self.theParticipant = ''
self.theMotives = []
self.theResponsibilities = []
def resolveEntity(self,publicId,systemId):
return systemId
def startElement(self,name,attrs):
if (name == 'attack_pattern'):
self.thePatternName = attrs['name']
self.theLikelihood = attrs['likelihood']
self.theSeverity = attrs['severity']
elif (name == 'obstacle'):
self.theObstacleName = attrs['name']
self.theObstacleCategory = attrs['category'].replace('_',' ')
elif (name == 'probability'):
self.theProbability = attrs['value']
elif (name == 'obstacle_association'):
self.theObstacleName = attrs['obstacle_name']
self.theSubObstacleName = attrs['subobstacle_name']
self.theRefType = attrs['ref_type']
elif (name == 'definition'):
self.inDefinition = 1
self.theDefinition = ''
elif (name == 'rationale'):
self.inRationale = 1
self.theRationale = ''
elif (name == 'intent'):
self.inIntent = 1
self.theIntent = ''
elif (name == 'motivation'):
self.theGoal = attrs['goal']
self.theValue = attrs['value']
elif (name == 'description'):
self.inDescription = 1
self.theDescription = ''
if self.inImplementation:
self.theImplementation = ''
elif (name == 'applicability'):
self.theEnvironment = attrs['environment']
elif (name == 'structure'):
self.theAttack = attrs['attack']
self.theExploit = attrs['exploit']
try:
self.theAttackObstacle = attrs['attack_obstacle']
except KeyError:
self.theAttackObstacle = ''
try:
self.theExploitObstacle = attrs['exploit_obstacle']
except KeyError:
self.theExploitObstacle = ''
elif (name == 'participant'):
self.theParticipant = attrs['name']
elif (name == 'motive'):
self.theMotives.append(attrs['name'])
elif (name == 'capability'):
self.theResponsibilities.append((attrs['name'],attrs['value']))
elif (name == 'target'):
self.theTargets.append(attrs['name'])
elif (name == 'exploit'):
self.theExploits.append(attrs['name'])
elif (name == 'consequences'):
self.inConsequences = 1
self.theConsequences = ''
elif name == 'implementation':
self.inImplementation = 1
self.theImplementation = ''
elif name == 'known_uses':
self.inKnownUses = 1
self.theKnownUses = ''
elif name == 'related_patterns':
self.inRelatedPatterns = 1
self.theRelatedPatterns = ''
elif name == 'concern':
self.theConcerns.append(attrs['name'])
elif name == 'responsibility':
self.theResponsibilities.append(attrs['name'])
def characters(self,data):
if self.inDefinition:
self.theDefinition += data
elif self.inRationale:
self.theRationale += data
elif self.inIntent:
self.theIntent += data
elif self.inDescription and self.inImplementation:
self.theImplementation += data
elif self.inDescription:
self.theDescription += data
elif self.inConsequences:
self.theConsequences += data
elif self.inImplementation:
self.theImplementation += data
elif self.inKnownUses:
self.theKnownUses += data
elif self.inRelatedPatterns:
self.theRelatedPatterns += data
def endElement(self,name):
if name == 'intent':
self.inIntent = 0
elif name == 'definition':
self.inDefinition = 0
elif name == 'rationale':
self.inRationale = 0
elif name == 'motivation':
self.theMotivations.append((self.theGoal,self.theValue,self.theDescription))
self.resetMotivationElements()
elif name == 'participant':
self.theParticipants.append((self.theParticipant,self.theMotives,self.theResponsibilities))
self.resetParticipantElements()
elif name == 'description':
self.inDescription = 0
if self.inImplementation:
self.inImplementation = 0
elif name == 'consequences':
self.inConsequences = 0
elif name == 'implementation':
self.inImplementation = 0
elif name == 'known_uses':
self.inKnownUses = 0
elif name == 'related_patterns':
self.inRelatedPatterns = 0
elif name == 'obstacle':
self.theObstacles.append( TemplateObstacleParameters(self.theObstacleName,self.theObstacleCategory,self.theDefinition,self.theConcerns,self.theResponsibilities,self.theProbability,self.theRationale))
self.resetObstacleElements()
elif name == 'obstacle_association':
self.theObstacleAssociations.append((self.theObstacleName,self.theRefType,self.theSubObstacleName,self.theRationale))
self.resetObstacleAssociationElements()
elif name == 'attack_pattern':
assetList = self.theTargets + self.theExploits
for assetName in assetList:
self.theAssetParameters.append(cairis.core.AssetParametersFactory.buildFromTemplate(assetName,[self.theEnvironment]))
attackerNames = []
for attackerName,attackerMotives,attackerCapabilities in self.theParticipants:
attackerRoles = self.dbProxy.dimensionRoles(self.dbProxy.getDimensionId(attackerName,'persona'),self.dbProxy.getDimensionId(self.theEnvironment,'environment'),'persona')
ep = AttackerEnvironmentProperties(self.theEnvironment,attackerRoles,attackerMotives,attackerCapabilities)
p = AttackerParameters(attackerName,'','',[],[ep])
p.isPersona = True
self.theAttackerParameters.append(p)
attackerNames.append(attackerName)
for tObs in self.theObstacles:
sgRefs = []
for resp in tObs.responsibilities():
sgRefs.append((resp,'role','responsible',0,'None'))
ep = ObstacleEnvironmentProperties(self.theEnvironment,'',tObs.definition(),tObs.category(),[],sgRefs,tObs.concerns())
ep.theProbability = tObs.probability()
ep.theProbabilityRationale = tObs.probabilityRationale()
self.theObstacleParameters.append(ObstacleParameters(tObs.name(),self.thePatternName,[],[ep]))
for obsAssoc in self.theObstacleAssociations:
obsName = obsAssoc[0]
refType = obsAssoc[1]
subObsName = obsAssoc[2]
assocRationale = obsAssoc[3]
self.theObstacleAssociationParameters.append(GoalAssociationParameters(self.theEnvironment,obsName,'obstacle',refType,subObsName,'obstacle',0,assocRationale))
vp = VulnerabilityEnvironmentProperties(self.theEnvironment,self.theSeverity,self.theExploits)
vulRows = self.dbProxy.getVulnerabilityDirectory(self.theExploit)
vulData = vulRows[0]
self.theVulnerabilityParameters = VulnerabilityParameters(self.theExploit,vulData[2],vulData[3],[],[vp])
spDict = {}
spDict['confidentiality'] = (0,'None')
spDict['integrity'] = (0,'None')
spDict['availability'] = (0,'None')
spDict['accountability'] = (0,'None')
spDict['anonymity'] = (0,'None')
spDict['pseudonymity'] = (0,'None')
spDict['unlinkability'] = (0,'None')
spDict['unobservability'] = (0,'None')
for thrMotivation in self.theMotivations:
spName = thrMotivation[0]
spValue = thrMotivation[1]
spRationale = thrMotivation[2]
spDict[spName] = (a2i(spValue),spRationale)
cProperty,cRationale = spDict['confidentiality']
iProperty,iRationale = spDict['integrity']
avProperty,avRationale = spDict['availability']
acProperty,acRationale = spDict['accountability']
anProperty,anRationale = spDict['anonymity']
panProperty,panRationale = spDict['pseudonymity']
unlProperty,unlRationale = spDict['unlinkability']
unoProperty,unoRationale = spDict['unobservability']
tp = ThreatEnvironmentProperties(self.theEnvironment,self.theLikelihood,self.theTargets,attackerNames,[cProperty,iProperty,avProperty,acProperty,anProperty,panProperty,unlProperty,unoProperty],[cRationale,iRationale,avRationale,acRationale,anRationale,panRationale,unlRationale,unoRationale])
thrRows = self.dbProxy.getThreatDirectory(self.theAttack)
thrData = thrRows[0]
self.theThreatParameters = ThreatParameters(self.theAttack,thrData[3],thrData[2],[],[tp])
if (self.theAttackObstacle != ''):
self.theObstacleAssociationParameters.append(GoalAssociationParameters(self.theEnvironment,self.theAttackObstacle,'obstacle','or',self.theAttack,'threat',0,'None'))
if (self.theExploitObstacle != ''):
self.theObstacleAssociationParameters.append(GoalAssociationParameters(self.theEnvironment,self.theExploitObstacle,'obstacle','or',self.theExploit,'vulnerability',0,'None'))
rep = MisuseCaseEnvironmentProperties(self.theEnvironment,self.theImplementation )
mc = MisuseCase(-1,'Exploit ' + self.thePatternName,[rep],self.thePatternName)
self.theRiskParameters = RiskParameters(self.thePatternName,self.theAttack,self.theExploit,mc,[],self.theIntent,self.theEnvironment)
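if __name__ == '__main__':
    # Usage sketch (added; illustrative only). Parsing an attack pattern file through this
    # handler requires a reachable CAIRIS database for the dbProxy lookups in endElement();
    # the XML file name below is a placeholder.
    import xml.sax
    demo_handler = AttackPatternContentHandler()
    demo_parser = xml.sax.make_parser()
    demo_parser.setContentHandler(demo_handler)
    demo_parser.setEntityResolver(demo_handler)
    demo_parser.parse('attack_pattern.xml')
    print(demo_handler.risk(), demo_handler.threat(), demo_handler.vulnerability())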
|
the-stack_106_14122
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api.utils import log_utils
from federatedml.param.base_param import BaseParam
LOGGER = log_utils.getLogger()
class OneHotEncoderParam(BaseParam):
"""
Parameters
----------
transform_col_indexes: list or int, default: -1
Specify which columns need to be calculated. -1 represents all columns.
transform_col_names : list of string, default: []
Specify which columns need to be calculated. Each element in the list represents a column name in the header.
need_run: bool, default True
Indicate if this module needs to be run
"""
def __init__(self, transform_col_indexes=-1, transform_col_names=None, need_run=True):
super(OneHotEncoderParam, self).__init__()
if transform_col_names is None:
transform_col_names = []
self.transform_col_indexes = transform_col_indexes
self.transform_col_names = transform_col_names
self.need_run = need_run
def check(self):
descr = "One-hot encoder param's"
self.check_defined_type(self.transform_col_indexes, descr, ['list', 'int', 'NoneType'])
self.check_defined_type(self.transform_col_names, descr, ['list', 'NoneType'])
return True
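if __name__ == '__main__':
    # Minimal usage sketch (added; illustrative only): build a parameter object for two
    # column indices plus one named column, then validate it with check().
    demo_param = OneHotEncoderParam(transform_col_indexes=[0, 2],
                                    transform_col_names=['x3'],
                                    need_run=True)
    assert demo_param.check()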
|
the-stack_106_14123
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 17:11:26 2020
@author: haolinl
"""
import copy
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # For extracting data from .mat file
import scipy.stats as st
import torch
import torch.nn as nn
import torch.utils.data
class Net1(nn.Module):
"""
MLP modeling hyperparams:
----------
Input: FM_num (FMs' displacements).
Hidden layers: Default architecture: 128 x 64. Optimization available.
Output: PC_num (weights generated from deformation's PCA).
"""
def __init__(self, FM_num, PC_num):
"""
Parameters:
----------
FM_num: Int.
The number of fiducial markers.
PC_num: Int.
The number of picked principal compoments.
"""
super(Net1, self).__init__()
self.FM_num = FM_num
self.PC_num = PC_num
self.hidden_1 = nn.Sequential(
nn.Linear(int(self.FM_num*3), 128),
nn.ReLU(),
# nn.Dropout(0.5)
)
self.hidden_2 = nn.Sequential(
nn.Linear(128, 64),
nn.ReLU(),
# nn.Dropout(0.5)
)
# self.hidden_3 = nn.Sequential(
# nn.Linear(128, 64),
# nn.ReLU(),
# # nn.Dropout(0.5)
# )
# self.hidden_4 = nn.Sequential(
# nn.Linear(64, 32),
# nn.ReLU(),
# # nn.Dropout(0.5)
# )
self.out_layer = nn.Linear(64, self.PC_num)
def forward(self, x):
"""
Forward mapping: FM displacements -> Principal weights.
Parameters:
----------
x: 2D Array.
Matrix of FM displacements of all DOFs.
Returns:
----------
output: 2D Array.
Matrix of principal weights.
"""
f1 = self.hidden_1(x)
f2 = self.hidden_2(f1)
# f3 = self.hidden_3(f2)
# f4 = self.hidden_4(f3)
output = self.out_layer(f2)
return output
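# Quick shape sketch (added; not part of the original script). With FM_num=5 and PC_num=27
# the MLP maps a batch of flattened FM displacements (batch x 15) to predicted principal
# weights (batch x 27).
def _demo_net1_shapes(FM_num=5, PC_num=27, batch=8):
    net = Net1(FM_num, PC_num)
    out = net(torch.rand(batch, FM_num * 3))
    return out.shape # -> torch.Size([8, 27])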
def saveLog(lossList_train, lossList_valid, FM_num, PC_num, batch_size, learning_rate,
num_epochs, center_indices_list, elapsed_time, max_mean, mean_mean, write_path="train_valid_loss.log"):
"""
Save the training & validation loss, training parameters and testing performance into .log file.
Parameters:
----------
lossList_train: List.
The train loss of each epoch.
In exact order.
lossList_valid: List.
The valid loss of each epoch.
In exact order.
FM_num: Int.
Number of fiducial markers.
PC_num: Int.
Number of principal components.
batch_size: Int.
The size of one single training batch.
learning_rate: Float.
Learning rate.
num_epochs: Int.
The number of total training iterations.
center_indices_list: List.
Picked indices of all generated centers/FMs.
elapsed_time: Float.
The time spent for training and validation process.
Unit: s.
max_mean: Float.
The mean value of max nodal errors of all test samples.
Unit: mm.
mean_mean: Float.
The mean value of mean nodal errors of all test samples.
Unit: mm.
write_path (optional): String.
The path of to-be-saved .log file.
Default: "train_valid_loss.log"
"""
content = ["FM_num = {}".format(FM_num),
"FM_indices (indexed from 0) = {}".format(list(np.sort(center_indices_list[0:FM_num]))),
"Center_indices_list (indexed from 0, exact order) = {}".format(list(center_indices_list)),
"PC_num = {}".format(PC_num),
"Batch_size = {}".format(str(batch_size)),
"Learning_rate = {}".format(str(learning_rate)),
"Num_epochs = {}".format(str(num_epochs)),
"----------------------------------------------------------",
"Epoch\tTraining loss\tValidation loss"]
for i in range(len(lossList_train)):
loss_string_temp = "%d\t%.8f\t%.8f" % (i, lossList_train[i], lossList_valid[i])
content.append(loss_string_temp)
content += ["----------------------------------------------------------",
"Elapsed_time = {} s".format(elapsed_time),
"\nTesting reconstruction performance parameters:",
"Max_mean = %.8f mm" % (max_mean),
"Mean_mean = %.8f mm" % (mean_mean)]
content = '\n'.join(content)
with open(write_path, 'w') as f: f.write(content)
def normalization(data):
"""
Normalize the input data (displacements) of each direction within the range of [0,1].
Used when the displacement ranges differ between the x/y/z directions (each direction is treated as a single feature).
Parameters:
----------
data: 2D Array.
Matrix of training/testing input data.
Returns:
----------
data_nor: 2D Array.
Matrix of the normalized data with the same shape as the input.
norm_params: 1D Array (6 x 1).
Containing "min" and "max" of each direction for reconstruction.
Row order: [x_max; x_min; y_max; y_min; z_max; z_min].
"""
data_nor, norm_params = np.zeros(data.shape), None
# Partition the data matrix into x,y,z_matrices
x_temp, y_temp, z_temp = data[::3,:], data[1::3,:], data[2::3,:]
x_max, x_min = np.max(x_temp), np.min(x_temp)
y_max, y_min = np.max(y_temp), np.min(y_temp)
z_max, z_min = np.max(z_temp), np.min(z_temp)
# Min-max normalization: [0,1]
x_temp = (x_temp - x_min) / (x_max - x_min)
y_temp = (y_temp - y_min) / (y_max - y_min)
z_temp = (z_temp - z_min) / (z_max - z_min)
data_nor[::3,:], data_nor[1::3,:], data_nor[2::3,:] = x_temp, y_temp, z_temp
norm_params = np.array([x_max, x_min, y_max, y_min, z_max, z_min]).astype(float).reshape(-1,1)
return data_nor, norm_params
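# Tiny usage sketch (added; not part of the original script): x/y/z rows are interleaved
# (xyzxyz...) and each direction is min-max scaled independently.
def _demo_normalization():
    demo = np.array([[0.0, 10.0], # x row
                     [1.0, 3.0], # y row
                     [-2.0, 2.0]]) # z row
    demo_nor, params = normalization(demo)
    return demo_nor, params # each row -> [0, 1]; params = [x_max; x_min; y_max; y_min; z_max; z_min]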
def matrixShrink(data_matrix, fix_indices_list=[]):
"""
Remove rows of zero displacement (fixed DOFs).
Parameters:
----------
data_matrix: 2D Array.
Size: nDOF x SampleNum.
The full matrix of deformation data.
fix_indices_list (optional): List of ints.
The list of fixed indices.
Indexed from 1.
For nonlinear dataset, this list should be specified.
Default: [].
Returns:
----------
data_shrinked: 2D Array.
Size: nDOF' x SampleNum.
The matrix without zero rows.
nDOF: Int.
Number of all DOFs of original deformation matrix.
non_zero_indices_list: List.
All indices of non zero rows for deformation reconstruction.
In exact order.
"""
if fix_indices_list == []:
nDOF = data_matrix.shape[0]
zero_indices_list, non_zero_indices_list = [], []
for i in range(nDOF):
if data_matrix[i,0] == 0: zero_indices_list.append(i)
else: non_zero_indices_list.append(i)
data_shrinked = np.delete(data_matrix, zero_indices_list, axis=0)
else:
fix_indices_list = [item-1 for item in fix_indices_list] # Make the fixed nodes indexed from 0.
nDOF = data_matrix.shape[0]
zero_indices_list, non_zero_indices_list = [], []
for i in range(int(nDOF/3)): # Iterate within the range of node_num.
if i in fix_indices_list:
zero_indices_list.append(i*3)
zero_indices_list.append(i*3+1)
zero_indices_list.append(i*3+2)
else:
non_zero_indices_list.append(i*3)
non_zero_indices_list.append(i*3+1)
non_zero_indices_list.append(i*3+2)
data_shrinked = np.delete(data_matrix, zero_indices_list, axis=0)
return data_shrinked, nDOF, non_zero_indices_list
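# Tiny usage sketch (added; not part of the original script): rows that are zero in the
# first sample (fixed DOFs) are dropped, and the remaining row indices are kept for the
# later reconstruction step.
def _demo_matrix_shrink():
    demo = np.array([[0.0, 0.0],
                     [1.0, 2.0],
                     [0.0, 0.0],
                     [3.0, 4.0]])
    shrinked, nDOF, kept = matrixShrink(demo)
    return shrinked.shape, nDOF, kept # -> ((2, 2), 4, [1, 3])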
def zeroMean(data_matrix, training_ratio, mean_vect_input=[]):
"""
Shift the origin of new basis coordinate system to mean point of the data.
Parameters:
----------
data_matrix: 2D Array.
Size: nFeatures x nSamples.
training_ratio: float.
The ratio of training dataset.
mean_vect_input (optional): 1D List of floats.
The user input of mean vector of training dataset.
Default: [].
Returns:
----------
data_new: 2D Array with the same size as data_matrix.
Mean-shifted data.
mean_vect: 1D Array of float.
The mean value of each feature.
"""
if mean_vect_input == []:
training_index = int(np.ceil(data_matrix.shape[1] * training_ratio)) # Samples along with axis-1.
mean_vect = np.mean(data_matrix[:,0:training_index], axis=1) # Compute mean along with sample's axis.
else:
mean_vect = np.array(mean_vect_input).astype(float).reshape(-1,)
data_new = np.zeros(data_matrix.shape)
for i in range(data_matrix.shape[1]):
data_new[:,i] = data_matrix[:,i] - mean_vect
return data_new, mean_vect
def PCA(data_matrix, PC_num, training_ratio):
"""
Implement PCA on tumor's deformation covariance matrix (Encoder).
Parameters:
----------
data_matrix: 2D Array.
Size: nNodes*3 x SampleNum.
Each DOF is a feature. Mean-shifted.
PC_num: Int.
The number of picked PCs.
training_ratio: float.
The ratio of training dataset.
Returns:
----------
eigVect_full: 2D Array.
Size: nNodes*3 x nNodes*3.
All principal eigen-vectors.
eigVal_full: 1D Array.
Size: nNodes*3 x 1.
All principal eigen-values.
eigVect: 2D Array.
Size: nNodes*3 x PC_num.
Principal eigen-vectors.
eigVal: 1D Array.
Size: PC_num x 1.
Principal eigen-values.
weights: 2D Array (complex).
Size: PC_num x SampleNum.
Projected coordinates on each PC of all samples.
"""
# Compute covariance matrix & Eigendecompostion
training_index = int(np.ceil(data_matrix.shape[1] * training_ratio)) # Samples along with axis-1.
cov_matrix = data_matrix[:,0:training_index] @ np.transpose(data_matrix[:,0:training_index]) # Size: nDOF * nDOF
eigVal_full, eigVect_full = np.linalg.eig(cov_matrix)
# PCA
eigVal, eigVect = np.zeros(shape=(PC_num, 1), dtype=complex), np.zeros(shape=(eigVect_full.shape[0], PC_num), dtype=complex)
eigVal_sorted_indices = np.argsort(np.real(eigVal_full))
eigVal_PC_indices = eigVal_sorted_indices[-1:-(PC_num+1):-1] # Pick PC_num indices of largest principal eigenvalues
for i, index in enumerate(eigVal_PC_indices): # From biggest to smallest
eigVal[i,0] = eigVal_full[index] # Pick PC_num principal eigenvalues. Sorted.
eigVect[:,i] = eigVect_full[:,index] # Pick PC_num principal eigenvectors. Sorted.
# Compute weights of each sample on the picked basis (encoding).
weights = np.transpose(eigVect) @ data_matrix # Size: PC_num * SampleNum, complex.
return eigVect_full, eigVal_full, eigVect, eigVal, weights
def dataReconstruction(eigVect, weights, mean_vect, nDOF, non_zero_indices_list):
"""
Reconstruct the data with eigenvectors and weights (Decoder).
Parameters:
----------
eigVect: 2D Array.
Size: nDOF x PC_num.
Principal eigenvectors aligned along with axis-1.
weights: 2D Array (complex).
Size: PC_num x SampleNum.
Weights of each sample aligned along with axis-1.
mean_vect: 1D Array.
The mean value of each feature of training data.
nDOF: Int.
Number of all DOFs of original deformation matrix.
non_zero_indices_list: List.
All indices of non zero rows for deformation reconstruction.
Returns:
----------
data_reconstruct: 2D Array.
Size: nDOF x SampleNum.
Reconstructed deformation results.
"""
# Transform weights back to original vector space (decoding)
data_temp = eigVect @ weights
for i in range(data_temp.shape[1]):
data_temp[:,i] += mean_vect # Shifting back
data_reconstruct = np.zeros(shape=(nDOF, data_temp.shape[1]), dtype=complex)
for i, index in enumerate(non_zero_indices_list):
data_reconstruct[index,:] = data_temp[i,:]
return np.real(data_reconstruct)
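# Round-trip sketch (added; not part of the original script): encode a random deformation
# matrix with PCA and decode it again with dataReconstruction. Sizes are arbitrary; with
# pc_num == n_features the reconstruction is nearly lossless.
def _demo_pca_round_trip(n_features=12, n_samples=30, pc_num=12):
    rng = np.random.RandomState(0)
    data = rng.rand(n_features, n_samples)
    data_shifted, mean_vect = zeroMean(data, training_ratio=1.0)
    _, _, eigVect, _, weights = PCA(data_shifted, pc_num, training_ratio=1.0)
    recon = dataReconstruction(eigVect, weights, mean_vect, n_features, list(range(n_features)))
    return np.max(np.abs(recon - data)) # ~0 when pc_num == n_features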
def performanceEvaluation():
"""
Evaluate 3D Euclidean distance of each node pair of predicted and label deformation.
"""
pass # Stub: nodal-error evaluation is currently performed inline in main().
def greedyClustering(v_space, initial_pt_index, k, style):
"""
Generate `k` centers, starting with the `initial_pt_index`.
Parameters:
----------
v_space: 2D array.
The coordinate matrix of the initial geometry.
The column number is the vertex's index.
initial_pt_index: Int.
The index of the initial point.
k: Int.
The number of centers aiming to generate.
style: String.
Indicate "last" or "mean" to choose the style of evaluation function.
"last": Calculate the farthest point by tracking the last generated center point.
Minimum distance threshold applied.
"mean": Calculate a point with the maximum average distance to all generated centers;
Calculate a point with the minimum distance variance of all generated centers.
Minimum distance threshold applied.
Returns:
----------
center_indices_list: List of int.
Containing the indices of all k centers.
Empty if the input style indicator is wrong.
"""
if style == "last":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
for j in range(k):
center_coord_temp = v_space[center_indices_list[j],:]
max_dist_temp = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_temp = np.linalg.norm(center_coord_temp.reshape(-1,3) - coord_temp.reshape(-1,3))
dist_list = []
for index in center_indices_list:
dist_temp_eachCenter = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp_eachCenter)
min_dist_temp = np.min(dist_list)
if dist_temp > max_dist_temp and min_dist_temp >= min_dist_thrshld:
max_dist_temp = dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
return center_indices_list
elif style == "mean":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
while(True):
max_dist_thrshld = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
avg_dist_temp = np.mean(dist_list)
min_dist_temp = np.min(dist_list)
if avg_dist_temp > max_dist_thrshld and min_dist_temp >= min_dist_thrshld:
max_dist_thrshld = avg_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
var_thrshld = 1e5
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
var_dist_temp = np.var(dist_list)
min_dist_temp = np.min(dist_list)
if var_dist_temp < var_thrshld and min_dist_temp >= min_dist_thrshld:
var_thrshld = var_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
return center_indices_list
else:
print("Wrong input of the style indicator. Will start training based on the optimal FM indices. ")
return []
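# Usage sketch (added; not part of the original script): run the greedy k-center selection
# on a small random point cloud; the point count, k and seed are arbitrary.
def _demo_greedy_clustering(num_points=200, k=5, seed=0):
    rng = np.random.RandomState(seed)
    v_space_demo = rng.rand(num_points, 3) # num_points x 3 coordinate matrix, unit: m
    return greedyClustering(v_space_demo, initial_pt_index=0, k=k, style="mean")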
def generateFMIndices(FM_num, fix_node_list, total_nodes_num):
"""
Generate FM indices for benchmark deformation tracking.
Parameters:
----------
FM_num: Int.
Number of FMs.
fix_node_list: List of ints.
Indices of fixed nodes.
total_nodes_num: Int.
Total number of nodes.
Returns:
----------
FM_indices: List of int.
Random ints (indices) within the range of [0, total_nodes_num).
"""
FM_indices = []
while len(FM_indices) < FM_num: # Keep sampling until FM_num distinct, non-fixed indices are collected.
    rand_temp = np.random.randint(0, total_nodes_num)
    if (rand_temp not in FM_indices and
        rand_temp+1 not in fix_node_list): FM_indices.append(rand_temp)
return FM_indices
def dataProcessing(data_x, data_y, batch_size, training_ratio, validation_ratio,
FM_indices, bool_norm=False):
"""
Data preprocessing.
Parameters:
----------
data_x: 2D Array (nDOF x SampleNum).
The deformation data (x SampleNum) of all DOFs.
data_y: 2D Array (PC_num x SampleNum, complex).
The label data (x SampleNum).
Here it should be the weights vectors for the force field reconstruction.
batch_size: Int.
The size of a single training batch input.
training_ratio: Float.
Indicates the portion of training dataset.
validation_ratio: Float.
Indicates the portion of validation dataset.
FM_indices: 1D Array.
Randomly picked FM indices.
Typical size: 5.
bool_norm (optional): Boolean.
True: conduct directional input normalization.
False: skip directional input normalization.
Default: False.
Returns:
----------
train_dataloader: Tensor dataloader.
Training dataset.
valid_dataloader: Tensor dataloader.
Validation dataset.
test_dataloader: Tensor dataloader.
Testing dataset.
norm_params: 1D Array.
Min and max values of data matrix.
Return empty list if bool_norm == 0.
"""
# Data normalization
if bool_norm: data_x, norm_params = normalization(data_x)
else: norm_params = []
data_x_FM = np.zeros(shape=(int(len(FM_indices)*3), data_x.shape[1]))
for i, index in enumerate(FM_indices):
data_x_FM[i*3:(i+1)*3,:] = data_x[int(index*3):int((index+1)*3),:]
data_x = copy.deepcopy(data_x_FM) # Size: FM_num*3 x SampleNum
data_y = np.real(data_y) # Discard imaginary part of the weights for the convenience of training.
# Partition the whole dataset into "train" and "test".
training_index = int(np.ceil(data_x.shape[1] * training_ratio)) # Samples along with axis-1.
validation_index = int(np.ceil(data_x.shape[1] * (training_ratio + validation_ratio))) # Samples along with axis-1.
train_x = torch.from_numpy(data_x[:,0:training_index]).float() # size: 15 x nTrain
train_y = torch.from_numpy(data_y[:,0:training_index]).float() # size: 20 x nTrain
valid_x = torch.from_numpy(data_x[:,training_index:validation_index]).float() # size: 15 x nValid
valid_y = torch.from_numpy(data_y[:,training_index:validation_index]).float() # size: 20 x nValid
test_x = torch.from_numpy(data_x[:,validation_index:]).float() # size: 15 x nTest
test_y = torch.from_numpy(data_y[:,validation_index:]).float() # size: 20 x nTest
# Generate dataloaders
# Make sure the sample dimension is on axis-0.
train_dataset = torch.utils.data.TensorDataset(np.transpose(train_x),
np.transpose(train_y))
valid_dataset = torch.utils.data.TensorDataset(np.transpose(valid_x),
np.transpose(valid_y))
test_dataset = torch.utils.data.TensorDataset(np.transpose(test_x),
np.transpose(test_y))
train_dataloader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True
)
valid_dataloader = torch.utils.data.DataLoader(
dataset=valid_dataset,
batch_size=batch_size,
shuffle=True
)
test_dataloader = torch.utils.data.DataLoader(
dataset=test_dataset
)
return train_dataloader, valid_dataloader, test_dataloader, norm_params
def trainValidateNet(train_dataloader, valid_dataloader, neural_net, learning_rate,
num_epochs, neural_net_folderPath, device):
"""
Forward MLP training and validation.
Parameters:
----------
train_dataloader: Tensor dataloader.
Training dataset.
valid_dataloader: Tensor dataloader.
Validation dataset.
neural_net: MLP model.
learning_rate: Float.
Specify a value typically less than 1.
num_epochs: Int.
Total number of training epochs.
neural_net_folderPath: String.
The directory to save the eventual trained ANN.
device: CPU/GPU.
Returns:
----------
neural_net: Trained MLP.
lossList_train: List.
The loss result of each training epoch.
lossList_valid: List.
The loss result of each validation epoch.
"""
# Define criterion and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(neural_net.parameters(), learning_rate)
# Iterative training and validation
lossList_train, lossList_valid = [], [] # List of loss summation during training and validation process.
for epoch in range(num_epochs):
loss_sum_train, loss_sum_valid = 0, 0
# Training
iteration_num_train = 0
for iteration, (displacements, weights) in enumerate(train_dataloader):
# Forward fitting
x_train_batch = torch.autograd.Variable(displacements)
y_train_batch = torch.autograd.Variable(weights)
x_train_batch = x_train_batch.to(device)
y_train_batch = y_train_batch.to(device)
output = neural_net(x_train_batch)
loss_train_temp = criterion(output, y_train_batch)
# Back propagation
optimizer.zero_grad()
loss_train_temp.backward()
optimizer.step()
loss_sum_train += loss_train_temp.cpu().data.numpy()
iteration_num_train += 1
lossList_train.append(loss_sum_train/iteration_num_train)
# Validation
iteration_num_valid = 0
for iteration, (displacements, weights) in enumerate(valid_dataloader):
x_valid_batch = torch.autograd.Variable(displacements)
y_valid_batch = torch.autograd.Variable(weights)
x_valid_batch = x_valid_batch.to(device)
y_valid_batch = y_valid_batch.to(device)
output = neural_net(x_valid_batch)
loss_valid_temp = criterion(output, y_valid_batch)
loss_sum_valid += loss_valid_temp.cpu().data.numpy()
iteration_num_valid += 1
lossList_valid.append(loss_sum_valid/iteration_num_valid)
print("Epoch: ", epoch, "| train loss: %.8f | valid loss: %.8f "
% (loss_sum_train/iteration_num_train, loss_sum_valid/iteration_num_valid))
if (epoch+1) % 100 == 0:
ANN_savePath_temp = os.path.join(neural_net_folderPath,
"ANN_" + str(int((epoch+1)/100)) + ".pkl")
torch.save(neural_net.state_dict(), ANN_savePath_temp) # Save the model every 100 epochs.
torch.save(neural_net.state_dict(), os.path.join(neural_net_folderPath, "ANN_trained.pkl")) # Save the final trained ANN model.
return neural_net, lossList_train, lossList_valid
def testNet(test_dataloader, neural_net, device):
"""
MLP testing.
Parameters:
----------
test_dataloader: Tensor dataloader.
Testing dataset.
neural_net: Pre-trained MLP.
device: CPU/GPU.
Returns:
----------
pred_y_list: List of vectors.
The results of predictions.
test_y_list: List of vectors.
The results of original labels(weights).
lossList_test: List of floats.
The prediction error (MSE) of each test sample (weights).
"""
loss = nn.MSELoss()
pred_y_List, test_y_List, lossList_test = [], [], [] # List of predicted vector, test_y and loss of each sample.
for (displacements, weights) in test_dataloader:
x_sample = torch.autograd.Variable(displacements)
x_sample = x_sample.to(device)
weights = weights.to(device)
pred_y = neural_net(x_sample)
pred_y_List.append(np.array(pred_y.cpu().data.numpy()).astype(float).reshape(-1,1))
test_y_List.append(np.array(weights.cpu().data.numpy()).astype(float).reshape(-1,1))
loss_test_temp = loss(pred_y, weights)
lossList_test.append(loss_test_temp.cpu().data.numpy())
return pred_y_List, test_y_List, lossList_test
def main():
"""
MAIN IMPLEMENTATION AND EXECUTION.
Preparations:
----------
1. Run benchmarkCreation.m in Matlab to generate the file "benchmark20mm1000samples.mat" (main data file) in the working directory;
2. Create two empty folders in the working directory and name them as "ANN_model" and "figure", respectively.
Pipeline:
----------
1. Initialize parameters;
2. Extract data from the aforementioned .mat files;
3. Implement PCA on the extracted data, and generate/obtain the fiducial marker indices;
4. Data preprocessing, and generate train/valid/test tensor dataloaders;
5. Train & Validate & Test ANN. MLP Architecture: [3*FM_num, 128, 64, PC_num];
6. Deformation reconstruction for ANN;
7. Pure PCA-based encoding & decoding, and corresponding deformation reconstruction;
8. Plot & Save the results.
Result files:
----------
1. "ANN_benchmark_results.mat".
The file containing all generated results.
Loadable in Python and Matlab;
2. "ANN_*.pkl" x 15 + "ANN_trained.pkl" x 1.
The model/parameter files of trained ANN.
Automatically saved in the folder "ANN_model" every 100 epochs;
3. "train_valid_loss.log".
The text file contains hyperparameters of ANN, loss & elapsed time of the training-validation process, and the performance of model testing;
4. Figures & Plots.
Statistic diagrams showing the performance of deformation reconstruction.
Generated after running the file "resultPlot.py". All saved in the folder "figure".
Next steps:
----------
1. Run the file "resultPlot.py" in the same working directory to generate more plots evaluating the performance of deformation reconstruction. All saved in the folder "figure";
2. Run the file "visualizeResults.m" in the same working directory in Matlab to visualize the FMs' positions and the results of deformation reconstruction;
3. Change the hidden layer architecture or any other necessary parameters and finish the model parameterization;
4. Run the file "ANN_64x32_FM_opt.py" to find the optimal initial FM and the corresponding center point indices in a certain distributed order.
"""
# ********************************** INITIALIZE PARAMETERS ********************************** #
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 20
learning_rate = 0.001
num_epochs = 4000 # Default: 1500.
training_ratio = 0.8
validation_ratio = 0.1
FM_num = 5
PC_num = 27 # Optimal. (Default: 27. From Dan 09/02).
isNormOn = False # True/False: Normalization on/off.
ANN_folder_path = "ANN_model" # The directory of trained ANN models.
figure_folder_path = "figure" # The directory of figure folder.
FEM_folder_path = "FEM_pipeline" # The directory of FEM pipeline folder, as well as the directory of the deformation benchmark file.
isKCenter = True # True/False: Y/N for implementing optimized k-center.
if not os.path.isdir(ANN_folder_path): os.mkdir(ANN_folder_path)
if not os.path.isdir(figure_folder_path): os.mkdir(figure_folder_path)
# ********************************** DATA PROCESSING ********************************** #
# Extract data from .mat file
data_mat = scipy.io.loadmat(os.path.join(FEM_folder_path, "benchmark20mm1000samples.mat"))
v_space, data_x, fix_dof_list = data_mat["NodeI"], data_mat["xI"], data_mat["idxFix3"] # change the variable's name if necessary.
fix_node_list = [int(ind/3) for ind in fix_dof_list if ind % 3 == 0] # Indices of fixed nodes. Indexed from 1.
# Implement PCA
orig_node_num = int(data_x.shape[0] / 3.0)
data_x, nDOF, non_zero_indices_list = matrixShrink(data_x) # Remove zero rows of data_x.
data_x, mean_vect = zeroMean(data_x, training_ratio) # Shift(zero) the data to its mean
eigVect_full, eigVal_full, eigVect, eigVal, data_y = PCA(data_x, PC_num, training_ratio) # PCA on training deformation matrix.
# Generate FM indices (Founded best initial indices: 96, 217, 496, 523, 564, 584, 1063)
v_space, _, _ = matrixShrink(v_space, fix_node_list)
if isKCenter:
initial_pt_index = 96 # Initial point index for k-center clustering. Randomly assigned. Current best result: 584 (best mean_max_nodal_error: 0.92 mm)
k = 20 # The number of wanted centers (must be larger than the FM_num). Default: 20.
style = "mean" # Style of k-center clustering. "mean" or "last".
center_indices_list = greedyClustering(v_space, initial_pt_index, k, style)
if center_indices_list != []: FM_indices = center_indices_list[0:FM_num]
else:
FM_indices = [4, 96, 431, 752, 1144] # Optimal FM indices. Back-up choice when the returned list is empty.
center_indices_list = FM_indices
else:
FM_indices = generateFMIndices(FM_num, fix_node_list, orig_node_num) # Randomly obtain FM indices.
center_indices_list = FM_indices
# Generate train/valid/test tensor dataloaders.
(train_dataloader, valid_dataloader,
test_dataloader, norm_params) = dataProcessing(data_x, data_y,
batch_size, training_ratio,
validation_ratio, FM_indices,
bool_norm=isNormOn)
# ********************************** TRAIN & VALID & TEST ********************************** #
# Generate MLP model
neural_net = Net1(FM_num, PC_num).to(device)
# Forward training & validation
start_time = time.time()
neural_net, lossList_train, lossList_valid = trainValidateNet(train_dataloader, valid_dataloader,
neural_net, learning_rate, num_epochs,
ANN_folder_path, device)
end_time = time.time()
elapsed_time = end_time - start_time # Elapsed time for training.
# Test pre-trained MLP & Plot confidence interval of ANN accuracy
pred_y_List, test_y_List, lossList_test = testNet(test_dataloader, neural_net, device)
lossList_test = np.array(lossList_test).astype(float).reshape(-1,1)
confidence_interval_accuracy = st.norm.interval(0.95, loc=np.mean(1-lossList_test),
scale=st.sem(1-lossList_test))
print("Confidence interval of test accuracy is {}".format(np.array(confidence_interval_accuracy).astype(float).reshape(1,-1)))
# ********************************** PERFORMANCE EVALUATION ********************************** #
# Deformation reconstruction
data_matrix = data_mat["xI"]
test_data = data_matrix[:,int(np.ceil(data_matrix.shape[1] * (training_ratio + validation_ratio))):] # Calling out testing deformation data
dist_nodal_matrix = np.zeros(shape=(int(test_data.shape[0]/3), len(pred_y_List)))
test_reconstruct_list, mean_error_list, max_error_list = [], [], []
for i in range(len(pred_y_List)):
data_reconstruct = dataReconstruction(eigVect, pred_y_List[i], mean_vect,
nDOF, non_zero_indices_list) # Concatenated vector xyzxyz...; A transfer from training dataset (upon which the eigen-space is established) to testing dataset.
dist_vector_temp = (data_reconstruct.reshape(-1,3) -
test_data[:,i].reshape(-1,1).reshape(-1,3)) # Convert into node-wise matrix.
node_pair_distance = []
for j in range(dist_vector_temp.shape[0]): # Number of nodes
node_pair_distance.append(np.linalg.norm(dist_vector_temp[j,:]))
mean_error_temp = np.sum(np.array(node_pair_distance).astype(float).reshape(-1,1)) / len(node_pair_distance)
max_error_temp = np.max(node_pair_distance)
dist_nodal_matrix[:,i] = np.array(node_pair_distance).astype(float).reshape(1,-1)
test_reconstruct_list.append(data_reconstruct)
mean_error_list.append(mean_error_temp)
max_error_list.append(max_error_temp)
# Pure PCA for test samples
test_data_shrinked, _, _ = matrixShrink(test_data)
weights_test = np.transpose(eigVect) @ test_data_shrinked
test_PCA_reconstruct = dataReconstruction(eigVect, weights_test, mean_vect,
nDOF, non_zero_indices_list)
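# (Pure-PCA baseline: each test sample is projected onto the training eigenvectors,
# w = V^T x, and then reconstructed, presumably as x_hat = V w + mean_vect expanded back
# to the full DOF vector; the nodal errors below are computed the same way as for the ANN.)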
dist_nodal_matrix_testPCA = np.zeros(shape=(int(test_data.shape[0]/3), len(pred_y_List)))
mean_error_list_testPCA, max_error_list_testPCA = [], []
for i in range(test_PCA_reconstruct.shape[1]):
dist_vector_temp = (test_PCA_reconstruct[:,i].reshape(-1,3) -
test_data[:,i].reshape(-1,1).reshape(-1,3))
node_pair_distance = []
for j in range(dist_vector_temp.shape[0]): # Number of nodes
node_pair_distance.append(np.linalg.norm(dist_vector_temp[j,:]))
mean_error_temp = np.sum(np.array(node_pair_distance).astype(float).reshape(-1,1)) / len(node_pair_distance)
max_error_temp = np.max(node_pair_distance)
dist_nodal_matrix_testPCA[:,i] = np.array(node_pair_distance).astype(float).reshape(1,-1)
mean_error_list_testPCA.append(mean_error_temp)
max_error_list_testPCA.append(max_error_temp)
max_nodal_error = 1e3*np.array(max_error_list).astype(float).reshape(-1,1) # Unit: mm.
mean_nodal_error = 1e3*np.array(mean_error_list).astype(float).reshape(-1,1) # Unit: mm.
max_mean = np.mean(max_nodal_error) # Compute the mean value of max errors.
mean_mean = np.mean(mean_nodal_error) # Compute the mean value of mean errors.
# ********************************** PLOT & SAVE RESULTS ********************************** #
# Plot training loss w.r.t. iteration.
plt.figure(figsize=(20.0,12.8))
plt.rcParams.update({"font.size": 35})
plt.tick_params(labelsize=35)
line1, = plt.plot(range(100, num_epochs, 1),np.log(lossList_train)[100:],label="Train Loss (log)")
line2, = plt.plot(range(100, num_epochs, 1),np.log(lossList_valid)[100:],label="Validation Loss (log)")
plt.xlabel("Epoch", fontsize=40)
plt.ylabel("log(Loss)", fontsize=40)
plt.legend([line1,line2], ["Train Loss (log)","Validation Loss (log)"], prop={"size": 40})
plt.title("Train & Valid Loss vs. Epoch")
plt.savefig(figure_folder_path + "/Train_Valid_Loss.png")
# Save training process & test info into .log file.
saveLog(lossList_train, lossList_valid, FM_num, PC_num, batch_size,
learning_rate, num_epochs, center_indices_list, elapsed_time, max_mean, mean_mean)
# Save results to .mat files.
for i, vector in enumerate(test_reconstruct_list):
if i == 0: test_reconstruct_matrix = vector
else: test_reconstruct_matrix = np.concatenate((test_reconstruct_matrix, vector), axis=1)
mdict = {"FM_num": FM_num, "PC_num": PC_num, # Numbers of FMs and principal components.
"test_deformation_label": test_data, # Label deformation results.
"test_deformation_reconstruct": test_reconstruct_matrix, # ANN reconstruction deformation results.
"test_PCA_reconstruct": test_PCA_reconstruct, # Reconstruction of pure PCA decomposition.
"fix_node_list": fix_node_list, # List of fixed node indices. Indexed from 1.
"FM_indices": np.array(FM_indices).astype(int).reshape(-1,1) + 1, # FMs" indices. Add 1 to change to indexing system in Matlab.
"center_indices": np.array(center_indices_list).astype(int).reshape(-1,1) + 1, # Center indices generated from the k-center clustering. Add 1 to change to indexing system in Matlab.
"dist_nodal_matrix": 1e3*dist_nodal_matrix, # Distance between each nodal pair. Unit: mm
"mean_nodal_error": mean_nodal_error, # Mean nodal distance of each sample. Unit: mm
"max_nodal_error": max_nodal_error, # Max nodal distance of each sample. Unit: mm
"eigVect_full": eigVect_full, "eigVal_full": eigVal_full, # Full eigenvector and eigenvalue matrices
"eigVect": eigVect, "eigVal": eigVal, # Principal eigenvector and eigenvalue matrices
"mean_vect": mean_vect, # The mean vector of training dataset for data reconstruction
"dist_nodal_matrix_testPCA": 1e3*dist_nodal_matrix_testPCA, # Distance between each nodal pair (pure PCA reconstruction). Unit: mm
"mean_nodal_error_testPCA": 1e3*np.array(mean_error_list_testPCA).astype(float).reshape(-1,1), # Mean nodal distance of each sample (pure PCA reconstruction). Unit: mm
"max_nodal_error_testPCA": 1e3*np.array(max_error_list_testPCA).astype(float).reshape(-1,1) # Max nodal distance of each sample (pure PCA reconstruction). Unit: mm
}
scipy.io.savemat("ANN_benchmark_results.mat", mdict) # Run visualization on Matlab.
if __name__ == "__main__":
# Run the main function in terminal: python ANN_64x32.py
main()
|
the-stack_106_14124
|
#!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration."""
import platform
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup # pylint: disable=g-import-not-at-top
# Configure the required packages and scripts to install, depending on
# Python version and OS.
REQUIRED_PACKAGES = [
'google-apputils',
'python-gflags',
'google-api-python-client==1.2',
'oauth2client==1.2',
'httplib2',
]
CONSOLE_SCRIPTS = [
'bq = bq:run_main',
]
if platform.system() == 'Windows':
REQUIRED_PACKAGES.append('pyreadline')
py_version = platform.python_version()
if py_version < '2.6.5' or py_version >= '3':
  raise ValueError('BigQuery requires Python >= 2.6.5 and < 3.0.')
_BQ_VERSION = '2.0.18'
setup(name='bigquery',
version=_BQ_VERSION,
description='BigQuery command-line tool',
url='http://code.google.com/p/google-bigquery-tools/',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
py_modules=[
'bq',
'bigquery_client',
'table_formatter',
],
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
install_requires=REQUIRED_PACKAGES,
provides=[
'bigquery (%s)' % (_BQ_VERSION,),
],
# Information for packaging of the discovery document.
include_package_data=True,
packages=['discovery'],
package_data={
'discovery': ['*'],
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='google bigquery library',
)
|
the-stack_106_14125
|
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib
import os
import pandas as pd
COLORS = [
'tab:blue',
'tab:orange',
'tab:green',
'tab:red',
'tab:purple',
'tab:brown',
'tab:pink',
'tab:gray',
'tab:olive',
'tab:cyan',
]
def show_clusters_centroids(clusters,centroids,title,x_var_indx=0,y_var_indx=1,x_var_name='Variable 1',y_var_name="Variable 2",keep=False):
"""
Show the current clustering for 1 second and save the plot
Input:
clusters (list of lists of lists): A List of Clusters. Each cluster
is also a list of points in the cluster. SEE: k_means.get_clusters()
centroids (list of lists): A list with the current centroids
title (string): The title for the plot.
"""
for i, cluster in enumerate(clusters):
cluster = np.array(cluster)
plt.scatter(
cluster[:,x_var_indx],
cluster[:,y_var_indx],
c = COLORS[i],
label="Cluster {}".format(i)
)
for i, centroid in enumerate(centroids):
plt.scatter(
centroid[x_var_indx],
centroid[y_var_indx],
c = COLORS[i],
marker='x',
s=100
)
plt.title(title)
plt.xlabel(x_var_name)
plt.ylabel(y_var_name)
plt.legend()
if not keep:
plt.show(block=False)
plt.pause(1)
plt.close()
else:
plt.show()
def load_data(filename):
"""
Reads a csv file and returns a list of lists
"""
with open(filename,'r') as fp:
data = fp.read().split('\n')
data_new = [f.split(',') for f in data if f != ""]
data_formatted = []
for instance in data_new:
instance_new = []
for value in instance:
try:
instance_new.append(float(value))
except ValueError:
instance_new.append(value)
data_formatted.append(instance_new)
return data_formatted
def distance(a,b):
"""
Compute Euclidean Distance Between Two Points
Input:
a (list): an n-dimensional list or array
b (list): an n-dimensional list or array
Output:
The Euclidean Distance between vectors a and b
"""
return np.sqrt(np.sum((np.array(b)-np.array(a))**2))
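# Minimal usage sketch for distance() on a made-up 3-4-5 right triangle; the
# helper below is illustrative only and is never called by this module.
def _distance_example():
    assert distance([0, 0], [3, 4]) == 5.0
    assert distance([1, 1, 1], [1, 1, 1]) == 0.0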
def get_clusters(points,centroids):
"""
Returns a list of clusters given all the points in the dataset and
the current centroids.
Input:
points (list of lists): A list with every point in the dataset
centroids (list of lists): A list with the current centroids
Output:
clusters (list of lists of lists): A List of Clusters. Each cluster
is also a list of points in the cluster.
"""
clusters = [[] for f in centroids]
for i, point in enumerate(points):
point_to_centroids = []
for j, centroid in enumerate(centroids):
point_to_centroids.append(distance(point,centroid))
closest_idx = np.argmin(point_to_centroids)
clusters[closest_idx].append(point)
return clusters
def update_centroids(clusters):
"""
Given a list of clusters (as prepared by get_clusters) get the new centroids
Input:
clusters (list of lists of lists): A List of Clusters. Each cluster
is also a list of points in the cluster.
Output:
A (list of lists): The new centroids.
"""
new_centroids = []
for cluster in clusters:
new_centroids.append(np.mean(cluster,axis = 0))
return new_centroids
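# Illustrative sketch of one assignment/update step on a tiny, made-up 2D
# dataset with two well-separated groups; never called by this module.
def _kmeans_step_example():
    points = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
    centroids = [[0.0, 0.5], [10.0, 10.5]]
    clusters = get_clusters(points, centroids)   # assign each point to its nearest centroid
    new_centroids = update_centroids(clusters)   # recompute centroids as cluster means
    assert np.allclose(new_centroids[0], [0.0, 0.5])
    assert np.allclose(new_centroids[1], [10.0, 10.5])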
def k_means(points, k, iterations=10):
"""
K Means Unsupervised ML Algorithm Implementation with Forgy Initialization
Input:
points (numpy array): a 2D Array with the data to cluster.
k (int): The number of clusters to find
"""
idx = np.random.randint(len(points),size=k)
centroids = points[idx,:]
clusters = get_clusters(points,centroids)
for i in range(iterations):
if i % 1 == 0:
if i == 0:
title = "Initialization"
else:
title = "Iteration {}".format(i+1)
show_clusters_centroids(
clusters,
centroids,
title,
)
clusters = get_clusters(points,centroids)
centroids = update_centroids(clusters)
return clusters,centroids
if __name__ == "__main__":
data = load_data('./data/datasets_14701_19663_CC GENERAL.csv')
k = 3
X = np.array([f[:-1] for f in data])
y = np.array([f[-1] for f in data])
clusters,centroids = k_means(X,3)
show_clusters_centroids(clusters,centroids,"Result", keep=True)
plt.show()
|
the-stack_106_14126
|
# -*- coding: utf-8 -*-
"""MRIO utility functions
"""
import os
import geopandas as gpd
import pandas as pd
def load_table(data_path):
"""Load national Input-Output table as pandas dataframe.
Parameters
- file_path - String name of data path
Outputs
- pandas Dataframe with Input-Output table that is going to be used
"""
vnm_IO_path = os.path.join(data_path, "INPUT-OUTPUT TABLE 2012",
"IO Table 2012 English.xlsx")
return pd.read_excel(vnm_IO_path, sheet_name='IO_clean', index_col=0)
def load_sectors(data_path):
"""Load national Input-Output table and extracted all sectors
Parameters
- data_path - String name of data path
Outputs
- pandas Dataframe with all sectors in national Input-Output table
"""
vnm_IO_path = os.path.join(data_path, "INPUT-OUTPUT TABLE 2012",
"IO Table 2012 English.xlsx")
vnmIO_rowcol = pd.read_excel(vnm_IO_path, sheet_name='SectorName')
return vnmIO_rowcol
def get_final_sector_classification():
"""Return the list of sectors to be used in the new multiregional Input-Output table.
Outputs:
- list of sectors
"""
return ['secA', 'secB', 'secC', 'secD', 'secE', 'secF', 'secG', 'secH', 'secI']
def map_sectors(vnm_IO_rowcol):
"""Map the sectors of the loaded national Input-Output table to the sectors which are going to used in the multiregional Input-Output table.
Parameters
- vnm_IO_rowcol - pandas dataframe with all sectors in the national Input-Output table.
Outputs
- dictionary to map row sectors
- dictionary to map column sectors
"""
row_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains(
"row") | vnm_IO_rowcol['mapped'].str.contains("sec")]
col_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains(
"col") | vnm_IO_rowcol['mapped'].str.contains("sec")]
return dict(zip(row_only.code, row_only.mapped)), dict(zip(col_only.code, col_only.mapped))
def aggregate_table(vnm_IO, vnm_IO_rowcol, in_million=True):
"""Aggregate national Input-Output table to the amount of sectors used in the multiregional Input-Output table.
Parameters
- vnm_IO - pandas dataframe of national Input-Output table
- vnm_IO_rowcol - pandas dataframe with all sectors in the national Input-Output table
- in_million - Specify whether we want to divide the table by 1000000, to have values in millions. The default value is set to **True**
Outputs
- pandas Dataframe with aggregated national Input-Output table
"""
sectors = get_final_sector_classification()
# aggregate table
mapper_row, mapper_col = map_sectors(vnm_IO_rowcol)
vnm_IO.index = vnm_IO.index.map(mapper_row.get)
vnm_IO.columns = vnm_IO.columns.to_series().map(mapper_col)
aggregated = vnm_IO.groupby(vnm_IO.index, axis=0).sum().groupby(
vnm_IO.columns, axis=1).sum()
aggregated = aggregated.reindex(sectors+['col1', 'col2', 'col3'], axis='columns')
aggregated = aggregated.reindex(sectors+['row1', 'row2', 'row3'], axis='index')
if in_million == True:
return aggregated/1000000
else:
return aggregated
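# Illustrative sketch (never called) of the two-axis groupby-sum used above,
# on a made-up 2x2 frame with hypothetical sector labels; it mirrors the call
# pattern in aggregate_table.
def _aggregate_table_example():
    df = pd.DataFrame([[1, 2], [3, 4]], index=['secA', 'secA'],
                      columns=['secA', 'secB'])
    # Rows sharing a label are summed first, then columns sharing a label.
    return df.groupby(df.index, axis=0).sum().groupby(df.columns, axis=1).sum()
    # -> a single 'secA' row with secA = 1 + 3 = 4 and secB = 2 + 4 = 6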
def is_balanced(io_table):
"""Function to check if Input-Output table is balanced.
Parameters
- io_table - Input-Output table.
Outputs
- return print statement if table is balanced.
"""
row = io_table.sum(axis=0)
col = io_table.sum(axis=1)
    if abs((row - col).sum()) < 1:
print('Table is balanced')
def load_provincial_stats(data_path):
"""Load shapefile with provincial-level data.
Parameters
- data_path - String name of data path
Outputs
- geopandas GeoDataFrame with provincial data.
"""
prov_path = os.path.join(data_path, 'Vietnam_boundaries',
'boundaries_stats', 'province_level_stats.shp')
return gpd.read_file(prov_path)
def estimate_gva(regions, in_million=True):
"""Functions to estimate the Gross Value Added for each sector in each province.
Parameters
- regions - pandas DataFrame with provincial/regional data
Outputs
- list with GVA values per sector in each province
"""
if in_million == True:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital))/1000000)
else:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital)))
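# Worked example of the GVA proxy above with made-up numbers: a province with
# 100 firms, average labour cost 2.0 and capital 3.0 contributes
# 100*2.0 + 100*3.0 = 500 (0.0005 when expressed in millions).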
def create_proxies(data_path, notrade=False, own_production_ratio=0.9, min_rice=True):
"""Create all proxies required in the disaggregation process.
Parameters
- data_path - String name of data path
- notrade - Boolean to specify whether we should include trade in the disaggregation. This should be set to **True** in the first step of the disaggregation. The default is set to **False**
- min_rice - Boolean to determine whether you want to use the minimal rice value or the maximum rice value from the flow analysis. The default is set to **True**
    - own_production_ratio - Specify how much supply and demand is locally supplied and used, and how much is imported/exported. The default is set to **0.9**
Outputs
- all proxy level .csv files.
"""
provinces = load_provincial_stats(data_path)
provinces.name_eng = provinces.name_eng.apply(
lambda x: x.replace(' ', '_').replace('-', '_'))
od_table = load_od(data_path, min_rice=min_rice)
create_indices(data_path, provinces, write_to_csv=True)
create_regional_proxy(data_path, provinces, write_to_csv=True)
create_sector_proxies(data_path, provinces, write_to_csv=True)
create_zero_proxies(data_path, od_table, notrade=notrade, write_to_csv=True)
if notrade == False:
create_level14_proxies(data_path, od_table, own_production_ratio, write_to_csv=True)
def create_regional_proxy(data_path, regions, write_to_csv=True):
"""Function to create the proxy to disaggregate the national table to the different regions.
Parameters
- data_path - String name of data path
- regions - pandas DataFrame with provincial/regional data
- write_to_csv - Boolean to specify whether you want to save output to .csv files. The default value is set to **True**
Outputs
- set of .csv files with regional proxy data
"""
# regions['pro_nfirm']*regions['laborcost'] + regions['pro_nfirm']*regions['capital']
regions['raw_gva'] = estimate_gva(regions)
subset = regions.loc[:, ['name_eng', 'raw_gva']]
subset['year'] = 2010
subset['raw_gva'] = subset.raw_gva.apply(int)/(subset['raw_gva'].sum(axis='index'))
subset = subset[['year', 'name_eng', 'raw_gva']]
subset.columns = ['year', 'id', 'gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path, 'IO_analysis', 'MRIO_TABLE', 'proxy_reg_vnm.csv')
subset.to_csv(csv_path, index=False)
def create_indices(data_path, provinces, write_to_csv=True):
"""Create list of indices required to disaggregate the national table to the different regions.
Parameters
- data_path - String name of data path
- provinces - pandas DataFrame with provincial/regional data
- write_to_csv - Boolean to specify whether you want to save output to .csv files. The default value is set to **True**
Outputs
- set of .csv files with indices proxy data
"""
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith(
'sec') | x.startswith('row'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names]
for item in sublist]
indices = pd.DataFrame([region_names_list, rows]).T
indices.columns = ['region', 'sector']
indices['sector'] = indices['sector'].apply(lambda x: x.replace('row', 'other'))
if write_to_csv == True:
csv_path = os.path.join(data_path, 'IO_analysis', 'MRIO_TABLE', 'indices_mrio.csv')
indices.to_csv(csv_path, index=False)
def create_sector_proxies(data_path, regions, write_to_csv=True):
"""Create sector proxies required to disaggregate the national table to the different sectors in each region.
Parameters
- data_path - String name of data path
- regions - pandas DataFrame with provincial/regional data
- write_to_csv - Boolean to specify whether you want to save output to .csv files. The default value is set to **True**
Outputs
- set of .csv files with sector proxy data
"""
# list of sectors
sector_list = get_final_sector_classification()
# get own sector classification for region file
map_dict = map_sect_vnm_to_eng()
regions = regions.rename(columns=map_dict)
# get sectoral gva based on proportion of firms in the region
sector_shares = regions[sector_list].multiply(regions['raw_gva'], axis='index')
sector_shares.index = regions.name_eng
for sector in sector_list+['other1', 'other2', 'other3']:
if sector in ['other1', 'other2', 'other3']:
subset = pd.DataFrame(sector_shares.sum(axis='columns')).divide(
pd.DataFrame(sector_shares.sum(axis='columns')).sum(axis='index'))
subset.columns = [sector]
else:
subset = pd.DataFrame(sector_shares.loc[:, sector]).divide(
pd.DataFrame(sector_shares.loc[:, sector]).sum(axis='index'))
subset.reset_index(inplace=True, drop=False)
subset['year'] = 2010
subset['sector'] = sector+str(1)
subset[sector] = subset[sector].apply(lambda x: round(x, 7))
subset = subset[['year', 'sector', 'name_eng', sector]]
subset.columns = ['year', 'sector', 'region', 'gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path, 'IO_analysis',
'MRIO_TABLE', 'proxy_{}.csv'.format(sector))
subset.to_csv(csv_path, index=False)
def get_trade_value(x, sum_use, sector, own_production_ratio=0.8):
"""Function to get the trade value between a certain origin and destination.
Parameters
- x - row in Origin-Destination dataframe
- sum_use - total use in a certain destination
- own_production_ratio - Specify how much supply and demand is locally supplied and used, and how much is imported/exported. The default is set to **0.8**
Outputs
- returns trade value
"""
if x.Destination == x.Origin:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*own_production_ratio
except:
return 1
elif x.gdp == 0:
return 0
else:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*(1-own_production_ratio)*x.ratio
except:
return 0
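# Worked sketch of the split above with hypothetical numbers: if total use of a
# sector in the destination is 100 and own_production_ratio is 0.8, the
# within-province flow gets 100*0.8 = 80, while an external origin holding a
# 0.25 share of the remaining trade gets 100*(1-0.8)*0.25 = 5.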
def create_level14_proxies(data_path, od_table, own_production_ratio=0.8, write_to_csv=True):
"""Function to create the level14 proxies, required to disaggregate the national table.
Parameters
- data_path - String name of data path
- od_table - pandas DataFrame with the Origin-Destination matrix
- own_production_ratio - Specify how much supply and demand is locally supplied and used, and how much is imported/exported. The default is set to **0.8**
- write_to_csv - Boolean to specify whether you want to save output to .csv files. The default value is set to **True**
Outputs
- set of .csv files with level 14 proxy data
"""
# get sector list
sector_list_ini = get_final_sector_classification()+['other1', 'other2', 'other3']
sector_list = [x+str(1) for x in sector_list_ini]
od_table.loc[od_table['Destination'] == od_table['Origin'], 'gdp'] = 10
od_sum = pd.DataFrame(od_table.groupby(['Destination', 'Origin']).sum().sum(axis=1))
    od_sum['ratio'] = od_sum.groupby(level=0).apply(lambda x: x / float(x.sum()))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination', 'Origin', 'gdp', 'ratio']
df_pretable = pd.read_csv(os.path.join(
data_path, 'IO_analysis', 'MRIO_TABLE', 'notrade_trade.csv'), index_col=[0, 1], header=[0, 1])
df_pretable = df_pretable.iloc[:, :567]
sum_use = df_pretable.sum(axis=1)
sum_use = pd.DataFrame(sum_use*0.1)
sum_use.reset_index(inplace=True)
sum_use.columns = ['region', 'sector', 'value']
combine = []
for sector in sector_list:
if sector[:-1] in ['other1', 'other2', 'other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
subset.drop('ratio', axis=1, inplace=True)
combine.append(subset)
else:
subset = od_sum.copy()
subset = subset.loc[od_sum.gdp != 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = subset.apply(lambda x: get_trade_value(
x, sum_use, sector[:-1], own_production_ratio), axis=1) # subset['gdp'].apply(lambda x: round(x, 2))
subset.drop('ratio', axis=1, inplace=True)
combine.append(subset)
all_ = pd.concat(combine)
final_sub = all_[['year', 'sector', 'Origin', 'Destination', 'gdp']]
final_sub.columns = ['year', 'sector', 'region', 'region', 'gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path, 'IO_analysis', 'MRIO_TABLE',
'proxy_trade14_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path, index=False)
def create_zero_proxies(data_path, od_table, notrade=False, write_to_csv=True):
"""Function to create the trade proxies, required to disaggregate the national table.
Parameters
- data_path - String name of data path
- od_table - pandas DataFrame with the Origin-Destination matrix
- notrade - Boolean to specify whether we should include trade in the disaggregation. This should be set to **True** in the first step of the disaggregation. The default is set to **False**
- write_to_csv - Boolean to specify whether you want to save output to .csv files. The default value is set to **True**
Outputs
- set of .csv files with level 14 proxy data
"""
# get sector list
sector_list = get_final_sector_classification()+['other1', 'other2', 'other3']
sector_list = [x+str(1) for x in sector_list]
# map sectors to be the same
mapper = map_regions()
od_table['Destination'] = od_table['Destination'].apply(lambda x: mapper[x])
od_table['Origin'] = od_table['Origin'].apply(lambda x: mapper[x])
od_table = od_table.loc[od_table['Destination'] != od_table['Origin']]
od_sum = pd.DataFrame(od_table.groupby(['Destination', 'Origin']).sum().sum(axis=1))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination', 'Origin', 'gdp']
if notrade == True:
od_sum['gdp'] = 0
for sector in sector_list:
if sector[:-1] in ['other1', 'other2', 'other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
else:
subset = od_sum.copy()
if notrade == False:
subset = subset.loc[od_sum.gdp == 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
all_ = pd.concat(combine)
final_sub = all_[['year', 'sector', 'Origin', 'subsector', 'Destination', 'gdp']]
final_sub.columns = ['year', 'sector', 'region', 'sector', 'region', 'gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path, 'IO_analysis', 'MRIO_TABLE',
'proxy_trade_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path, index=False)
def load_output(data_path, provinces, notrade=True):
"""Read output from disaggregation process and translate to usable pandas DataFrame
Parameters
- data_path - String name of data path
- provinces - pandas DataFrame with provincial/regional data
- notrade - Boolean to specify whether we should include trade in the disaggregation. This should be set to **True** in the first step of the disaggregation. The default is set to **False**
Outputs
- pandas DataFrame with disaggregated Input-Output table
"""
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith(
'sec') | x.startswith('row'))]*len(region_names)
cols = [x for x in rowcol_names if (x.startswith(
'sec') | x.startswith('col'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names]
for item in sublist]
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
# read output
if notrade == True:
output_path = os.path.join(data_path, 'IO_analysis',
'MRIO_TABLE', 'output_notrade.csv')
else:
output_path = os.path.join(data_path, 'IO_analysis', 'MRIO_TABLE', 'output.csv')
output_df = pd.read_csv(output_path, header=None)
output_df.index = index_mi
output_df.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in rowcol_names if x.startswith('sec')]*len(region_names)
col_only = [x for x in rowcol_names if x.startswith('col')]*len(region_names)
region_col = [item for sublist in [[x]*9 for x in region_names] for item in sublist] + \
[item for sublist in [[x]*3 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
tax_sub = output_df.loc[output_df.index.get_level_values(1) == 'row1'].sum(axis='index')
import_ = output_df.loc[output_df.index.get_level_values(1) == 'row2'].sum(axis='index')
valueA = output_df.loc[output_df.index.get_level_values(1) == 'row3'].sum(axis='index')
output_new = pd.concat([output_df.loc[~output_df.index.get_level_values(1).isin(['row1', 'row2', 'row3'])], pd.DataFrame(tax_sub).T,
pd.DataFrame(import_).T, pd.DataFrame(valueA).T])
# output_new = output_new.reindex(index_mi_reorder, axis='index')
output_new = output_new.reindex(column_mi_reorder, axis='columns')
# write to new csv
output_path_new = os.path.join(data_path, 'IO_analysis',
'MRIO_TABLE', 'output_reordered.csv')
output_new.to_csv(output_path_new)
return output_new
def map_sect_vnm_to_eng():
"""Convert vietnamese sector names to simple sector classification.
Outputs
- dictionary to map vietnamese sectors to simple sector names.
"""
map_dict = {'nongnghiep': 'secA',
'khaikhoang': 'secB',
'chebien': 'secC',
'detmay': 'secD',
'gogiay': 'secE',
'sanxuat': 'secF',
'xaydung': 'secG',
'thuongmai': 'secH',
'dichvu': 'secI'}
return map_dict
def load_od(data_path, min_rice=True):
"""Load national Origin-Destination matrix as pandas DataFrame.
Parameters
- data_path - String name of data path
- min_rice - Boolean to determine whether you want to use the minimal rice value or the maximum rice value from the flow analysis. The default is set to **True**
Outputs
- pandas DataFrame with national Origin-Destination matrix
"""
od_path = os.path.join(data_path, 'OD_data', 'national_scale_od_matrix.xlsx')
od_table = pd.read_excel(od_path, sheet_name='total')
if min_rice == True:
od_table.drop(['max_rice', 'min_tons', 'max_tons'], inplace=True, axis=1)
else:
od_table.drop(['min_rice', 'min_tons', 'max_tons'], inplace=True, axis=1)
od_table = od_table.dropna(subset=['o_region', 'd_region'], axis='index')
    od_table['o_region'] = od_table['o_region'].apply(
lambda x: x.replace(' ', '_').replace('-', '_'))
od_table['d_region'] = od_table['d_region'].apply(
lambda x: x.replace(' ', '_').replace('-', '_'))
od_table = od_table.rename(columns={'o_region': 'Origin', 'd_region': 'Destination'})
return od_table
def map_sectors_to_od(od_table):
"""Create dictionary to map products from national Origin-Destination matrix to sector classification for the Input-Output table.
Parameters
- od_table - pandas DataFrame with the Origin-Destination matrix
Outputs
- dictionary to map goods to sectors.
"""
goods = [x for x in od_table.columns if not (
x.startswith('Origin') | x.startswith('Destination'))]
sectors_conn = ['secA', 'secC', 'secE', 'secF', 'secG', 'secF',
'secF', 'secB', 'secF', 'secF', 'secF', 'secA', 'secC']
return dict(zip(goods, sectors_conn))
def map_regions():
"""Create dictionary to map regions to consistent format.
Outputs
- dictionary to map regions to consistent format
"""
return {
'An_Giang': 'An_Giang',
'Ba_Ria_Vung_Tau': 'Ba_Ria_Vung_Tau',
'Bac_Giang': 'Bac_Giang',
'Bac_Kan': 'Bac_Kan',
'Bac_Lieu': 'Bac_Lieu',
'Bac_Ninh': 'Bac_Ninh',
'Ben_Tre': 'Ben_Tre',
'Binh_Dinh': 'Binh_Dinh',
'Binh_Duong': 'Binh_Duong',
'Binh_Phuoc': 'Binh_Phuoc',
'Binh_Thuan': 'Binh_Thuan',
'Ca_Mau': 'Ca_Mau',
'Can_Tho': 'Can_Tho',
'Cao_Bang': 'Cao_Bang',
'Da_Nang': 'Da_Nang',
'Dak_Lak': 'Dak_Lak',
'Dak_Nong': 'Dak_Nong',
'Dien_Bien': 'Dien_Bien',
'Dong_Nai': 'Dong_Nai',
'Dong_Thap': 'Dong_Thap',
'Gia_Lai': 'Gia_Lai',
'Ha_Giang': 'Ha_Giang',
'Ha_Nam': 'Ha_Nam',
'Ha_Tay': 'Ha_Noi',
'Ha_Noi': 'Ha_Noi',
'Ha_Tinh': 'Ha_Tinh',
'Hai_Duong': 'Hai_Duong',
'Hai_Phong': 'Hai_Phong',
'Hau_Giang': 'Hau_Giang',
'Ho_Chi_Minh': 'Ho_Chi_Minh',
'Hoa_Binh': 'Hoa_Binh',
'Hung_Yen': 'Hung_Yen',
'Khanh_Hoa': 'Khanh_Hoa',
'Kien_Giang': 'Kien_Giang',
'Kon_Tum': 'Kon_Tum',
'Lai_Chau': 'Lai_Chau',
'Lam_Dong': 'Lam_Dong',
'Lang_Son': 'Lang_Son',
'Lao_Cai': 'Lao_Cai',
'Long_An': 'Long_An',
'Nam_Dinh': 'Nam_Dinh',
'Nghe_An': 'Nghe_An',
'Ninh_Binh': 'Ninh_Binh',
'Ninh_Thuan': 'Ninh_Thuan',
'Phu_Tho': 'Phu_Tho',
'Phu_Yen': 'Phu_Yen',
'Quang_Binh': 'Quang_Binh',
'Quang_Nam': 'Quang_Nam',
'Quang_Ngai': 'Quang_Ngai',
'Quang_Ninh': 'Quang_Ninh',
'Quang_Tri': 'Quang_Tri',
'Soc_Trang': 'Soc_Trang',
'Son_La': 'Son_La',
'Tay_Ninh': 'Tay_Ninh',
'Thai_Binh': 'Thai_Binh',
'Thai_Nguyen': 'Thai_Nguyen',
'Thanh_Hoa': 'Thanh_Hoa',
'Thua_Thien_Hue': 'Thua_Thien_Hue',
'Tien_Giang': 'Tien_Giang',
'Tra_Vinh': 'Tra_Vinh',
'Tuyen_Quang': 'Tuyen_Quang',
'Vinh_Long': 'Vinh_Long',
'Vinh_Phuc': 'Vinh_Phuc',
'Yen_Bai': 'Yen_Bai'
}
|
the-stack_106_14127
|
#!/usr/bin/env python3
# Test whether a client sends a correct SUBSCRIBE to a topic with QoS 1.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id subscribe-qos1-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK and verifying that rc=0, the client should send a SUBSCRIBE
# message to subscribe to topic "qos1/test" with QoS=1. If rc!=0, the client
# should exit with an error.
# Upon receiving the correct SUBSCRIBE message, the test will reply with a
# SUBACK message with the accepted QoS set to 1. On receiving the SUBACK
# message, the client should send a DISCONNECT message.
from mosq_test_helper import *
port = mosq_test.get_lib_port()
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("subscribe-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/test", 1)
suback_packet = mosq_test.gen_suback(mid, 1)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', port))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env, port=port)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
mosq_test.do_receive_send(conn, connect_packet, connack_packet, "connect")
mosq_test.do_receive_send(conn, subscribe_packet, suback_packet, "subscribe")
mosq_test.expect_packet(conn, "disconnect", disconnect_packet)
rc = 0
conn.close()
except mosq_test.TestError:
pass
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
|
the-stack_106_14132
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <[email protected]>
# Copyright (c) 2009 Domen Kozar <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os, sys
PACKAGE_NAME = "windmill"
PACKAGE_VERSION = "1.3"
SUMMARY = 'Web testing framework intended for complete automation of user interface testing, with strong test debugging and recording capabilities.'
DESCRIPTION = """Windmill is an Open Source AJAX Web UI Testing framework.
Windmill implements cross browser testing, in-browser recording and playback, and functionality for fast accurate debugging and test environment integration.
We welcome any and all interest and contribution, as we work diligently at adding new features and keeping up with your bugs.
Thanks for your interest and participation!
"""
dependencies = []
two_four_dependencies = ['ctypes']
if sys.version.startswith('2.4'):
dependencies.extend(two_four_dependencies)
if sys.platform == 'cygwin':
dependencies.append('cygwinreg')
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=SUMMARY,
long_description=DESCRIPTION,
author='OSAF, Mikeal Rogers, Adam Christian',
author_email='[email protected]',
url='http://www.getwindmill.com/',
license='http://www.apache.org/licenses/LICENSE-2.0',
include_package_data = True,
packages = find_packages(exclude=['test', 'trac-files', 'tutorial', 'test.test_live', 'scripts',
'flash', 'contrib', 'windmill2']),
package_data = {'': ['*.js', '*.css', '*.html', '*.txt', '*.xpi',
'*.crt', '*.key', '*.csr', 'cert8.db' ],},
platforms =['Any'],
install_requires = dependencies,
entry_points = {
'nose.plugins': [
'windmill = windmill.authoring.nose_plugin:WindmillNosePlugin'
],
'console_scripts': [
'windmill = windmill.bin.windmill_bin:main'
]
},
# entry_points="""
# [console_scripts]
# windmill = windmill.bin.windmill_bin:main
# """,
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
the-stack_106_14133
|
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for analyzers."""
from __future__ import unicode_literals
import logging
import os
import yaml
import pandas
from flask import current_app
from timesketch.lib import definitions
from timesketch.lib.datastores.elastic import ElasticsearchDataStore
from timesketch.models import db_session
from timesketch.models.sketch import Event as SQLEvent
from timesketch.models.sketch import Sketch as SQLSketch
from timesketch.models.sketch import SearchIndex
from timesketch.models.sketch import View
from timesketch.models.sketch import Analysis
def _flush_datastore_decorator(func):
"""Decorator that flushes the bulk insert queue in the datastore."""
def wrapper(self, *args, **kwargs):
func_return = func(self, *args, **kwargs)
self.datastore.flush_queued_events()
return func_return
return wrapper
def get_yaml_config(file_name):
"""Return a dict parsed from a YAML file within the config directory.
Args:
file_name: String that defines the config file name.
Returns:
A dict with the parsed YAML content from the config file or
an empty dict if the file is not found or YAML was unable
to parse it.
"""
root_path = os.path.join(os.path.sep, 'etc', 'timesketch')
if not os.path.isdir(root_path):
return {}
path = os.path.join(root_path, file_name)
if not os.path.isfile(path):
return {}
with open(path, 'r') as fh:
try:
return yaml.safe_load(fh)
except yaml.parser.ParserError as exception:
# pylint: disable=logging-format-interpolation
logging.warning((
'Unable to read in YAML config file, '
'with error: {0!s}').format(exception))
return {}
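# Minimal usage sketch, assuming a hypothetical /etc/timesketch/my_analyzer.yaml
# containing e.g. "threshold: 10"; the call returns {} if the directory or file
# is missing or the YAML cannot be parsed:
#
#   config = get_yaml_config('my_analyzer.yaml')
#   threshold = config.get('threshold', 10)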
class Event(object):
"""Event object with helper methods.
Attributes:
datastore: Instance of ElasticsearchDatastore.
sketch: Sketch ID or None if not provided.
event_id: ID of the Event.
event_type: Document type in Elasticsearch.
index_name: The name of the Elasticsearch index.
source: Source document from Elasticsearch.
"""
def __init__(self, event, datastore, sketch=None):
"""Initialize Event object.
Args:
event: Dictionary of event from Elasticsearch.
datastore: Instance of ElasticsearchDatastore.
sketch: Optional instance of a Sketch object.
Raises:
KeyError if event dictionary is missing mandatory fields.
"""
self.datastore = datastore
self.sketch = sketch
self.updated_event = {}
try:
self.event_id = event['_id']
self.event_type = event['_type']
self.index_name = event['_index']
self.source = event.get('_source', None)
except KeyError as e:
raise KeyError('Malformed event: {0!s}'.format(e))
def _update(self, event):
"""Update event attributes to add.
Args:
event: Dictionary with new or updated values.
"""
self.updated_event.update(event)
def commit(self, event_dict=None):
"""Commit an event to Elasticsearch.
Args:
event_dict: (optional) Dictionary with updated event attributes.
Defaults to self.updated_event.
"""
if event_dict:
event_to_commit = event_dict
else:
event_to_commit = self.updated_event
if not event_to_commit:
return
self.datastore.import_event(
self.index_name, self.event_type, event_id=self.event_id,
event=event_to_commit)
self.updated_event = {}
def add_attributes(self, attributes):
"""Add key/values to an Event.
Args:
attributes: Dictionary with new or updated values to add.
"""
self._update(attributes)
def add_label(self, label, toggle=False):
"""Add label to the Event.
Args:
label: Label name.
toggle: If True the label will be removed if it exists already.
Raises: RuntimeError of sketch ID is missing.
"""
if not self.sketch:
raise RuntimeError('No sketch provided.')
user_id = 0
updated_event = self.datastore.set_label(
self.index_name, self.event_id, self.event_type, self.sketch.id,
user_id, label, toggle=toggle, single_update=False)
self.commit(updated_event)
def add_tags(self, tags):
"""Add tags to the Event.
Args:
tags: List of tags to add.
"""
if not tags:
return
existing_tags = self.source.get('tag', [])
new_tags = list(set().union(existing_tags, tags))
updated_event_attribute = {'tag': new_tags}
self._update(updated_event_attribute)
def add_emojis(self, emojis):
"""Add emojis to the Event.
Args:
emojis: List of emojis to add (as unicode codepoints).
"""
if not emojis:
return
existing_emoji_list = self.source.get('__ts_emojis', [])
if not isinstance(existing_emoji_list, (list, tuple)):
existing_emoji_list = []
new_emoji_list = list(set().union(existing_emoji_list, emojis))
updated_event_attribute = {'__ts_emojis': new_emoji_list}
self._update(updated_event_attribute)
def add_star(self):
"""Star event."""
self.add_label(label='__ts_star')
def add_comment(self, comment):
"""Add comment to event.
Args:
comment: Comment string.
Raises:
RuntimeError: if no sketch is present.
"""
if not self.sketch:
raise RuntimeError('No sketch provided.')
searchindex = SearchIndex.query.filter_by(
index_name=self.index_name).first()
db_event = SQLEvent.get_or_create(
sketch=self.sketch.sql_sketch, searchindex=searchindex,
document_id=self.event_id)
comment = SQLEvent.Comment(comment=comment, user=None)
db_event.comments.append(comment)
db_session.add(db_event)
db_session.commit()
self.add_label(label='__ts_comment')
def add_human_readable(self, human_readable, analyzer_name, append=True):
"""Add a human readable string to event.
Args:
human_readable: human readable string.
analyzer_name: string with the name of the analyzer that was
used to generate the human_readable string.
append: boolean defining whether the data should be appended
or prepended to the human readable string, if it has already
been defined. Defaults to True, and does nothing if
human_readable is not defined.
"""
existing_human_readable = self.source.get('human_readable', [])
human_readable = '[{0:s}] {1:s}'.format(analyzer_name, human_readable)
if human_readable in existing_human_readable:
return
if append:
existing_human_readable.append(human_readable)
else:
existing_human_readable.insert(0, human_readable)
updated_human_readable = {'human_readable': existing_human_readable}
self._update(updated_human_readable)
class Sketch(object):
"""Sketch object with helper methods.
Attributes:
id: Sketch ID.
sql_sketch: Instance of a SQLAlchemy Sketch object.
"""
def __init__(self, sketch_id):
"""Initializes a Sketch object.
Args:
sketch_id: The Sketch ID.
"""
self.id = sketch_id
self.sql_sketch = SQLSketch.query.get(sketch_id)
if not self.sql_sketch:
raise RuntimeError('No such sketch')
def add_view(self, view_name, analyzer_name, query_string=None,
query_dsl=None, query_filter=None):
"""Add saved view to the Sketch.
Args:
view_name: The name of the view.
analyzer_name: The name of the analyzer.
query_string: Elasticsearch query string.
query_dsl: Dictionary with Elasticsearch DSL query.
query_filter: Dictionary with Elasticsearch filters.
Raises:
            ValueError: If both query_string and query_dsl are missing.
Returns: An instance of a SQLAlchemy View object.
"""
        if not (query_string or query_dsl):
raise ValueError('Both query_string and query_dsl are missing.')
if not query_filter:
query_filter = {'indices': '_all'}
name = '[{0:s}] {1:s}'.format(analyzer_name, view_name)
view = View.get_or_create(name=name, sketch=self.sql_sketch, user=None)
view.query_string = query_string
view.query_filter = view.validate_filter(query_filter)
view.query_dsl = query_dsl
view.searchtemplate = None
db_session.add(view)
db_session.commit()
return view
def get_all_indices(self):
"""List all indices in the Sketch.
Returns:
List of index names.
"""
active_timelines = self.sql_sketch.active_timelines
indices = [t.searchindex.index_name for t in active_timelines]
return indices
class BaseIndexAnalyzer(object):
"""Base class for analyzers.
Attributes:
name: Analyzer name.
index_name: Name if Elasticsearch index.
datastore: Elasticsearch datastore client.
sketch: Instance of Sketch object.
"""
NAME = 'name'
IS_SKETCH_ANALYZER = False
    # If this analyzer depends on other analyzers, the names
    # of those analyzers need to be included in this frozenset.
DEPENDENCIES = frozenset()
def __init__(self, index_name):
"""Initialize the analyzer object.
Args:
index_name: Elasticsearch index name.
"""
self.name = self.NAME
self.index_name = index_name
self.datastore = ElasticsearchDataStore(
host=current_app.config['ELASTIC_HOST'],
port=current_app.config['ELASTIC_PORT'])
if not hasattr(self, 'sketch'):
self.sketch = None
def event_stream(
self, query_string=None, query_filter=None, query_dsl=None,
indices=None, return_fields=None):
"""Search ElasticSearch.
Args:
query_string: Query string.
query_filter: Dictionary containing filters to apply.
query_dsl: Dictionary containing Elasticsearch DSL query.
indices: List of indices to query.
return_fields: List of fields to return.
Returns:
Generator of Event objects.
Raises:
ValueError: if neither query_string or query_dsl is provided.
"""
if not (query_string or query_dsl):
raise ValueError('Both query_string and query_dsl are missing')
if not query_filter:
query_filter = {'indices': self.index_name}
# If not provided we default to the message field as this will always
# be present.
if not return_fields:
return_fields = ['message']
# Make sure we always return tag, human_readable and emoji attributes.
return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
return_fields = list(set(return_fields))
if not indices:
indices = [self.index_name]
# Refresh the index to make sure it is searchable.
for index in indices:
self.datastore.client.indices.refresh(index=index)
event_generator = self.datastore.search_stream(
query_string=query_string,
query_filter=query_filter,
query_dsl=query_dsl,
indices=indices,
return_fields=return_fields
)
for event in event_generator:
yield Event(event, self.datastore, sketch=self.sketch)
@_flush_datastore_decorator
def run_wrapper(self, analysis_id):
"""A wrapper method to run the analyzer.
This method is decorated to flush the bulk insert operation on the
datastore. This makes sure that all events are indexed at exit.
Returns:
Return value of the run method.
"""
analysis = Analysis.query.get(analysis_id)
analysis.set_status('STARTED')
# Run the analyzer
result = self.run()
# Update database analysis object with result and status
analysis.result = '{0:s}'.format(result)
analysis.set_status('DONE')
db_session.add(analysis)
db_session.commit()
return result
def run(self):
"""Entry point for the analyzer."""
raise NotImplementedError
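# Illustrative sketch of a minimal subclass (hypothetical, not part of this
# module): a concrete index analyzer overrides run(), iterates event_stream()
# and annotates matching events.
#
#   class ExampleTaggerAnalyzer(BaseIndexAnalyzer):
#       NAME = 'example_tagger'
#
#       def run(self):
#           events = self.event_stream(query_string='data_type:"example:event"')
#           for event in events:
#               event.add_tags(['example'])
#               event.commit()
#           return 'Tagged matching events.'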
class BaseSketchAnalyzer(BaseIndexAnalyzer):
"""Base class for sketch analyzers.
Attributes:
sketch: A Sketch instance.
"""
NAME = 'name'
IS_SKETCH_ANALYZER = True
def __init__(self, index_name, sketch_id):
"""Initialize the analyzer object.
Args:
index_name: Elasticsearch index name.
sketch_id: Sketch ID.
"""
self.sketch = Sketch(sketch_id=sketch_id)
super(BaseSketchAnalyzer, self).__init__(index_name)
def event_pandas(
self, query_string=None, query_filter=None, query_dsl=None,
indices=None, return_fields=None):
"""Search ElasticSearch.
Args:
query_string: Query string.
query_filter: Dictionary containing filters to apply.
query_dsl: Dictionary containing Elasticsearch DSL query.
indices: List of indices to query.
return_fields: List of fields to be included in the search results,
if not included all fields will be included in the results.
Returns:
A python pandas object with all the events.
Raises:
ValueError: if neither query_string or query_dsl is provided.
"""
if not (query_string or query_dsl):
raise ValueError('Both query_string and query_dsl are missing')
if not query_filter:
query_filter = {'indices': self.index_name, 'size': 10000}
if not indices:
indices = [self.index_name]
# Refresh the index to make sure it is searchable.
for index in indices:
self.datastore.client.indices.refresh(index=index)
if return_fields:
default_fields = definitions.DEFAULT_SOURCE_FIELDS
return_fields.extend(default_fields)
return_fields = list(set(return_fields))
return_fields = ','.join(return_fields)
results = self.datastore.search_stream(
sketch_id=self.sketch.id,
query_string=query_string,
query_filter=query_filter,
query_dsl=query_dsl,
indices=indices,
return_fields=return_fields,
)
events = []
for event in results:
source = event.get('_source')
source['_id'] = event.get('_id')
source['_type'] = event.get('_type')
source['_index'] = event.get('_index')
events.append(source)
return pandas.DataFrame(events)
def run(self):
"""Entry point for the analyzer."""
raise NotImplementedError
|
the-stack_106_14140
|
# Copyright (c) 2018 Tencent Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HttpProfile(object):
scheme = "https"
def __init__(self, protocol=None, endpoint=None, reqMethod="POST", reqTimeout=60,
keepAlive=False, proxy=None):
"""HTTP profile.
:param protocol: http or https, default is https.
:type protocol: str
:param endpoint: The domain to access, like: cvm.tencentcloudapi.com
:type endpoint: str
:param reqMethod: the http method, valid choice: GET, POST
:type reqMethod: str
        :param reqTimeout: The http timeout in seconds.
        :type reqTimeout: int
        :param keepAlive: Whether to keep the underlying connection alive between requests.
        :type keepAlive: bool
        :param proxy: Optional proxy used for the requests.
        """
self.endpoint = endpoint
self.reqTimeout = 60 if reqTimeout is None else reqTimeout
self.reqMethod = "POST" if reqMethod is None else reqMethod
self.protocol = protocol or "https"
        # "protocol" is not the precise term according to the URI RFC; "scheme" is the canonical name.
self.scheme = self.protocol
self.keepAlive = keepAlive
self.proxy = proxy
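# Minimal usage sketch (the endpoint value is illustrative):
#
#   profile = HttpProfile(endpoint="cvm.tencentcloudapi.com", reqTimeout=30)
#   assert profile.scheme == "https" and profile.reqMethod == "POST"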
|
the-stack_106_14141
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for classes in iobase.py."""
# pytype: skip-file
from __future__ import absolute_import
import unittest
import mock
import apache_beam as beam
from apache_beam.io.concat_source import ConcatSource
from apache_beam.io.concat_source_test import RangeSource
from apache_beam.io import iobase
from apache_beam.io.iobase import SourceBundle
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class SDFBoundedSourceRestrictionProviderTest(unittest.TestCase):
def setUp(self):
self.initial_range_start = 0
self.initial_range_stop = 4
self.initial_range_source = RangeSource(
self.initial_range_start, self.initial_range_stop)
self.sdf_restriction_provider = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionProvider(
self.initial_range_source, desired_chunk_size=2))
def test_initial_restriction(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
self.assertTrue(
isinstance(
restriction,
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction))
self.assertTrue(isinstance(restriction._source_bundle, SourceBundle))
self.assertEqual(
self.initial_range_start, restriction._source_bundle.start_position)
self.assertEqual(
self.initial_range_stop, restriction._source_bundle.stop_position)
self.assertTrue(isinstance(restriction._source_bundle.source, RangeSource))
self.assertEqual(restriction._range_tracker, None)
def test_create_tracker(self):
expected_start = 1
expected_stop = 3
source_bundle = SourceBundle(
expected_stop - expected_start,
RangeSource(1, 3),
expected_start,
expected_stop)
restriction_tracker = (
self.sdf_restriction_provider.create_tracker(
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction(
source_bundle)))
self.assertTrue(
isinstance(
restriction_tracker,
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionTracker)
)
self.assertEqual(expected_start, restriction_tracker.start_pos())
self.assertEqual(expected_stop, restriction_tracker.stop_pos())
def test_simple_source_split(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
self.sdf_restriction_provider.split(unused_element, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_concat_source_split(self):
unused_element = None
initial_concat_source = ConcatSource([self.initial_range_source])
sdf_concat_restriction_provider = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionProvider(
initial_concat_source, desired_chunk_size=2))
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
sdf_concat_restriction_provider.split(unused_element, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_restriction_size(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
split_1, split_2 = self.sdf_restriction_provider.split(unused_element,
restriction)
split_1_size = self.sdf_restriction_provider.restriction_size(
unused_element, split_1)
split_2_size = self.sdf_restriction_provider.restriction_size(
unused_element, split_2)
self.assertEqual(2, split_1_size)
self.assertEqual(2, split_2_size)
class SDFBoundedSourceRestrictionTrackerTest(unittest.TestCase):
def setUp(self):
self.initial_start_pos = 0
self.initial_stop_pos = 4
source_bundle = SourceBundle(
self.initial_stop_pos - self.initial_start_pos,
RangeSource(self.initial_start_pos, self.initial_stop_pos),
self.initial_start_pos,
self.initial_stop_pos)
self.sdf_restriction_tracker = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionTracker(
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction(
source_bundle)))
def test_current_restriction_before_split(self):
current_restriction = (self.sdf_restriction_tracker.current_restriction())
self.assertEqual(
self.initial_start_pos,
current_restriction._source_bundle.start_position)
self.assertEqual(
self.initial_stop_pos, current_restriction._source_bundle.stop_position)
self.assertEqual(
self.initial_start_pos,
current_restriction._range_tracker.start_position())
self.assertEqual(
self.initial_stop_pos,
current_restriction._range_tracker.stop_position())
def test_current_restriction_after_split(self):
fraction_of_remainder = 0.5
self.sdf_restriction_tracker.try_claim(1)
expected_restriction, _ = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
current_restriction = self.sdf_restriction_tracker.current_restriction()
self.assertEqual(
expected_restriction._source_bundle, current_restriction._source_bundle)
self.assertTrue(current_restriction._range_tracker)
def test_try_split_at_remainder(self):
fraction_of_remainder = 0.4
expected_primary = (0, 2, 2.0)
expected_residual = (2, 4, 2.0)
self.sdf_restriction_tracker.try_claim(0)
actual_primary, actual_residual = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
self.assertEqual(
expected_primary,
(
actual_primary._source_bundle.start_position,
actual_primary._source_bundle.stop_position,
actual_primary._source_bundle.weight))
self.assertEqual(
expected_residual,
(
actual_residual._source_bundle.start_position,
actual_residual._source_bundle.stop_position,
actual_residual._source_bundle.weight))
self.assertEqual(
actual_primary._source_bundle.weight,
self.sdf_restriction_tracker.current_restriction().weight())
class UseSdfBoundedSourcesTests(unittest.TestCase):
def _run_sdf_wrapper_pipeline(self, source, expected_values):
with beam.Pipeline() as p:
experiments = (p._options.view_as(DebugOptions).experiments or [])
# Setup experiment option to enable using SDFBoundedSourceWrapper
if 'beam_fn_api' not in experiments:
# Required so mocking below doesn't mock Create used in assert_that.
experiments.append('beam_fn_api')
p._options.view_as(DebugOptions).experiments = experiments
actual = p | beam.io.Read(source)
assert_that(actual, equal_to(expected_values))
@mock.patch('apache_beam.io.iobase._SDFBoundedSourceWrapper.expand')
def test_sdf_wrapper_overrides_read(self, sdf_wrapper_mock_expand):
def _fake_wrapper_expand(pbegin):
return pbegin | beam.Create(['fake'])
sdf_wrapper_mock_expand.side_effect = _fake_wrapper_expand
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), ['fake'])
def test_sdf_wrap_range_source(self):
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), [0, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_14142
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Various kinds of layout components.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..core.enums import SizingMode, SizingPolicy, Location, TrackAlign
from ..core.has_props import abstract
from ..core.properties import (Bool, Auto, Enum, Int, NonNegativeInt, Float,
Instance, List, Seq, Tuple, Dict, String, Either, Struct, Color)
from ..core.validation import warning, error
from ..core.validation.warnings import (BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT,
FIXED_SIZING_MODE, FIXED_WIDTH_POLICY, FIXED_HEIGHT_POLICY)
from ..core.validation.errors import MIN_PREFERRED_MAX_WIDTH, MIN_PREFERRED_MAX_HEIGHT
from ..model import Model
from .callbacks import Callback
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Box',
'Column',
'GridBox',
'HTMLBox',
'LayoutDOM',
'Row',
'Spacer',
'WidgetBox',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class LayoutDOM(Model):
""" The base class for layoutable components.
"""
disabled = Bool(False, help="""
Whether the widget will be disabled when rendered.
If ``True``, the widget will be greyed-out and not responsive to UI events.
""")
visible = Bool(True, help="""
Whether the component will be visible and a part of a layout.
""")
width = NonNegativeInt(default=None, help="""
The width of the component (in pixels).
This can be either fixed or preferred width, depending on width sizing policy.
""")
height = NonNegativeInt(default=None, help="""
The height of the component (in pixels).
This can be either fixed or preferred height, depending on height sizing policy.
""")
min_width = NonNegativeInt(default=None, help="""
Minimal width of the component (in pixels) if width is adjustable.
""")
min_height = NonNegativeInt(default=None, help="""
Minimal height of the component (in pixels) if height is adjustable.
""")
max_width = NonNegativeInt(default=None, help="""
    Maximal width of the component (in pixels) if width is adjustable.
""")
max_height = NonNegativeInt(default=None, help="""
    Maximal height of the component (in pixels) if height is adjustable.
""")
margin = Tuple(Int, Int, Int, Int, default=(0, 0, 0, 0), help="""
    Allows creating additional space around the component.
""").accepts(Tuple(Int, Int), lambda v_h: (v_h[0], v_h[1], v_h[0], v_h[1])) \
.accepts(Int, lambda m: (m, m, m, m))
width_policy = Either(Auto, Enum(SizingPolicy), default="auto", help="""
Describes how the component should maintain its width.
``"auto"``
Use component's preferred sizing policy.
``"fixed"``
Use exactly ``width`` pixels. Component will overflow if it can't fit in the
available horizontal space.
``"fit"``
Use component's preferred width (if set) and allow it to fit into the available
horizontal space within the minimum and maximum width bounds (if set). Component's
        width will neither be aggressively minimized nor maximized.
``"min"``
Use as little horizontal space as possible, not less than the minimum width (if set).
The starting point is the preferred width (if set). The width of the component may
shrink or grow depending on the parent layout, aspect management and other factors.
``"max"``
Use as much horizontal space as possible, not more than the maximum width (if set).
The starting point is the preferred width (if set). The width of the component may
shrink or grow depending on the parent layout, aspect management and other factors.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion. Prefer using ``sizing_mode`` if this level of control isn't
strictly necessary.
""")
height_policy = Either(Auto, Enum(SizingPolicy), default="auto", help="""
Describes how the component should maintain its height.
``"auto"``
Use component's preferred sizing policy.
``"fixed"``
Use exactly ``height`` pixels. Component will overflow if it can't fit in the
available vertical space.
``"fit"``
        Use component's preferred height (if set) and allow it to fit into the available
        vertical space within the minimum and maximum height bounds (if set). Component's
        height will neither be aggressively minimized nor maximized.
``"min"``
Use as little vertical space as possible, not less than the minimum height (if set).
The starting point is the preferred height (if set). The height of the component may
shrink or grow depending on the parent layout, aspect management and other factors.
``"max"``
Use as much vertical space as possible, not more than the maximum height (if set).
The starting point is the preferred height (if set). The height of the component may
shrink or grow depending on the parent layout, aspect management and other factors.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion. Prefer using ``sizing_mode`` if this level of control isn't
strictly necessary.
""")
aspect_ratio = Either(Enum("auto"), Float, default=None, help="""
Describes the proportional relationship between component's width and height.
This works if any of component's dimensions are flexible in size. If set to
a number, ``width / height = aspect_ratio`` relationship will be maintained.
Otherwise, if set to ``"auto"``, component's preferred width and height will
be used to determine the aspect (if not set, no aspect will be preserved).
""")
sizing_mode = Enum(SizingMode, default=None, help="""
How the component should size itself.
This is a high-level setting for maintaining width and height of the component. To
    gain more fine-grained control over sizing, use ``width_policy``, ``height_policy``
and ``aspect_ratio`` instead (those take precedence over ``sizing_mode``).
Possible scenarios:
``"fixed"``
Component is not responsive. It will retain its original width and height
regardless of any subsequent browser window resize events.
``"stretch_width"``
Component will responsively resize to stretch to the available width, without
maintaining any aspect ratio. The height of the component depends on the type
of the component and may be fixed or fit to component's contents.
``"stretch_height"``
Component will responsively resize to stretch to the available height, without
maintaining any aspect ratio. The width of the component depends on the type
of the component and may be fixed or fit to component's contents.
``"stretch_both"``
Component is completely responsive, independently in width and height, and
will occupy all the available horizontal and vertical space, even if this
changes the aspect ratio of the component.
``"scale_width"``
Component will responsively resize to stretch to the available width, while
maintaining the original or provided aspect ratio.
``"scale_height"``
Component will responsively resize to stretch to the available height, while
maintaining the original or provided aspect ratio.
``"scale_both"``
Component will responsively resize to both the available width and height, while
maintaining the original or provided aspect ratio.
""")
background = Color(default=None, help="""
Background color of the component.
""")
# List in order for in-place changes to trigger changes, ref: https://github.com/bokeh/bokeh/issues/6841
css_classes = List(String, help="""
A list of CSS class names to add to this DOM element. Note: the class names are
simply added as-is, no other guarantees are provided.
It is also permissible to assign from tuples, however these are adapted -- the
property will always contain a list.
""").accepts(Seq(String), lambda x: list(x))
@warning(FIXED_SIZING_MODE)
def _check_fixed_sizing_mode(self):
if self.sizing_mode == "fixed" and (self.width is None or self.height is None):
return str(self)
@warning(FIXED_WIDTH_POLICY)
def _check_fixed_width_policy(self):
if self.width_policy == "fixed" and self.width is None:
return str(self)
@warning(FIXED_HEIGHT_POLICY)
def _check_fixed_height_policy(self):
if self.height_policy == "fixed" and self.height is None:
return str(self)
@error(MIN_PREFERRED_MAX_WIDTH)
def _min_preferred_max_width(self):
min_width = self.min_width if self.min_width is not None else 0
width = self.width if self.width is not None else min_width
max_width = self.max_width if self.max_width is not None else width
if not (min_width <= width <= max_width):
return str(self)
@error(MIN_PREFERRED_MAX_HEIGHT)
def _min_preferred_max_height(self):
min_height = self.min_height if self.min_height is not None else 0
height = self.height if self.height is not None else min_height
max_height = self.max_height if self.max_height is not None else height
if not (min_height <= height <= max_height):
return str(self)
@abstract
class HTMLBox(LayoutDOM):
    ''' A component whose size is determined by its HTML content.
'''
class Spacer(LayoutDOM):
''' A container for space used to fill an empty spot in a row or column.
'''
QuickTrackSizing = Either(Enum("auto", "min", "max"), Int)
RowSizing = Either(
QuickTrackSizing,
Struct(policy=Enum("auto", "min", "max"), align=Enum(TrackAlign)),
Struct(policy=Enum("fixed"), height=Int, align=Enum(TrackAlign)),
Struct(policy=Enum("flex"), factor=Float, align=Enum(TrackAlign)))
ColSizing = Either(
QuickTrackSizing,
Struct(policy=Enum("auto", "min", "max"), align=Enum(TrackAlign)),
Struct(policy=Enum("fixed"), width=Int, align=Enum(TrackAlign)),
Struct(policy=Enum("flex"), factor=Float, align=Enum(TrackAlign)))
IntOrString = Either(Int, String) # XXX: work around issue #8166
class GridBox(LayoutDOM):
children = List(Tuple(Instance(LayoutDOM), Int, Int), default=[], help="""
A list of children with their associated position in the grid (row, column).
""")
rows = Either(QuickTrackSizing, Dict(IntOrString, RowSizing), default="auto", help="""
Describes how the grid should maintain its rows' heights.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion.
""")
cols = Either(QuickTrackSizing, Dict(IntOrString, ColSizing), default="auto", help="""
Describes how the grid should maintain its columns' widths.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion.
""")
spacing = Either(Int, Tuple(Int, Int), default=0, help="""
The gap between children (in pixels).
""")
@abstract
class Box(LayoutDOM):
''' Abstract base class for Row and Column. Do not use directly.
'''
def __init__(self, *args, **kwargs):
if len(args) > 0 and "children" in kwargs:
raise ValueError("'children' keyword cannot be used with positional arguments")
elif len(args) > 0:
kwargs["children"] = list(args)
super(Box, self).__init__(**kwargs)
@warning(EMPTY_LAYOUT)
def _check_empty_layout(self):
from itertools import chain
if not list(chain(self.children)):
return str(self)
@warning(BOTH_CHILD_AND_ROOT)
def _check_child_is_also_root(self):
problems = []
for c in self.children:
if c.document is not None and c in c.document.roots:
problems.append(str(c))
if problems:
return ", ".join(problems)
else:
return None
children = List(Instance(LayoutDOM), help="""
The list of children, which can be other components including plots, rows, columns, and widgets.
""")
spacing = Int(default=0, help="""
The gap between children (in pixels).
""")
class Row(Box):
''' Lay out child components in a single horizontal row.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
'''
cols = Either(QuickTrackSizing, Dict(IntOrString, ColSizing), default="auto", help="""
Describes how the component should maintain its columns' widths.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion.
""")
class Column(Box):
    ''' Lay out child components in a single vertical column.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
'''
rows = Either(QuickTrackSizing, Dict(IntOrString, RowSizing), default="auto", help="""
Describes how the component should maintain its rows' heights.
.. note::
This is an experimental feature and may change in future. Use it at your
own discretion.
""")
class WidgetBox(Column):
''' Create a column of bokeh widgets with predefined styling.
'''
class Panel(Model):
''' A single-widget container with title bar and controls.
'''
title = String(default="", help="""
The text title of the panel.
""")
child = Instance(LayoutDOM, help="""
The child widget. If you need more children, use a layout widget, e.g. a ``Column``.
""")
class Tabs(LayoutDOM):
''' A panel widget with navigation tabs.
'''
__example__ = "sphinx/source/docs/user_guide/examples/interaction_tab_panes.py"
tabs = List(Instance(Panel), help="""
The list of child panel widgets.
""").accepts(List(Tuple(String, Instance(LayoutDOM))),
lambda items: [ Panel(title=title, child=child) for (title, child) in items ])
tabs_location = Enum(Location, default="above", help="""
The location of the buttons that activate tabs.
""")
active = Int(0, help="""
The index of the active tab.
""")
callback = Instance(Callback, help="""
    A callback to run in the browser whenever the active tab changes.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
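#
# Illustrative sketch (not part of this module): a typical composition of the
# layout models defined above. ``figure`` comes from bokeh.plotting and the
# three plots are placeholders, not objects defined in this file.
#
#   from bokeh.plotting import figure
#   p1, p2, p3 = figure(), figure(), figure()
#   layout = Column(Row(p1, p2, sizing_mode="stretch_width"), p3, spacing=10)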
|
the-stack_106_14143
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import os
import random
import sys
import unittest
from collections import OrderedDict
import mock
import pandas as pd
from hmsclient import HMSClient
from airflow import DAG, configuration
from airflow.exceptions import AirflowException
from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
from airflow.utils.tests import assertEqualIgnoreMultipleSpaces
configuration.load_test_config()
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
NOT_ASSERTLOGS_VERSION = sys.version_info.major + sys.version_info.minor / 10
class HiveEnvironmentTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.next_day = (DEFAULT_DATE +
datetime.timedelta(days=1)).isoformat()[:10]
self.database = 'airflow'
self.partition_by = 'ds'
self.table = 'static_babynames_partitioned'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY ({{ params.partition_by }} string);
ALTER TABLE {{ params.table }}
ADD PARTITION({{ params.partition_by }}='{{ ds }}');
"""
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'partition_by': self.partition_by
},
hive_cli_conn_id='hive_cli_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
class TestHiveCliHook(unittest.TestCase):
def test_run_cli(self):
hook = HiveCliHook()
hook.run_cli("SHOW DATABASES")
def test_run_cli_with_hive_conf(self):
hql = "set key;\n" \
"set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \
"set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n"
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
os.environ[dag_id_ctx_var_name] = 'test_dag_id'
os.environ[task_id_ctx_var_name] = 'test_task_id'
os.environ[execution_date_ctx_var_name] = 'test_execution_date'
os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'
hook = HiveCliHook()
output = hook.run_cli(hql=hql, hive_conf={'key': 'value'})
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
del os.environ[dag_id_ctx_var_name]
del os.environ[task_id_ctx_var_name]
del os.environ[execution_date_ctx_var_name]
del os.environ[dag_run_id_ctx_var_name]
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_file(self, mock_run_cli):
filepath = "/path/to/input/file"
table = "output_table"
hook = HiveCliHook()
hook.load_file(filepath=filepath, table=table, create=False)
query = (
"LOAD DATA LOCAL INPATH '{filepath}' "
"OVERWRITE INTO TABLE {table} ;\n"
.format(filepath=filepath, table=table)
)
mock_run_cli.assert_called_with(query)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df(self, mock_to_csv, mock_load_file):
df = pd.DataFrame({"c": ["foo", "bar", "baz"]})
table = "t"
delimiter = ","
encoding = "utf-8"
hook = HiveCliHook()
hook.load_df(df=df,
table=table,
delimiter=delimiter,
encoding=encoding)
assert mock_to_csv.call_count == 1
kwargs = mock_to_csv.call_args[1]
self.assertEqual(kwargs["header"], False)
self.assertEqual(kwargs["index"], False)
self.assertEqual(kwargs["sep"], delimiter)
assert mock_load_file.call_count == 1
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["delimiter"], delimiter)
self.assertEqual(kwargs["field_dict"], {"c": "STRING"})
self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict))
self.assertEqual(kwargs["table"], table)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):
hook = HiveCliHook()
b = (True, False)
for create, recreate in itertools.product(b, b):
mock_load_file.reset_mock()
hook.load_df(df=pd.DataFrame({"c": range(0, 10)}),
table="t",
create=create,
recreate=recreate)
assert mock_load_file.call_count == 1
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["create"], create)
self.assertEqual(kwargs["recreate"], recreate)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_df_with_data_types(self, mock_run_cli):
d = OrderedDict()
d['b'] = [True]
d['i'] = [-1]
d['t'] = [1]
d['f'] = [0.0]
d['c'] = ['c']
d['M'] = [datetime.datetime(2018, 1, 1)]
d['O'] = [object()]
d['S'] = ['STRING'.encode('utf-8')]
d['U'] = ['STRING']
d['V'] = [None]
df = pd.DataFrame(d)
hook = HiveCliHook()
hook.load_df(df, 't')
query = """
CREATE TABLE IF NOT EXISTS t (
b BOOLEAN,
i BIGINT,
t BIGINT,
f DOUBLE,
c STRING,
M TIMESTAMP,
O STRING,
S STRING,
U STRING,
V STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS textfile
;
"""
assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query)
class TestHiveMetastoreHook(HiveEnvironmentTest):
VALID_FILTER_MAP = {'key2': 'value2'}
def test_get_max_partition_from_empty_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs([],
'key1',
self.VALID_FILTER_MAP)
self.assertIsNone(max_partition)
def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
{'key3': 'value5'})
def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key3',
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
None,
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
None)
# No partition will be filtered out.
self.assertEqual(max_partition, b'value3')
def test_get_max_partition_from_valid_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
self.VALID_FILTER_MAP)
self.assertEqual(max_partition, b'value1')
def test_get_metastore_client(self):
self.assertIsInstance(self.hook.get_metastore_client(), HMSClient)
def test_get_conn(self):
self.assertIsInstance(self.hook.get_conn(), HMSClient)
def test_check_for_partition(self):
partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}='{date}'".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_partition(self.database, self.table,
partition)
)
self.assertFalse(
self.hook.check_for_partition(self.database, self.table,
missing_partition)
)
def test_check_for_named_partition(self):
partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}={date}".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_named_partition(self.database,
self.table,
partition)
)
self.assertFalse(
self.hook.check_for_named_partition(self.database,
self.table,
missing_partition)
)
def test_get_table(self):
table_info = self.hook.get_table(db=self.database,
table_name=self.table)
self.assertEqual(table_info.tableName, self.table)
columns = ['state', 'year', 'name', 'gender', 'num']
self.assertEqual([col.name for col in table_info.sd.cols], columns)
def test_get_tables(self):
tables = self.hook.get_tables(db=self.database,
pattern=self.table + "*")
self.assertIn(self.table, {table.tableName for table in tables})
def test_get_databases(self):
databases = self.hook.get_databases(pattern='*')
self.assertIn(self.database, databases)
def test_get_partitions(self):
partitions = self.hook.get_partitions(schema=self.database,
table_name=self.table)
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])
def test_max_partition(self):
filter_map = {self.partition_by: DEFAULT_DATE_DS}
partition = self.hook.max_partition(schema=self.database,
table_name=self.table,
field=self.partition_by,
filter_map=filter_map)
self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))
def test_table_exists(self):
self.assertTrue(self.hook.table_exists(self.table, db=self.database))
self.assertFalse(
self.hook.table_exists(str(random.randint(1, 10000)))
)
class TestHiveServer2Hook(unittest.TestCase):
def _upload_dataframe(self):
df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
self.local_path = '/tmp/TestHiveServer2Hook.csv'
df.to_csv(self.local_path, header=False, index=False)
def setUp(self):
configuration.load_test_config()
self._upload_dataframe()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.database = 'airflow'
self.table = 'hive_server_hook'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""
self.columns = ['{}.a'.format(self.table),
'{}.b'.format(self.table)]
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'csv_path': self.local_path
},
hive_cli_conn_id='hive_cli_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
os.remove(self.local_path)
def test_get_conn(self):
hook = HiveServer2Hook()
hook.get_conn()
@mock.patch('pyhive.hive.connect')
def test_get_conn_with_password(self, mock_connect):
from airflow.hooks.base_hook import CONN_ENV_PREFIX
conn_id = "conn_with_password"
conn_env = CONN_ENV_PREFIX + conn_id.upper()
conn_value = os.environ.get(conn_env)
os.environ[conn_env] = "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?authMechanism=LDAP"
HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn()
mock_connect.assert_called_with(
host='localhost',
port=10000,
auth='LDAP',
kerberos_service_name=None,
username='conn_id',
password='conn_pass',
database='default')
if conn_value:
os.environ[conn_env] = conn_value
def test_get_records(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_records(query, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
def test_get_pandas_df(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
df = hook.get_pandas_df(query, schema=self.database)
self.assertEqual(len(df), 2)
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
def test_get_results_header(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual([col[0] for col in results['header']],
self.columns)
def test_get_results_data(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual(results['data'], [(1, 1), (2, 2)])
    @unittest.skipIf(NOT_ASSERTLOGS_VERSION < 3.4, 'assertLogs not supported before python 3.4')
def test_to_csv_assertlogs(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
csv_filepath = 'query_results.csv'
with self.assertLogs() as cm:
hook.to_csv(query, csv_filepath, schema=self.database,
delimiter=',', lineterminator='\n', output_header=True, fetch_size=2)
df = pd.read_csv(csv_filepath, sep=',')
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
self.assertEqual(len(df), 2)
self.assertIn('INFO:airflow.hooks.hive_hooks.HiveServer2Hook:'
'Written 2 rows so far.', cm.output)
    @unittest.skipIf(NOT_ASSERTLOGS_VERSION >= 3.4, 'test covered by test_to_csv_assertlogs')
def test_to_csv_without_assertlogs(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
csv_filepath = 'query_results.csv'
hook.to_csv(query, csv_filepath, schema=self.database,
delimiter=',', lineterminator='\n', output_header=True)
df = pd.read_csv(csv_filepath, sep=',')
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
self.assertEqual(len(df), 2)
def test_multi_statements(self):
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"SELECT * FROM {}".format(self.table),
"DROP TABLE test_multi_statements",
]
hook = HiveServer2Hook()
results = hook.get_records(sqls, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
def test_get_results_with_hive_conf(self):
hql = ["set key",
"set airflow.ctx.dag_id",
"set airflow.ctx.dag_run_id",
"set airflow.ctx.task_id",
"set airflow.ctx.execution_date"]
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
os.environ[dag_id_ctx_var_name] = 'test_dag_id'
os.environ[task_id_ctx_var_name] = 'test_task_id'
os.environ[execution_date_ctx_var_name] = 'test_execution_date'
os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'
hook = HiveServer2Hook()
output = '\n'.join(res_tuple[0]
for res_tuple
in hook.get_results(hql=hql,
hive_conf={'key': 'value'})['data'])
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
del os.environ[dag_id_ctx_var_name]
del os.environ[task_id_ctx_var_name]
del os.environ[execution_date_ctx_var_name]
del os.environ[dag_run_id_ctx_var_name]
|
the-stack_106_14144
|
# Binary search tree and its operations
class Node:
def __init__(self, value) -> None:
self.value = value
self.left = None
self.right = None
class BinarySearchTree:
def __init__(self) -> None:
self.root = None
def insert(self, value):
new_node = Node(value)
if not self.root:
self.root = new_node
else:
temp = self.root
while True:
if new_node.value < temp.value:
if temp.left is None:
temp.left = new_node
break
temp = temp.left
elif new_node.value > temp.value:
if temp.right is None:
temp.right = new_node
break
temp = temp.right
                else:  # duplicate value (new_node.value == temp.value); nothing inserted
return False
return True
def contains(self, value):
if not self.root: # no node in tree
return False
else:
temp = self.root
while temp:
if value == temp.value:
return True
elif value < temp.value:
temp = temp.left
elif value > temp.value:
temp = temp.right
return False
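# Quick usage example (illustrative values only):
if __name__ == "__main__":
    bst = BinarySearchTree()
    for v in (47, 21, 76, 18, 27, 52, 82):
        bst.insert(v)
    print(bst.contains(27))   # True
    print(bst.contains(17))   # False
    print(bst.insert(47))     # False -- duplicates are rejected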
|
the-stack_106_14145
|
class LMS8001_GPIO(object):
def __init__(self, chip, n):
if n not in range(0,9):
raise ValueError("GPIO n must be in range 0 - 8")
self.chip = chip # reference to chip instance
self.n = n # GPIO number
#
# GPIOn output value
#
@property
def OUT(self):
"""
Get the value written to GPIOn
"""
val = self.chip["GPIOOutData"]["GPIO_OUT_SPI<8:0>"]
if val & (1<<self.n)>0:
return 1
return 0
@OUT.setter
def OUT(self, value):
"""
Write the value to GPIOn
"""
tmp = self.chip["GPIOOutData"]["GPIO_OUT_SPI<8:0>"]
mask = 1<<self.n
tmp &= (0xFFFF ^ mask)
if value > 0:
tmp |= mask
self.chip["GPIOOutData"]["GPIO_OUT_SPI<8:0>"] = tmp
#
    # GPIOn input value
#
@property
def IN(self):
"""
Get the value of GPIOn pin
"""
val = self.chip["GPIOInData"]["GPIO_IN<8:0>"]
if val & (1<<self.n)>0:
return 1
return 0
#
# GPIO selector
#
@property
def SEL(self):
"""
Get the value of GPIOn_SEL<2:0>
"""
if self.n<5:
regName = "GPIOOUT_SEL0"
else:
regName = "GPIOOUT_SEL1"
fieldName = "GPIO"+str(self.n)+"_SEL<2:0>"
return self.chip[regName][fieldName]
@SEL.setter
def SEL(self, value):
"""
Set the value of GPIOn_SEL<2:0>
"""
if value not in [0,1,2,3,4,"SPI","PLL_LOCK","VTUNE_LOW","VTUNE_HIGH","FAST_LOCK"]:
raise ValueError('GPIO_SEL must have value of 0,1,2,3,4,"SPI","PLL_LOCK","VTUNE_LOW","VTUNE_HIGH","FAST_LOCK"')
if value==0 or value=="SPI":
tmp = 0
elif value==1 or value=="PLL_LOCK":
tmp = 1
elif value==2 or value=="VTUNE_LOW":
tmp = 2
elif value==3 or value=="VTUNE_HIGH":
tmp = 3
else:
tmp = 4
if self.n<5:
regName = "GPIOOUT_SEL0"
else:
regName = "GPIOOUT_SEL1"
fieldName = "GPIO"+str(self.n)+"_SEL<2:0>"
self.chip[regName][fieldName] = tmp
#
# GPIO_PEn output value
#
@property
def PE(self):
"""
Get the value of GPIO_PEn
"""
val = self.chip["GPIOConfig_PE"]["GPIO_PE<8:0>"]
if val & (1<<self.n)>0:
return 1
return 0
@PE.setter
def PE(self, value):
"""
Write the value to GPIO_PEn
"""
tmp = self.chip["GPIOConfig_PE"]["GPIO_PE<8:0>"]
mask = 1<<self.n
tmp &= (0xFFFF ^ mask)
if value > 0:
tmp |= mask
self.chip["GPIOConfig_PE"]["GPIO_PE<8:0>"] = tmp
#
# GPIO_DSn output value
#
@property
def DS(self):
"""
Get the value of GPIO_DSn
"""
val = self.chip["GPIOConfig_DS"]["GPIO_DS<8:0>"]
if val & (1<<self.n)>0:
return 1
return 0
@DS.setter
def DS(self, value):
"""
Write the value to GPIO_DSn
"""
tmp = self.chip["GPIOConfig_DS"]["GPIO_DS<8:0>"]
mask = 1<<self.n
tmp &= (0xFFFF ^ mask)
if value > 0:
tmp |= mask
self.chip["GPIOConfig_DS"]["GPIO_DS<8:0>"] = tmp
#
# GPIO_InOn output value
#
@property
def InO(self):
"""
Get the value of GPIO_InOn
"""
val = self.chip["GPIOConfig_IO"]["GPIO_InO<8:0>"]
if val & (1<<self.n)>0:
return 1
return 0
@InO.setter
def InO(self, value):
"""
Write the value to GPIO_InOn
"""
tmp = self.chip["GPIOConfig_IO"]["GPIO_InO<8:0>"]
mask = 1<<self.n
tmp &= (0xFFFF ^ mask)
if value > 0:
tmp |= mask
self.chip["GPIOConfig_IO"]["GPIO_InO<8:0>"] = tmp
|
the-stack_106_14146
|
import sys
from dynqmprop import DynQMProp
top_file = sys.argv[1]
coords_file = sys.argv[2]
def main():
    charges_param = DynQMProp(top_file, coords_file, qm_charge=+1, ligand_selection=':2', receptor_selection=':1',
n_charge_updates=2, sampling_time=1, total_qm_calculations=5)
charges_out, charges_std_out, epol_out = charges_param.set_output_files()
charges_param.run(charges_out, charges_std_out, epol_out, compl=True)
if __name__ == '__main__':
main()
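# Typical invocation from the command line (the script and file names below are
# placeholders for the actual topology and coordinate files):
#
#   python parametrize_ligand.py complex.prmtop complex.rst7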
|
the-stack_106_14147
|
import json
from pywps import ComplexInput, ComplexOutput
from pywps import FORMATS
from pywps import LiteralInput
from pywps import Process
from pywps.app.Common import Metadata
import spotpy as sp
import xarray as xr
funcs = {f.__name__: f for f in sp.objectivefunctions._all_functions}
class ObjectiveFunctionProcess(Process):
def __init__(self):
inputs = [ComplexInput('obs', 'Stream flow observation',
abstract='Steam flow observation time series',
supported_formats=(FORMATS.NETCDF,)),
ComplexInput('sim', 'Stream flow simulation',
abstract='Stream flow simulation time series',
supported_formats=(FORMATS.NETCDF,)),
LiteralInput('name', 'Objective function name',
abstract="One or multiple objective function name. If None, defaults to all.",
data_type='string',
allowed_values=tuple(funcs.keys()),
default=None,
min_occurs=0,
max_occurs=17)
]
outputs = [ComplexOutput('metrics', 'Objective function values',
abstract="Returns up to 17 objective function values, depending on the user's "
"requests. By default all 17 are returned. JSON dictionary format.",
supported_formats=(FORMATS.JSON, )),
]
super(ObjectiveFunctionProcess, self).__init__(
self._handler,
identifier="objective-function",
title="Objective-function process based on SpotPy and its 17 objective functions.",
version="1.0",
abstract="This process takes two NETCDF files (one containing variable 'q_sim' and the other 'q_obs') "
"and computes objective-function metrics between them.",
metadata=[Metadata("SPOTPY Documentation", "http://fb09-pasig.umwelt.uni-giessen.de/spotpy/")],
inputs=inputs,
outputs=outputs,
keywords=["objective functions", "hydrological signatures", "optimization"] + list(funcs.keys()),
status_supported=True,
store_supported=True)
def _handler(self, request, response):
obs_fn = request.inputs['obs'][0].file
sim_fn = request.inputs['sim'][0].file
if 'name' in request.inputs:
names = [i.data for i in request.inputs['name']]
else:
names = funcs.keys()
obs = xr.open_dataset(obs_fn)
sim = xr.open_dataset(sim_fn)
# There is no support yet for handling NaN in SpotPy. Here we're starting from the second index to avoid missing
# values in the first time index for obs.
out = {}
for name in names:
da = xr.apply_ufunc(funcs[name],
obs['q_obs'].isel(time=slice(1, None)),
sim['q_sim'].isel(time=slice(1, None)),
input_core_dims=[['time', ], ['time', ]],
vectorize=True)
# For now we're assuming there is just one basin
out[name] = da.data[0]
response.outputs['metrics'].data = json.dumps(out)
return response
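# Side note (illustrative, independent of the WPS plumbing above): the entries of
# ``funcs`` are plain SpotPy objective functions and can be called directly on two
# aligned sequences, e.g.
#
#   import spotpy as sp
#   sp.objectivefunctions.rmse([1.0, 2.0, 3.0], [1.1, 1.9, 3.2])
#
# which is what ``xr.apply_ufunc`` vectorizes over the q_obs/q_sim series above.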
|
the-stack_106_14149
|
"""
Since we are working with our own dataset, we need to patch transforms for these custom
input formats.
"""
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import (
InterpolationMode,
RandomCrop,
RandomHorizontalFlip,
ToTensor,
)
import logging
__all__ = [
"RandomCropImagePair",
"RandomHorizontalFlipImagePair",
"ToTensorImagePair",
"ResizeValidationImageFeaturesPair",
"ToTensorValidationPair",
]
logger = logging.getLogger("ransacflow.data.transform")
class RandomCropImagePair(RandomCrop):
def forward(self, im_pair):
im0, im1 = im_pair
assert im0.shape == im1.shape, "image pair has different dimensions"
# the following snippet is copied from RandomCrop
if self.padding is not None:
im0 = F.pad(im0, self.padding, self.fill, self.padding_mode)
im1 = F.pad(im1, self.padding, self.fill, self.padding_mode)
width, height = F.get_image_size(im0)
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
im0 = F.pad(im0, padding, self.fill, self.padding_mode)
im1 = F.pad(im1, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
im0 = F.pad(im0, padding, self.fill, self.padding_mode)
im1 = F.pad(im1, padding, self.fill, self.padding_mode)
crop_dims = self.get_params(im0, self.size)
# we need to apply the same crop location to _both_ images
return F.crop(im0, *crop_dims), F.crop(im1, *crop_dims)
class RandomHorizontalFlipImagePair(RandomHorizontalFlip):
def forward(self, im_pair):
im0, im1 = im_pair
if torch.rand(1) < self.p:
return F.hflip(im0), F.hflip(im1)
return im_pair
class SafeToTensor(nn.Module):
def __call__(self, array):
return self.to_tensor(array)
def to_tensor(self, array):
try:
return F.to_tensor(array)
except ValueError:
# sometimes input array contains negative strides
return F.to_tensor(array.copy())
class ToTensorImagePair(SafeToTensor):
def __call__(self, im_pair):
return self.to_tensor(im_pair[0]), self.to_tensor(im_pair[1])
class EnsureRGBImagePair(nn.Module):
def __call__(self, im_pair):
im0, im1 = im_pair
im0 = self._ensure_rgb(im0)
im1 = self._ensure_rgb(im1)
return im0, im1
def _ensure_rgb(self, im):
c = im.shape[-3]
assert c in (1, 3), f"unknown image channel size ({c})"
if im.shape[-3] == 1:
im = torch.cat([im] * 3, dim=-3)
return im
class ResizeValidationImageFeatures(nn.Module):
"""
    Resize an image so that its shorter edge matches ``min_size`` (rounded down to a
    multiple of ``stride``) and rescale the matching feature coordinates accordingly.
Args:
        min_size (int): The minimum allowed size for the shorter edge of the resized image.
interpolation (InterpolationMode, optional): Desired interpolation enum defined
by `torchvision.transforms.InterpolationMode`.
        stride (int, optional): Image dimensions must be a multiple of the stride, since we downsample
the image during feature extraction.
"""
def __init__(
self,
min_size: int,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
stride: int = 16,
):
super().__init__()
self.min_size = float(min_size)
self.interpolation = interpolation
self.stride = stride
def forward(self, item):
image, features = item
h, w = image.shape[-2:]
h, w = float(h), float(w)
        # estimate new output size based on the min size constraint
ratio = min(h / self.min_size, w / self.min_size)
ho, wo = round(h / ratio), round(w / ratio)
        # estimate new output size based on the stride constraint
ho, wo = ho // self.stride * self.stride, wo // self.stride * self.stride
# since we may round up/down in the process, recalculate final ratio to ensure
# feature points are at correct positions
size = (ho, wo)
ratio_h, ratio_w = h / ho, w / wo
logger.debug(
f"init_ratio={ratio:.5f}, actual_ratio=(w={ratio_w:.5f}, h={ratio_h:.5f})"
)
ratio = torch.tensor([ratio_w, ratio_h])
# 1) resize image pairs
image = F.resize(image, size, self.interpolation)
# 2) resize feature points
features /= ratio
return image, features
class ResizeValidationImageFeaturesPair(ResizeValidationImageFeatures):
"""
    Similar to `ResizeValidationImageFeatures` but resizes both the source and the target.
Args:
        min_size (int): The minimum allowed size for the shorter edge of the resized image.
interpolation (InterpolationMode, optional): Desired interpolation enum defined
by `torchvision.transforms.InterpolationMode`.
        stride (int, optional): Image dimensions must be a multiple of the stride, since we downsample
the image during feature extraction.
"""
def forward(self, item):
source, target, affine_mat = item
source = super().forward(source)
target = super().forward(target)
return source, target, affine_mat
class ToTensorValidationPair(SafeToTensor):
"""
A tailored ToTensor operation for validation pairs.
    We need this custom transformation since the validation set returns
(src_image, src_feat), (tgt_image, tgt_feat), affine_mat
Each of them needs to convert to tensor independently.
"""
def __call__(self, item):
(src_image, src_feat), (tgt_image, tgt_feat), affine_mat = item
# images, convert from (H, W, C) to (C, H, W)
src_image = self.to_tensor(src_image)
tgt_image = self.to_tensor(tgt_image)
# rest of the ndarray can transform to tensor directly
src_feat = torch.from_numpy(src_feat)
tgt_feat = torch.from_numpy(tgt_feat)
affine_mat = torch.from_numpy(affine_mat)
return (src_image, src_feat), (tgt_image, tgt_feat), affine_mat
class EnsureRGBValidationPair(EnsureRGBImagePair):
def __call__(self, item):
(src_image, src_feat), (tgt_image, tgt_feat), affine_mat = item
src_image = self._ensure_rgb(src_image)
tgt_image = self._ensure_rgb(tgt_image)
return (src_image, src_feat), (tgt_image, tgt_feat), affine_mat
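# Rough composition sketch (the crop size and flip probability are placeholder
# values, and `im0`/`im1` stand for an already-loaded training image pair):
#
#   from torchvision.transforms import Compose
#   train_transform = Compose([
#       ToTensorImagePair(),
#       EnsureRGBImagePair(),
#       RandomCropImagePair(224),
#       RandomHorizontalFlipImagePair(p=0.5),
#   ])
#   im0_t, im1_t = train_transform((im0, im1))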
|
the-stack_106_14150
|
import datetime
import logging
import os
from copy import deepcopy
from typing import Any, Dict, Optional
from tqdm import tqdm
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from ...customexception import ModelError
from ...data.datasets import BaseDataset
from ...models import BaseAE
from ..trainer_utils import set_seed
from ..base_trainer import BaseTrainer
from .coupled_optimizer_trainer_config import CoupledOptimizerTrainerConfig
logger = logging.getLogger(__name__)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logger.setLevel(logging.INFO)
class CoupledOptimizerTrainer(BaseTrainer):
"""Trainer using disctinct optimizers for encoder and decoder nn.
Args:
model (BaseAE): The model to train
train_dataset (BaseDataset): The training dataset of type
:class:`~pythae.data.dataset.BaseDataset`
        training_config (CoupledOptimizerTrainerConfig): The training arguments summarizing the main
parameters used for training. If None, a basic training instance of
:class:`CoupledOptimizerTrainerConfig` is used. Default: None.
encoder_optimizer (~torch.optim.Optimizer): An instance of `torch.optim.Optimizer` used for
training the encoder. If None, a :class:`~torch.optim.Adam` optimizer is used.
Default: None.
decoder_optimizer (~torch.optim.Optimizer): An instance of `torch.optim.Optimizer` used for
training the decoder. If None, a :class:`~torch.optim.Adam` optimizer is used.
Default: None.
"""
def __init__(
self,
model: BaseAE,
train_dataset: BaseDataset,
eval_dataset: Optional[BaseDataset] = None,
training_config: Optional[CoupledOptimizerTrainerConfig] = None,
encoder_optimizer: Optional[torch.optim.Optimizer] = None,
decoder_optimizer: Optional[torch.optim.Optimizer] = None,
        encoder_scheduler: Optional[Any] = None,
        decoder_scheduler: Optional[Any] = None
):
BaseTrainer.__init__(
self,
model=model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
training_config=training_config,
optimizer=None)
# set encoder optimizer
if encoder_optimizer is None:
encoder_optimizer = self.set_default_encoder_optimizer(model)
else:
encoder_optimizer = self._set_optimizer_on_device(encoder_optimizer, self.device)
if encoder_scheduler is None:
encoder_scheduler = self.set_default_scheduler(model, encoder_optimizer)
# set decoder optimizer
if decoder_optimizer is None:
decoder_optimizer = self.set_default_decoder_optimizer(model)
else:
decoder_optimizer = self._set_optimizer_on_device(decoder_optimizer, self.device)
if decoder_scheduler is None:
            decoder_scheduler = self.set_default_scheduler(model, decoder_optimizer)
self.encoder_optimizer = encoder_optimizer
self.decoder_optimizer = decoder_optimizer
self.encoder_scheduler = encoder_scheduler
self.decoder_scheduler = decoder_scheduler
self.optimizer = None
def set_default_encoder_optimizer(self, model: BaseAE) -> torch.optim.Optimizer:
optimizer = optim.Adam(
model.encoder.parameters(),
lr=self.training_config.learning_rate,
weight_decay=self.training_config.encoder_optim_decay
)
return optimizer
def set_default_decoder_optimizer(self, model: BaseAE) -> torch.optim.Optimizer:
optimizer = optim.Adam(
model.decoder.parameters(),
lr=self.training_config.learning_rate,
weight_decay=self.training_config.decoder_optim_decay
)
return optimizer
def train(self, log_output_dir: str = None):
"""This function is the main training function
Args:
log_output_dir (str): The path in which the log will be stored
"""
# run sanity check on the model
self._run_model_sanity_check(self.model, self.train_dataset)
logger.info("Model passed sanity check !\n")
self._training_signature = (
str(datetime.datetime.now())[0:19].replace(" ", "_").replace(":", "-")
)
training_dir = os.path.join(
self.training_config.output_dir,
f"{self.model.model_name}_training_{self._training_signature}",
)
self.training_dir = training_dir
if not os.path.exists(training_dir):
os.makedirs(training_dir)
logger.info(
f"Created {training_dir}. \n"
"Training config, checkpoints and final model will be saved here.\n"
)
log_verbose = False
# set up log file
if log_output_dir is not None:
log_dir = log_output_dir
log_verbose = True
# if dir does not exist create it
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logger.info(f"Created {log_dir} folder since did not exists.")
logger.info("Training logs will be recodered here.\n")
logger.info(" -> Training can be monitored here.\n")
# create and set logger
log_name = f"training_logs_{self._training_signature}"
file_logger = logging.getLogger(log_name)
file_logger.setLevel(logging.INFO)
f_handler = logging.FileHandler(
os.path.join(log_dir, f"training_logs_{self._training_signature}.log")
)
f_handler.setLevel(logging.INFO)
file_logger.addHandler(f_handler)
# Do not output logs in the console
file_logger.propagate = False
file_logger.info("Training started !\n")
file_logger.info(
f"Training params:\n - max_epochs: {self.training_config.num_epochs}\n"
f" - batch_size: {self.training_config.batch_size}\n"
f" - checkpoint saving every {self.training_config.steps_saving}\n"
)
file_logger.info(f"Model Architecture: {self.model}\n")
file_logger.info(f"Optimizer: {self.optimizer}\n")
logger.info("Successfully launched training !\n")
# set best losses for early stopping
best_train_loss = 1e10
best_eval_loss = 1e10
for epoch in range(1, self.training_config.num_epochs+1):
epoch_train_loss = self.train_step(epoch)
if self.eval_dataset is not None:
epoch_eval_loss = self.eval_step(epoch)
self.encoder_scheduler.step(epoch_eval_loss)
self.decoder_scheduler.step(epoch_eval_loss)
else:
epoch_eval_loss = best_eval_loss
self.encoder_scheduler.step(epoch_train_loss)
                self.decoder_scheduler.step(epoch_train_loss)
if (
epoch_eval_loss < best_eval_loss
and not self.training_config.keep_best_on_train
):
best_model_epoch = epoch
best_eval_loss = epoch_eval_loss
best_model = deepcopy(self.model)
self._best_model = best_model
elif (
epoch_train_loss < best_train_loss
and self.training_config.keep_best_on_train
):
best_model_epoch = epoch
best_train_loss = epoch_train_loss
best_model = deepcopy(self.model)
self._best_model = best_model
# save checkpoints
if (
self.training_config.steps_saving is not None
and epoch % self.training_config.steps_saving == 0
):
self.save_checkpoint(model=best_model, dir_path=training_dir, epoch=epoch)
logger.info(f"Saved checkpoint at epoch {epoch}\n")
if log_verbose:
file_logger.info(f"Saved checkpoint at epoch {epoch}\n")
if self.eval_dataset is not None:
logger.info(
"----------------------------------------------------------------"
)
logger.info(
f"Epoch {epoch}: Train loss: {np.round(epoch_train_loss, 10)}"
)
logger.info(
f"Epoch {epoch}: Eval loss: {np.round(epoch_eval_loss, 10)}"
)
logger.info(
"----------------------------------------------------------------"
)
else:
logger.info(
"----------------------------------------------------------------"
)
logger.info(
f"Epoch {epoch}: Train loss: {np.round(epoch_train_loss, 10)}"
)
logger.info(
"----------------------------------------------------------------"
)
final_dir = os.path.join(training_dir, "final_model")
self.save_model(best_model, dir_path=final_dir)
logger.info("----------------------------------")
logger.info("Training ended!")
logger.info(f"Saved final model in {final_dir}")
def train_step(self, epoch: int):
"""The trainer performs training loop over the train_loader.
Parameters:
epoch (int): The current epoch number
Returns:
(torch.Tensor): The step training loss
"""
# set model in train model
self.model.train()
epoch_loss = 0
with tqdm(self.train_loader, unit="batch") as tepoch:
for inputs in tepoch:
tepoch.set_description(
f"Training of epoch {epoch}/{self.training_config.num_epochs}"
)
inputs = self._set_inputs_to_device(inputs)
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
model_output = self.model(
inputs, epoch=epoch, dataset_size=len(self.train_loader.dataset)
)
loss = model_output.loss
loss.backward()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
epoch_loss += loss.item()
# Allows model updates if needed
self.model.update()
epoch_loss /= len(self.train_loader)
return epoch_loss
def save_checkpoint(self, model: BaseAE, dir_path, epoch: int):
"""Saves a checkpoint alowing to restart training from here
Args:
dir_path (str): The folder where the checkpoint should be saved
            epoch (int): The epoch number"""
checkpoint_dir = os.path.join(dir_path, f"checkpoint_epoch_{epoch}")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# save optimizers
torch.save(
deepcopy(self.encoder_optimizer.state_dict()),
os.path.join(checkpoint_dir, "encoder_optimizer.pt"),
)
torch.save(
deepcopy(self.decoder_optimizer.state_dict()),
os.path.join(checkpoint_dir, "decoder_optimizer.pt"),
)
# save model
model.save(checkpoint_dir)
# save training config
self.training_config.save_json(checkpoint_dir, "training_config")
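# Rough usage sketch (the dataset and config names below are placeholders; the
# config fields shown mirror what this trainer actually reads):
#
#   config = CoupledOptimizerTrainerConfig(output_dir="my_training",
#                                          num_epochs=10, batch_size=64)
#   trainer = CoupledOptimizerTrainer(model=my_vae,
#                                     train_dataset=train_set,
#                                     eval_dataset=eval_set,
#                                     training_config=config)
#   trainer.train()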
|
the-stack_106_14152
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import os
import fnmatch
from flask import send_from_directory, Flask
from flexget.webserver import register_app, register_home
from flask_compress import Compress
log = logging.getLogger('webui')
manager = None
debug = False
app_base = None
ui_base = os.path.dirname(os.path.realpath(__file__))
ui_src = os.path.join(ui_base, 'src')
ui_dist = os.path.join(ui_base, 'app')
bower_components = os.path.join(ui_base, 'bower_components')
webui_app = Flask(__name__)
Compress(webui_app)
webui_app.url_path = '/'
@webui_app.route('/<path:path>')
def serve_app(path):
if debug:
if path.startswith('bower_components'):
return send_from_directory(
                bower_components, path[len('bower_components'):].lstrip('/')
)
if os.path.exists(os.path.join(ui_src, path)):
return send_from_directory(ui_src, path)
if not app_base:
return send_from_directory(ui_base, 'load.failure.html')
return send_from_directory(app_base, path)
@webui_app.route('/')
def root():
if not app_base:
return send_from_directory(ui_base, 'load.failure.html')
return send_from_directory(app_base, 'app.html')
def _find(path, f):
matches = []
for root_dir, _, file_names in os.walk(path):
for filename in fnmatch.filter(file_names, f):
matches.append(os.path.join(root_dir, filename))
return matches
def _strip_trailing_sep(path):
return path.rstrip('\\/')
def register_web_ui(mgr):
global manager, app_base, debug
manager = mgr
if 'debug' in manager.args:
debug = True
if debug:
app_base = os.path.join(ui_base, '.tmp', 'serve')
if not os.path.exists(app_base):
log.warning(
'Unable to start web ui in debug mode. To enable debug mode please run the debug build, '
'see http://flexget.com/wiki/Web-UI for instructions'
)
            log.warning('Attempting to serve web ui from compiled directory')
app_base = None
if not app_base:
app_base = ui_dist
if not os.path.exists(app_base):
log.fatal(
'Failed to start web ui,'
                ' this can happen if you are running from the GitHub version and forgot to run the web ui build, '
'see http://flexget.com/wiki/Web-UI for instructions'
)
app_base = None
register_app(webui_app.url_path, webui_app)
register_home('%s/' % webui_app.url_path)
|
the-stack_106_14153
|
"""
Main command line interface of the pylexibank package.
Like programs such as git, this cli splits its functionality into sub-commands
(see e.g. https://docs.python.org/2/library/argparse.html#sub-commands).
The rationale behind this is that while a lot of different tasks may be
triggered using this cli, most of them require common configuration.
The basic invocation looks like
lexibank [OPTIONS] <command> [args]
"""
import sys
import os
import argparse
import readline
import glob
from termcolor import colored
from appdirs import user_config_dir
from clldutils.inifile import INI
from clldutils.clilib import ArgumentParserWithLogging, ParserError
from clldutils.path import Path
from clldutils.misc import lazyproperty
import pylexibank
from pylexibank.dataset import iter_datasets
from pylexibank.glottolog import Glottolog
from pylexibank.concepticon import Concepticon
import pylexibank.commands
assert pylexibank.commands
REPOS = [
('glottolog', 'clld/glottolog'),
('concepticon', 'clld/concepticon-data'),
]
# We want to provide tab-completion when the user is asked to provide local paths to
# repository clones.
def complete_dir(text, state): # pragma: no cover
if os.path.isdir(text) and not text.endswith(os.sep):
text += os.sep
return ([p for p in glob.glob(text + '*') if os.path.isdir(p)] + [None])[state]
readline.parse_and_bind("tab: complete")
readline.set_completer_delims('\t')
readline.set_completer(complete_dir)
def get_path(src): # pragma: no cover
"""
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
"""
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False
class Config(INI):
@lazyproperty
def concepticon(self):
return Concepticon(self['paths']['concepticon'])
@lazyproperty
def glottolog(self):
return Glottolog(self['paths']['glottolog'])
@lazyproperty
def datasets(self):
return sorted(
iter_datasets(glottolog=self.glottolog, concepticon=self.concepticon, verbose=True),
key=lambda d: d.id)
def configure(cfgpath=None):
"""
Configure lexibank.
    :return: a Config instance
"""
cfgpath = Path(cfgpath) \
if cfgpath else Path(user_config_dir(pylexibank.__name__)) / 'config.ini'
if not cfgpath.exists():
print("""
{0}
You seem to be running lexibank for the first time.
Your system configuration will now be written to a config file to be used
whenever lexibank is run later on.
""".format(
colored('Welcome to lexibank!', 'blue', attrs=['bold', 'reverse'])))
if not cfgpath.parent.exists():
cfgpath.parent.mkdir(parents=True)
cfg = Config()
cfg['paths'] = {k: get_path(src) for k, src in REPOS}
cfg.write(cfgpath)
print("""
Configuration has been written to:
{0}
You may edit this file to adapt to changes in your system or to reconfigure settings
such as the logging level.""".format(cfgpath.resolve()))
else:
cfg = Config.from_file(cfgpath)
try:
cfg.glottolog
except (FileNotFoundError, ValueError):
raise ParserError('Misconfigured Glottolog path in {0}'.format(cfgpath))
if not Path(cfg['paths']['concepticon']).exists():
raise ParserError('Misconfigured Concepticon path in {0}'.format(cfgpath))
    # Print the configuration file path for reference:
print("Using configuration file at:")
print(str(cfgpath) + '\n')
return cfg
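# Illustrative sketch of the resulting config file (hypothetical local paths):
#   [paths]
#   glottolog = /home/user/glottolog
#   concepticon = /home/user/concepticon-data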
def main(): # pragma: no cover
cfg = configure()
parser = ArgumentParserWithLogging(pylexibank.__name__)
parser.add_argument('--cfg', help=argparse.SUPPRESS, default=cfg)
parser.add_argument(
'--db',
help='path to SQLite db file',
default=os.path.join(os.getcwd(), 'lexibank.sqlite'))
sys.exit(parser.main())
|
the-stack_106_14155
|
"""TeamLineups.
Create a class to pull lineup data.
"""
from typing import Dict, List, Optional
from .base import BaseRequest
from .parameters import DefaultParameters
class TeamLineups(BaseRequest):
"""Pull lineup data.
Parameters
----------
TeamID : str
The team identifier
**params
Parameters for ``BaseRequest``
"""
endpoint: str = "teamdashlineups"
filename: str = "data_{TeamID}.json"
def __init__(
self,
TeamID: str,
output_dir: Optional[str] = None,
filesystem: Optional[str] = "file",
**params
):
"""Init method."""
super().__init__(
output_dir=output_dir, filesystem=filesystem, TeamID=TeamID, **params
)
@property
def datasets(self) -> List[str]:
"""Datasets returned by the API.
Returns
-------
List
Datasets returned by the API.
"""
return ["Overall", "Lineups"]
@property
def defaults(self) -> Dict:
"""Default parameter values for the endpoint.
Returns
-------
Dict
The default parameter values.
"""
return {
"GroupQuantity": DefaultParameters.GroupQuantity,
"LastNGames": DefaultParameters.LastNGames,
"MeasureType": DefaultParameters.MeasureType,
"Month": DefaultParameters.Month,
"OpponentTeamID": DefaultParameters.OpponentTeamID,
"PaceAdjust": DefaultParameters.PaceAdjust,
"PerMode": DefaultParameters.PerMode,
"Period": DefaultParameters.Period,
"PlusMinus": DefaultParameters.PlusMinus,
"Rank": DefaultParameters.Rank,
"Season": DefaultParameters.Season,
"SeasonType": DefaultParameters.SeasonType,
"TeamID": DefaultParameters.TeamID,
"VsDivision": DefaultParameters.VsDivision,
"VsConference": DefaultParameters.VsConference,
"ShotClockRange": DefaultParameters.ShotClockRange,
"SeasonSegment": DefaultParameters.SeasonSegment,
"PORound": DefaultParameters.PORound,
"Outcome": DefaultParameters.Outcome,
"Location": DefaultParameters.Location,
"LeagueID": DefaultParameters.LeagueID,
"GameSegment": DefaultParameters.GameSegment,
"GameID": DefaultParameters.GameID,
"DateTo": DefaultParameters.DateTo,
"DateFrom": DefaultParameters.DateFrom,
}
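# Minimal usage sketch (hypothetical team id, season and path; the actual request
# is performed by machinery inherited from BaseRequest, which is not shown here):
#
#   lineups = TeamLineups(TeamID="1610612737", output_dir="/tmp/nba", Season="2019-20")
#   lineups.datasets   # -> ["Overall", "Lineups"]
#   lineups.defaults   # dict of default query parameters for the endpoint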
|
the-stack_106_14161
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import six
from paddlerec.core.reader import Reader
from paddlerec.core.utils import envs
class EvaluateReader(Reader):
def init(self):
dict_path = envs.get_global_env("word_id_dict_path", None, "evaluate.reader")
self.word_to_id = dict()
self.id_to_word = dict()
with io.open(dict_path, 'r', encoding='utf-8') as f:
for line in f:
self.word_to_id[line.split(' ')[0]] = int(line.split(' ')[1])
self.id_to_word[int(line.split(' ')[1])] = line.split(' ')[0]
self.dict_size = len(self.word_to_id)
def native_to_unicode(self, s):
if self._is_unicode(s):
return s
try:
return self._to_unicode(s)
except UnicodeDecodeError:
res = self._to_unicode(s, ignore_errors=True)
return res
def _is_unicode(self, s):
if six.PY2:
if isinstance(s, unicode):
return True
else:
if isinstance(s, str):
return True
return False
def _to_unicode(self, s, ignore_errors=False):
if self._is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def strip_lines(self, line, vocab):
return self._replace_oov(vocab, self.native_to_unicode(line))
def _replace_oov(self, original_vocab, line):
"""Replace out-of-vocab words with "<UNK>".
This maintains compatibility with published results.
Args:
original_vocab: a set of strings (The standard vocabulary for the dataset)
line: a unicode string - a space-delimited sequence of words.
Returns:
a unicode string - a space-delimited sequence of words.
"""
return u" ".join([
word if word in original_vocab else u"<UNK>" for word in line.split()
])
def generate_sample(self, line):
def reader():
features = self.strip_lines(line.lower(), self.word_to_id)
features = features.split()
yield [('analogy_a', [self.word_to_id[features[0]]]), ('analogy_b', [self.word_to_id[features[1]]]),
('analogy_c', [self.word_to_id[features[2]]]), ('analogy_d', [self.word_to_id[features[3]]])]
return reader
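# Illustrative sketch (hypothetical vocabulary): for an analogy line such as
#   "Athens Greece Baghdad Iraq"
# generate_sample() lower-cases the text, maps out-of-vocabulary tokens to "<UNK>",
# and its reader yields a single sample of the form
#   [('analogy_a', [id_of("athens")]), ('analogy_b', [id_of("greece")]),
#    ('analogy_c', [id_of("baghdad")]), ('analogy_d', [id_of("iraq")])]
# where id_of(w) is the integer id looked up in word_to_id.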
|
the-stack_106_14162
|
# Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the entrypoint for the commandline Zoe client
"""
from datetime import datetime, timezone
import json
import logging
import os
import sys
from argparse import ArgumentParser, Namespace, FileType, RawDescriptionHelpFormatter
from typing import Tuple
from tabulate import tabulate
from zoe_cmd import utils
from zoe_cmd.api_lib import ZoeAPI
from zoe_lib.exceptions import ZoeAPIException, InvalidApplicationDescription
from zoe_lib.applications import app_validate
from zoe_lib.version import ZOE_API_VERSION
def _check_api_version(api: ZoeAPI):
"""Checks if there is a version mismatch between server and client."""
info = api.info.info()
if info['api_version'] != ZOE_API_VERSION:
print('Warning: this client understands ZOE API v. {}, but server talks v. {}'.format(ZOE_API_VERSION, info['api_version']))
print('Warning: certain commands may not work correctly')
print('Warning: please upgrade or downgrade your client to match the server version')
def app_validate_cmd(api_: ZoeAPI, args):
"""Validate an application description."""
app_descr = json.load(args.jsonfile)
try:
app_validate(app_descr)
except InvalidApplicationDescription as e:
print(e)
else:
print("Static validation OK")
def exec_list_cmd(api: ZoeAPI, args):
"""List executions"""
filter_names = [
'status',
'name',
'user_id',
'limit',
'earlier_than_submit',
'earlier_than_start',
'earlier_than_end',
'later_than_submit',
'later_than_start',
'later_than_end'
]
filters = {}
for key, value in vars(args).items():
if key in filter_names:
filters[key] = value
data = api.executions.list(**filters)
if len(data) == 0:
return
tabular_data = [[e['id'], e['name'], e['user_id'], e['status']] for e in sorted(data, key=lambda x: x['id'])]
headers = ['ID', 'Name', 'User ID', 'Status']
print(tabulate(tabular_data, headers))
def exec_get_cmd(api: ZoeAPI, args):
"""Gather information about an execution."""
execution = api.executions.get(args.id)
if execution is None:
print('Execution not found')
else:
print('Execution {} (ID: {})'.format(execution['name'], execution['id']))
print('Application name: {}'.format(execution['description']['name']))
print('Status: {}'.format(execution['status']))
if execution['status'] == 'error':
print('Last error: {}'.format(execution['error_message']))
print()
print('Time submit: {}'.format(datetime.fromtimestamp(execution['time_submit'], timezone.utc).astimezone()))
if execution['time_start'] is None:
print('Time start: {}'.format('not yet'))
else:
print('Time start: {}'.format(datetime.fromtimestamp(execution['time_start'], timezone.utc).astimezone()))
if execution['time_end'] is None:
print('Time end: {}'.format('not yet'))
else:
print('Time end: {}'.format(datetime.fromtimestamp(execution['time_end'], timezone.utc).astimezone()))
print()
endpoints = api.executions.endpoints(execution['id'])
if endpoints is not None and len(endpoints) > 0:
print('Exposed endpoints:')
for endpoint in endpoints:
print(' - {}: {}'.format(endpoint[0], endpoint[1]))
else:
print('This ZApp does not expose any endpoint')
print()
tabular_data = []
for c_id in execution['services']:
service = api.services.get(c_id)
service_data = [service['id'], service['name'], 'true' if service['essential'] else 'false', service['status'], service['backend_status'], service['backend_host'], service['error_message'] if service['error_message'] is not None else '']
tabular_data.append(service_data)
headers = ['ID', 'Name', 'Essential', 'Zoe status', 'Backend status', 'Host', 'Error message']
print(tabulate(tabular_data, headers))
def exec_rm_cmd(api: ZoeAPI, args):
"""Delete an execution and kill it if necessary."""
api.executions.delete(args.id)
def exec_kill_user_cmd(api: ZoeAPI, args):
"""Terminates all executions for the given user."""
filters = {
'status': 'running',
'user_id': args.user_id
}
data = api.executions.list(**filters)
print('Terminating {} executions belonging to user {}'.format(len(data), args.user_id))
for execution in data:
api.executions.terminate(execution)
print('Execution {} terminated'.format(execution))
def quota_ls_cmd(api: ZoeAPI, args):
"""List available quotas."""
filters = {}
    if args.name is not None:
        filters['name'] = args.name
quotas = api.quota.list(filters)
tabular_data = [[q['id'], q['name'], q['concurrent_executions'], q['memory'], q['cores'], q['runtime_limit']] for q in sorted(quotas, key=lambda x: x['id'])]
headers = ['ID', 'Name', 'Conc. Executions', 'Memory', 'Cores', 'Runtime limit (h)']
print(tabulate(tabular_data, headers))
def quota_get_cmd(api: ZoeAPI, args):
"""Get a quota by its ID."""
quota = api.quota.get(args.id)
tabular_data = [[quota['id'], quota['name'], quota['concurrent_executions'], quota['memory'], quota['cores'], quota['runtime_limit']]]
headers = ['ID', 'Name', 'Conc. Executions', 'Memory', 'Cores', 'Runtime limit (h)']
print(tabulate(tabular_data, headers))
def quota_create_cmd(api: ZoeAPI, args):
"""Create a new quota."""
quota = {
'name': args.name,
'concurrent_executions': args.concurrent_executions,
'memory': args.memory,
        'cores': args.cores,
        'runtime_limit': args.runtime_limit
    }
new_id = api.quota.create(quota)
print('New quota created with ID: {}'.format(new_id))
def quota_delete_cmd(api: ZoeAPI, args):
"""Delete a quota given its ID."""
api.quota.delete(args.id)
def quota_update_cmd(api: ZoeAPI, args):
"""Updates an existing quota."""
quota_update = {}
if args.name is not None:
quota_update['name'] = args.name
if args.concurrent_executions is not None:
quota_update['concurrent_executions'] = args.concurrent_executions
if args.memory is not None:
quota_update['memory'] = args.memory
    if args.cores is not None:
        quota_update['cores'] = args.cores
    if args.runtime_limit is not None:
        quota_update['runtime_limit'] = args.runtime_limit
api.quota.update(args.id, quota_update)
def role_ls_cmd(api: ZoeAPI, args):
"""List available roles."""
def b2t(val):
"""Boolean to text."""
if val:
return "Yes"
else:
return "No"
filters = {}
if args.name is not None:
filters['name'] = args.name
roles = api.role.list(filters)
tabular_data = [[r['id'], r['name'], b2t(r['can_see_status']), b2t(r['can_change_config']), b2t(r['can_operate_others']), b2t(r['can_delete_executions']), b2t(r['can_access_api']), b2t(r['can_customize_resources'])] for r in sorted(roles, key=lambda x: x['id'])]
headers = ['ID', 'Name', 'See status', 'Change config', 'Operate others', 'Delete execs', 'API access', 'Customize resources']
print(tabulate(tabular_data, headers))
def role_get_cmd(api: ZoeAPI, args):
"""Get a role by its ID."""
def b2t(val):
"""Boolean to text."""
if val:
return "Yes"
else:
return "No"
role = api.role.get(args.id)
tabular_data = [[role['id'], role['name'], b2t(role['can_see_status']), b2t(role['can_change_config']), b2t(role['can_operate_others']), b2t(role['can_delete_executions']), b2t(role['can_access_api']), b2t(role['can_customize_resources'])]]
headers = ['ID', 'Name', 'See status', 'Change config', 'Operate others', 'Delete execs', 'API access', 'Customize resources']
print(tabulate(tabular_data, headers))
def role_create_cmd(api: ZoeAPI, args):
"""Create a new role."""
role = {
'name': args.name,
'can_see_status': bool(args.can_see_status),
'can_change_config': bool(args.can_change_config),
'can_operate_others': bool(args.can_operate_others),
'can_delete_executions': bool(args.can_delete_executions),
'can_access_api': bool(args.can_access_api),
'can_customize_resources': bool(args.can_customize_resources),
'can_access_full_zapp_shop': bool(args.can_access_full_zapp_shop)
}
new_id = api.role.create(role)
print('New role created with ID: {}'.format(new_id))
def role_delete_cmd(api: ZoeAPI, args):
"""Delete a role given its ID."""
api.role.delete(args.id)
def role_update_cmd(api: ZoeAPI, args):
"""Updates an existing quota."""
role_update = {}
if args.name is not None:
role_update['name'] = args.name
if args.can_see_status is not None:
role_update['can_see_status'] = bool(args.can_see_status)
if args.can_change_config is not None:
role_update['can_change_config'] = bool(args.can_change_config)
if args.can_operate_others is not None:
role_update['can_operate_others'] = bool(args.can_operate_others)
if args.can_delete_executions is not None:
role_update['can_delete_executions'] = bool(args.can_delete_executions)
if args.can_access_api is not None:
role_update['can_access_api'] = bool(args.can_access_api)
if args.can_customize_resources is not None:
role_update['can_customize_resources'] = bool(args.can_customize_resources)
if args.can_access_full_zapp_shop is not None:
role_update['can_access_full_zapp_shop'] = bool(args.can_access_full_zapp_shop)
api.role.update(args.id, role_update)
def user_ls_cmd(api: ZoeAPI, args):
"""List defined users."""
filters = {}
if args.username is not None:
filters['username'] = args.username
if args.enabled is not None:
filters['enabled'] = args.enabled == 1
if args.auth_source is not None:
filters['auth_source'] = args.auth_source
if args.role is not None:
        roles = api.role.list({'name': args.role})
        if len(roles) == 0:
            print('Unknown role specified')
            return
        filters['role_id'] = roles[0]['id']
if args.quota is not None:
        quotas = api.quota.list({'name': args.quota})
        if len(quotas) == 0:
            print('Unknown quota specified')
            return
        filters['quota_id'] = quotas[0]['id']
users = api.user.list(filters)
tabular_data = []
role_cache = {}
quota_cache = {}
for user in sorted(users, key=lambda u: u['id']):
if user['role_id'] in role_cache:
role = role_cache[user['role_id']]
else:
role = api.role.get(user['role_id'])
role_cache[user['role_id']] = role
if user['quota_id'] in quota_cache:
quota = quota_cache[user['quota_id']]
else:
quota = api.quota.get(user['quota_id'])
quota_cache[user['quota_id']] = quota
tabular_data.append([user['id'], user['username'], user['email'], user['fs_uid'], user['priority'], user['enabled'], user['auth_source'], role['name'], quota['name']])
headers = ['ID', 'Username', 'Email', 'FS UID', 'Priority', 'Enabled', 'Auth source', 'Role', 'Quota']
print(tabulate(tabular_data, headers))
def user_get_cmd(api: ZoeAPI, args):
"""Get a user by its ID."""
user = api.user.get(args.id)
role = api.role.get(user['role_id'])
quota = api.quota.get(user['quota_id'])
tabular_data = [[user['id'], user['username'], user['email'], user['fs_uid'], user['priority'], user['enabled'], user['auth_source'], role['name'], quota['name']]]
headers = ['ID', 'Username', 'Email', 'FS UID', 'Priority', 'Enabled', 'Auth source', 'Role', 'Quota']
print(tabulate(tabular_data, headers))
def user_create_cmd(api: ZoeAPI, args):
"""Creates a user."""
user = {
'username': args.username,
'email': args.email,
'auth_source': args.auth_source,
'fs_uid': args.fs_uid
}
quota = api.quota.list({'name': args.quota})
if len(quota) == 0:
print('Unknown quota')
return
user['quota_id'] = quota[0]['id']
role = api.role.list({'name': args.role})
if len(role) == 0:
print('Unknown role')
return
user['role_id'] = role[0]['id']
new_id = api.user.create(user)
print('New user created with ID: {}'.format(new_id))
def user_delete_cmd(api: ZoeAPI, args):
"""Delete a user."""
api.user.delete(args.id)
def user_update_cmd(api: ZoeAPI, args):
"""Updates a user."""
user_update = {}
if args.email is not None:
user_update['email'] = args.email
if args.fs_uid is not None:
user_update['fs_uid'] = args.fs_uid
if args.password is not None:
user_update['password'] = args.password
if args.enabled is not None:
user_update['enabled'] = args.enabled
if args.auth_source is not None:
user_update['auth_source'] = args.auth_source
if args.priority is not None:
user_update['priority'] = args.priority
if args.role_id is not None:
user_update['role_id'] = args.role_id
if args.quota_id is not None:
user_update['quota_id'] = args.quota_id
api.user.update(args.id, user_update)
ENV_HELP_TEXT = '''To authenticate with Zoe you need to define three environment variables:
ZOE_URL: points to the URL of the Zoe Scheduler (ex.: http://localhost:5000/)
ZOE_USER: the username used for authentication
ZOE_PASS: the password used for authentication
or create a ~/.zoerc file (another location can be specified with --auth-file) like this:
url = xxx
user = yyy
pass = zzz
Environment variables will override the values specified in the configuration file.
'''
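# Illustrative invocation sketch (hypothetical values; the executable name depends on
# how the package's console entry point is installed, only the sub-command syntax
# below is defined in this module):
#   zoe-admin quota-create students 2 4294967296 8 24
#   zoe-admin user-ls --role admin --enabled 1
#   zoe-admin exec-ls --status running --limit 10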
def process_arguments() -> Tuple[ArgumentParser, Namespace]:
"""Parse command line arguments."""
parser = ArgumentParser(description="Zoe command-line administration client", epilog=ENV_HELP_TEXT, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--debug', action='store_true', help='Enable debug output')
    parser.add_argument('--auth-file', type=str, help='Authentication file to read credentials from', default=os.path.join(os.getenv('HOME', ''), '.zoerc'))
subparser = parser.add_subparsers()
# zapps
argparser_zapp_validate = subparser.add_parser('zapp-validate', help='Validate an application description')
argparser_zapp_validate.add_argument('jsonfile', type=FileType("r"), help='Application description')
argparser_zapp_validate.set_defaults(func=app_validate_cmd)
# executions
argparser_app_list = subparser.add_parser('exec-ls', help="List all executions for the calling user")
argparser_app_list.add_argument('--limit', type=int, help='Limit the number of executions')
argparser_app_list.add_argument('--name', help='Show only executions with this name')
argparser_app_list.add_argument('--user_id', help='Show only executions belonging to this user')
argparser_app_list.add_argument('--status', choices=["submitted", "queued", "starting", "error", "running", "cleaning up", "terminated"], help='Show only executions with this status')
argparser_app_list.add_argument('--earlier-than-submit', help='Show only executions submitted earlier than this timestamp (seconds since UTC epoch)')
argparser_app_list.add_argument('--earlier-than-start', help='Show only executions started earlier than this timestamp (seconds since UTC epoch)')
argparser_app_list.add_argument('--earlier-than-end', help='Show only executions ended earlier than this timestamp (seconds since UTC epoch)')
argparser_app_list.add_argument('--later-than-submit', help='Show only executions submitted later than this timestamp (seconds since UTC epoch)')
argparser_app_list.add_argument('--later-than-start', help='Show only executions started later than this timestamp (seconds since UTC epoch)')
argparser_app_list.add_argument('--later-than-end', help='Show only executions ended later than this timestamp (seconds since UTC epoch)')
argparser_app_list.set_defaults(func=exec_list_cmd)
argparser_execution_get = subparser.add_parser('exec-get', help="Get execution status")
argparser_execution_get.add_argument('id', type=int, help="Execution id")
argparser_execution_get.set_defaults(func=exec_get_cmd)
argparser_execution_rm = subparser.add_parser('exec-rm', help="Deletes an execution")
argparser_execution_rm.add_argument('id', type=int, help="Execution id")
argparser_execution_rm.set_defaults(func=exec_rm_cmd)
argparser_execution_kill_user = subparser.add_parser('user-terminate', help="Terminate all executions of a user")
argparser_execution_kill_user.add_argument('user_id', help="User name")
argparser_execution_kill_user.set_defaults(func=exec_kill_user_cmd)
# Quotas
sub_parser = subparser.add_parser('quota-ls', help="List existing quotas")
sub_parser.add_argument('--name', help="Filter by name")
sub_parser.set_defaults(func=quota_ls_cmd)
sub_parser = subparser.add_parser('quota-get', help="Get a quota by its ID")
sub_parser.add_argument('id', type=int, help="Quota ID")
sub_parser.set_defaults(func=quota_get_cmd)
sub_parser = subparser.add_parser('quota-create', help="Create a new quota")
sub_parser.add_argument('name', help="Quota name")
sub_parser.add_argument('concurrent_executions', type=int, help="Maximum number of concurrent executions (0 means no limit)")
sub_parser.add_argument('memory', type=int, help="Maximum memory in bytes across all running executions (0 means no limit)")
sub_parser.add_argument('cores', type=int, help="Maximum number of cores across all running executions (0 means no limit)")
sub_parser.add_argument('runtime_limit', type=int, help="Maximum number of hours an execution is allowed to run (0 means no limit)")
sub_parser.set_defaults(func=quota_create_cmd)
sub_parser = subparser.add_parser('quota-delete', help="Delete a quota")
sub_parser.add_argument('id', type=int, help="Quota ID")
sub_parser.set_defaults(func=quota_delete_cmd)
sub_parser = subparser.add_parser('quota-update', help="Update an existing quota")
sub_parser.add_argument('id', type=int, help="ID of the quota to update")
sub_parser.add_argument('--name', help="Quota name")
sub_parser.add_argument('--concurrent_executions', type=int, help="Maximum number of concurrent executions (0 means no limit)")
sub_parser.add_argument('--memory', type=int, help="Maximum memory in bytes across all running executions (0 means no limit)")
sub_parser.add_argument('--cores', type=int, help="Maximum number of cores across all running executions (0 means no limit)")
sub_parser.add_argument('--runtime_limit', type=int, help="Maximum number of hours an execution is allowed to run (0 means no limit)")
sub_parser.set_defaults(func=quota_update_cmd)
# Roles
sub_parser = subparser.add_parser('role-ls', help="List existing roles")
sub_parser.add_argument('--name', help="Filter by name")
sub_parser.set_defaults(func=role_ls_cmd)
sub_parser = subparser.add_parser('role-get', help="Get a role by its ID")
sub_parser.add_argument('id', type=int, help="Role ID")
sub_parser.set_defaults(func=role_get_cmd)
sub_parser = subparser.add_parser('role-create', help="Create a new role")
sub_parser.add_argument('name', help="Role name")
sub_parser.add_argument('can_see_status', choices=[0, 1], type=int, help="Can access the status web page")
sub_parser.add_argument('can_change_config', choices=[0, 1], type=int, help="Can change Zoe configuration")
sub_parser.add_argument('can_operate_others', choices=[0, 1], type=int, help="Can operate on other users' executions")
sub_parser.add_argument('can_delete_executions', choices=[0, 1], type=int, help="Can delete executions permanently")
sub_parser.add_argument('can_access_api', choices=[0, 1], type=int, help="Can access the REST API")
sub_parser.add_argument('can_customize_resources', choices=[0, 1], type=int, help="Can customize resource reservations before starting executions")
sub_parser.add_argument('can_access_full_zapp_shop', choices=[0, 1], type=int, help="Can access all ZApps in the ZApp shop")
sub_parser.set_defaults(func=role_create_cmd)
sub_parser = subparser.add_parser('role-delete', help="Delete a role")
sub_parser.add_argument('id', type=int, help="Role ID")
sub_parser.set_defaults(func=role_delete_cmd)
sub_parser = subparser.add_parser('role-update', help="Update an existing role")
sub_parser.add_argument('id', type=int, help="ID of the role to update")
sub_parser.add_argument('--name', help="Role name")
sub_parser.add_argument('--can_see_status', choices=[0, 1], type=int, help="Can access the status web page")
sub_parser.add_argument('--can_change_config', choices=[0, 1], type=int, help="Can change Zoe configuration")
sub_parser.add_argument('--can_operate_others', choices=[0, 1], type=int, help="Can operate on other users' executions")
sub_parser.add_argument('--can_delete_executions', choices=[0, 1], type=int, help="Can delete executions permanently")
sub_parser.add_argument('--can_access_api', choices=[0, 1], type=int, help="Can access the REST API")
sub_parser.add_argument('--can_customize_resources', choices=[0, 1], type=int, help="Can customize resource reservations before starting executions")
sub_parser.add_argument('--can_access_full_zapp_shop', choices=[0, 1], type=int, help="Can access all ZApps in the ZApp shop")
sub_parser.set_defaults(func=role_update_cmd)
# Users
sub_parser = subparser.add_parser('user-ls', help="List existing users")
sub_parser.add_argument('--username', help="Filter by user name")
sub_parser.add_argument('--enabled', type=int, choices=[0, 1], help="Filter by enabled status")
sub_parser.add_argument('--auth_source', choices=['internal', 'ldap', 'ldap+ssl', 'textfile', 'pam'], help="Filter by auth source")
sub_parser.add_argument('--role', help="Filter by role name")
sub_parser.add_argument('--quota', help="Filter by quota name")
sub_parser.set_defaults(func=user_ls_cmd)
sub_parser = subparser.add_parser('user-get', help="Get a user by its ID")
sub_parser.add_argument('id', type=int, help="User ID")
sub_parser.set_defaults(func=user_get_cmd)
sub_parser = subparser.add_parser('user-create', help="Create a new user")
sub_parser.add_argument('username', help="Username")
sub_parser.add_argument('email', help="Email")
sub_parser.add_argument('auth_source', choices=['internal', 'ldap', 'ldap+ssl', 'textfile', 'pam'], help="Authentication method")
sub_parser.add_argument('fs_uid', help="Filesystem UID", type=int)
sub_parser.add_argument('role', help="Role name")
sub_parser.add_argument('quota', help="Quota name")
sub_parser.set_defaults(func=user_create_cmd)
sub_parser = subparser.add_parser('user-delete', help="Delete a user")
sub_parser.add_argument('id', type=int, help="User ID")
sub_parser.set_defaults(func=user_delete_cmd)
sub_parser = subparser.add_parser('user-update', help="Update an existing role")
sub_parser.add_argument('id', type=int, help="ID of the user to update")
sub_parser.add_argument('--email', help="Change the email")
sub_parser.add_argument('--fs_uid', type=int, help="Filesystem UID")
sub_parser.add_argument('--password', help="Change or set the password for internal authentication")
sub_parser.add_argument('--enabled', type=int, choices=[0, 1], help="Enable or disable the user")
sub_parser.add_argument('--auth_source', choices=['internal', 'ldap', 'ldap+ssl', 'textfile', 'pam'], help="Change the authentication source")
sub_parser.add_argument('--priority', type=int, help="Change priority")
sub_parser.add_argument('--role_id', help="Change role")
sub_parser.add_argument('--quota_id', help="Change quota")
sub_parser.set_defaults(func=user_update_cmd)
return parser, parser.parse_args()
def zoe():
"""Main entrypoint."""
parser, args = process_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
if not hasattr(args, "func"):
parser.print_help()
return
auth = utils.read_auth(args)
if auth is None:
sys.exit(1)
try:
api = ZoeAPI(auth['url'], auth['user'], auth['pass'])
args.func(api, args)
except ZoeAPIException as e:
print(e.message)
except KeyboardInterrupt:
print('CTRL-C pressed, exiting...')
sys.exit(0)
|
the-stack_106_14163
|
import os
import sys
from timeit import default_timer as timer
from datetime import datetime, timedelta
from mkdocs import utils as mkdocs_utils
from mkdocs.config import config_options, Config
from mkdocs.plugins import BasePlugin
import json
import re
mapping = {"datum": "Datum", "themen": "Themen", "sw": "Schlüsselwörter", "vorg": "Vorgänger"}
def format_meta(meta):
fixed = ["datum", "themen", "sw"]
content = []
val = meta.get("vorg", None)
if val is not None:
val = re.sub(r"\.md$", "", val)
content.append(f'<strong><a href="/{val}">{mapping["vorg"]}</a></strong>')
content.append("")
for key in fixed:
val = meta.get(key, None)
if val is not None:
val2 = ", ".join(val.split(","))
content.append(f"<strong>{mapping[key]}:</strong> {val2}")
for key, val in meta.items():
if key in fixed or key == "vorg":
continue
if val is not None:
content.append(f"<strong>{key.title()}:</strong> {val}")
return content
class MetaToDoc(BasePlugin):
def __init__(self):
self.enabled = True
self.total_time = 0
def on_page_markdown(self, markdown, page, config, files):
mdneu = []
found = False
for l in markdown.split("\n"):
mdneu.append(l)
if found:
continue
meta_fmt = [f"{x}<br />" for x in format_meta(page.meta)]
if re.match(r"\s*#\s", l):
mdneu.append('<div id="sidebar-extra" class="md-nav">')
mdneu.extend(meta_fmt)
mdneu.append("<br />")
mdneu.append("<br />")
mdneu.append("</div>")
found = True
return "\n".join(mdneu)
|
the-stack_106_14164
|
#!/usr/bin/env python
from __future__ import print_function
import glob
import re
import argparse
fixtures_directory = 'tests/fixtures/'
# restclient api header configuration
zero_api_regex = r'(\[Token )0{40}(\])'
real_api_regex = r'(\[Token ).{40}(\])'
zero_token_string = '[Token ' + 40 * '0' + ']'
def has_api_key(file_name):
"""
Detect whether the file contains an api key in the Token object that is not 40*'0'.
See issue #86.
:param file: path-to-file to check
:return: boolean
"""
f = open(file_name, 'r')
text = f.read()
if re.search(real_api_regex, text) is not None and \
re.search(zero_api_regex, text) is None:
return True
return False
def remove_api_key(file_name):
"""
Change the api key in the Token object to 40*'0'. See issue #86.
    :param file_name: path to the file to change
"""
with open(file_name, 'r') as fp:
text = fp.read()
text = re.sub(real_api_regex, zero_token_string, text)
with open(file_name, 'w') as fp:
fp.write(text)
return
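# Illustrative sketch (hypothetical cassette line): remove_api_key() rewrites
#   Authorization: [Token 0123456789abcdef0123456789abcdef01234567]
# as
#   Authorization: [Token 0000000000000000000000000000000000000000]
# while has_api_key() returns False for files that already contain only the all-zero token.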
def main(path):
if path[-1] != '/':
raise ValueError('Final character in path must be /.')
n_files_changed = 0
for filename in glob.glob(path+'*.yaml'):
if has_api_key(filename):
remove_api_key(filename)
n_files_changed += 1
print("Changed {} files.".format(n_files_changed))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("path", help="path to test fixtures",
nargs='?', default=fixtures_directory)
args = parser.parse_args()
main(args.path)
|
the-stack_106_14165
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013-2014, Christian Berendt <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apache2_module
version_added: 1.6
short_description: enables/disables a module of the Apache2 webserver
description:
- Enables or disables a specified module of the Apache2 webserver.
options:
name:
description:
- name of the module to enable/disable
required: true
state:
description:
- indicate the desired state of the resource
choices: ['present', 'absent']
default: present
requirements: ["a2enmod","a2dismod"]
'''
EXAMPLES = '''
# enables the Apache2 module "wsgi"
- apache2_module: state=present name=wsgi
# disables the Apache2 module "wsgi"
- apache2_module: state=absent name=wsgi
'''
import re
def _disable_module(module):
name = module.params['name']
a2dismod_binary = module.get_bin_path("a2dismod")
if a2dismod_binary is None:
module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache")
result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))
if re.match(r'.*\b' + name + r' already disabled', stdout, re.S|re.M):
module.exit_json(changed = False, result = "Success")
elif result != 0:
module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))
else:
module.exit_json(changed = True, result = "Disabled")
def _enable_module(module):
name = module.params['name']
a2enmod_binary = module.get_bin_path("a2enmod")
if a2enmod_binary is None:
module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache")
result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))
if re.match(r'.*\b' + name + r' already enabled', stdout, re.S|re.M):
module.exit_json(changed = False, result = "Success")
elif result != 0:
module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
else:
module.exit_json(changed = True, result = "Enabled")
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
),
)
if module.params['state'] == 'present':
_enable_module(module)
if module.params['state'] == 'absent':
_disable_module(module)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
the-stack_106_14167
|
from flask import request, jsonify
from flask_api import status
from werkzeug.exceptions import BadRequest
import datetime
from collections import defaultdict
from ..model.schedule import InterviewSchedule, InterviewScheduleSchema
from .person import Person
from .candidate import CandidateLogic
from .employee import EmpLogic
class ScheduleAlgo ( Person ):
def __init__ ( self ):
self.emp_logic = EmpLogic ( )
self.candidate_logic = CandidateLogic ( )
self.interview_schema = InterviewScheduleSchema ( )
self.interviews_schema = InterviewScheduleSchema ( many=True )
@classmethod
def verify_post_data ( cls ):
"""
Overriding method defined in Person class.
        Verify POST RPC data. If the data is not in the expected format, an appropriate error status and message are returned.
:return: status_code and collected data (error_msg or post_data)
"""
valid_input_format = {
"candidate_email": "[email protected]",
"interviewers_email": "[email protected], [email protected]"
}
warning_msg = "Please provide input in following json format: " + str ( valid_input_format )
# post rpc input verification and fetching
try:
candidate_email = request.json[ 'candidate_email' ]
interviewers_email, interviewers_email_str = cls.get_interviewers_email ( )
cls.verify_rpc_value ( request.json )
except KeyError:
return status.HTTP_400_BAD_REQUEST, {"Error": "All mandatory fields are not provided", 'Fix': warning_msg}
except ValueError:
return status.HTTP_400_BAD_REQUEST, {"Error": "One of the values is not string", 'Fix': warning_msg}
except BadRequest:
return status.HTTP_400_BAD_REQUEST, {"Error": "All mandatory fields are not provided in json format",
'Fix': warning_msg}
return status.HTTP_200_OK, {"candidate_email": candidate_email, \
"interviewers_email": interviewers_email, \
"interviewers_email_str": interviewers_email_str}
@staticmethod
def get_interviewers_email ( ):
"""
        This function retrieves the comma-separated list of interviewer emails
        collected from the REST RPC request.
:return: set of unique interviewers email ids and a string containing their email ids
"""
interviewers_email = request.json[ 'interviewers_email' ]
interviewers_email = interviewers_email.replace ( ',', ' ' ).strip ( ).split ( )
# verify same interviewer is not provided more than once
# if any interviewer is requested more than once, we choose unique interviewers
unique_interviewers = set ( )
for i_email in interviewers_email:
unique_interviewers.add ( i_email )
return unique_interviewers, ", ".join ( unique_interviewers )
@staticmethod
def find_overlapping_time ( t1, t2 ):
"""
This functions finds overlapping time between two times, if overlapping time is not possible, it returns None, None.
Note: Both t1 and t2 contains a tuple of 2 elements, where each entry in those tuples are in datetime format.
:param t1: (start_time, end_time)
:param t2: (start_time, end_time)
:return: (start_time, end_time)
"""
# t1 = (11:00, 12:00), t2 = (11:30, 12:30) => (11:30, 12:00) Correct
# t1 = (11:30, 12:30), t2 = (11:00, 12:00) => (11:30, 12:00) Correct
# t1 = (11:00, 12:00), t2 = (14:30, 16:30) => (14:30, 12:00) Wrong
start_time = max ( t1[ 0 ], t2[ 0 ] )
end_time = min ( t1[ 1 ], t2[ 1 ] )
if start_time >= end_time: return None, None
return start_time, end_time
@staticmethod
def convert_datetime_to_str ( user_date_time ):
"""
This function converts 24 hours datetime object to string
:param user_date_time: datetime object in HH:MM format
:return: string format of datetime in HH:MM format
"""
return user_date_time.strftime ( "%H:%M" )
@staticmethod
def convert_str_to_datetime ( time_str ):
"""
This function converts 24 hours string time to datetime object
:param time_str:time in string (HH:MM format)
:return: time in datetime object in HH:MM format
"""
return datetime.datetime.strptime ( time_str, "%H:%M" )
def find_common_schedule_time ( self, schedule_1, schedule_2 ):
"""
This function finds the common schedule between two given schedules.
If common schedule is not possible, it returns empty dictionary
:param schedule_1: {day: [(start_time, end_time)]}
:param schedule_2: {day: [(start_time, end_time)]}
:return: {day: [(start_time, end_time)]}
"""
common_schedule = defaultdict ( lambda: [ ] )
for day in schedule_1:
times_1 = schedule_1[ day ]
times_2 = schedule_2.get ( day )
if times_2 is None: continue
for t1 in times_1:
for t2 in times_2:
o_start_time, o_end_time = self.find_overlapping_time ( t1, t2 )
if o_start_time is not None and \
(o_start_time, o_end_time) not in common_schedule[ day ]:
common_schedule[ day ].append ( (o_start_time, o_end_time) )
return common_schedule
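    # Illustrative sketch of find_common_schedule_time (hypothetical data; in the real
    # code the start/end entries are datetime objects, shown here as HH:MM for brevity):
    #   schedule_1 = {"Monday": [(11:00, 12:00)]}
    #   schedule_2 = {"Monday": [(11:30, 12:30)], "Tuesday": [(09:00, 10:00)]}
    #   -> {"Monday": [(11:30, 12:00)]}
    # "Tuesday" is dropped because it does not appear in schedule_1.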
def calculate_schedule_interview ( self ):
"""
        This function fetches the candidate and interviewer email ids and their corresponding availabilities.
        Based on their availabilities, it schedules an appointment between the interviewers and the candidate.
        If such an appointment is successfully scheduled, it removes the appointment time slot
        from the interviewers' availabilities and finally writes the appointment result to the Schedule db.
In case of any error, it provides the reason and fix of issue to the user.
:return: Json reply and http status code
"""
status_code, data = self.verify_post_data ( )
if status_code != status.HTTP_200_OK:
return jsonify ( data ), status_code
candidate_email = data[ "candidate_email" ]
interviewers_email = data[ "interviewers_email" ]
interviewers_email_str = data[ "interviewers_email_str" ]
# fetching first candidate object from db matching candidate_email
candidate = self.candidate_logic.fetch_candidate_obj_from_db ( candidate_email )
# checking if candidate exist, if not then just exit
if not candidate:
error_msg = "Candidate " + candidate_email + " does not exist"
return jsonify (
{"Error": error_msg, "Fix": "Create schedule entry for candidate first"} ), status.HTTP_403_FORBIDDEN
# if candidate is already scheduled then no more processing and just return the result
interview_schedule = InterviewSchedule.query.filter_by ( candidate_email=candidate_email ).first ( )
if interview_schedule: # interview is already scheduled
return self.interview_schema.jsonify ( interview_schedule ), status.HTTP_200_OK
# fetch time slot choices saved in CandidateSchedule table
candidate_schedules = self.candidate_logic.fetch_candidate_schedule_objs_from_db ( candidate.id )
common_schedule = defaultdict ( lambda: [ ] ) # {day: [(start_time, end_time)]}
for c_schedule in candidate_schedules:
day = c_schedule.day
start_time = self.convert_str_to_datetime ( c_schedule.start_time )
end_time = self.convert_str_to_datetime ( c_schedule.end_time )
common_schedule[ day ].append ( (start_time, end_time) )
# fetching schedule of each interviewer
for i_email in interviewers_email:
interviewer = self.emp_logic.fetch_emp_obj_from_db ( i_email )
# checking if interviewer exist, if not then just exit
if not interviewer:
error_msg = "Interviewer " + i_email + " does not exist in company"
return jsonify ( {"Error": error_msg, "Fix": "Hire that Guy first:)"} ), status.HTTP_403_FORBIDDEN
interviewer_schedules = self.emp_logic.fetch_emp_schedule_objs_from_db ( interviewer.id )
fetched_interviewer_schedules = defaultdict ( lambda: [ ] ) # {day: [(start_time, end_time)]}
for i_schedule in interviewer_schedules:
day = i_schedule.day
start_time = self.convert_str_to_datetime ( i_schedule.start_time )
end_time = self.convert_str_to_datetime ( i_schedule.end_time )
fetched_interviewer_schedules[ day ].append ( (start_time, end_time) )
# finding common time for each day between this interviewer and candidate
# and store that result in common_schedule
common_schedule = self.find_common_schedule_time (
common_schedule,
fetched_interviewer_schedules
)
# If candidate schedule becomes empty, means no scheduling possible anymore.
# Therefore no point in doing more processing with other interviewers schedules.
if not common_schedule:
error_msg = "For next week, interview scheduling is not possible for " + candidate_email
return jsonify (
{"Error": error_msg, "Fix": "Enter new availabilities for candidate"} ), status.HTTP_403_FORBIDDEN
        # Now common_schedule has the common possible schedule
        # We take the first entry of the common_schedule dictionary
        common_schedule = list ( common_schedule.items ( ) )
# [
# ( day, [ (start1, end1), (start2, end2) ] )
# ]
interview_day = common_schedule[ 0 ][ 0 ]
interview_start_time = common_schedule[ 0 ][ 1 ][ 0 ][ 0 ]
interview_end_time = common_schedule[ 0 ][ 1 ][ 0 ][ 1 ]
# update interviewers schedules
self.update_interviewers_timeslots ( interviewers_email,
interview_day,
interview_start_time, interview_end_time )
# create an instance of schedule with interview_day, start and end_time, candidate_email, interviewer emails
interview_schedule = self.prepare_interview_schedule_instance (
interview_day,
interview_start_time, interview_end_time,
candidate_email,
interviewers_email_str
)
# save the result in schedule db
self.commit_into_db ( interview_schedule )
# return json reply
return self.interview_schema.jsonify ( interview_schedule ), status.HTTP_200_OK
def prepare_interview_schedule_instance ( self, interview_day,
interview_start_time, interview_end_time,
candidate_email, interviewers_email ):
"""
This function creates an instance of InterviewSchedule db and returns such.
:param interview_day: day of the week in str format
:param interview_start_time: 24 hours time in datetime format
:param interview_end_time: 24 hours time in datetime format
:param candidate_email: email of candidate in str format
:param interviewers_email: emails of interviewers in str format
:return: an instance of InterviewSchedule db
"""
interview_schedule = InterviewSchedule (
day=interview_day,
start_time=self.convert_datetime_to_str ( interview_start_time ),
end_time=self.convert_datetime_to_str ( interview_end_time ),
candidate_email=candidate_email,
interviewers_emails=interviewers_email
)
return interview_schedule
def update_interviewers_timeslots ( self,
interviewers_email,
interview_day, interview_start_time, interview_end_time
):
"""
        Once an appointment is booked for a candidate, delete the used time slot from the interviewers'
        available time slots.
:param interviewers_email: set of interviewer emails in str format
:param interview_day: string format of the day in a week
:param interview_start_time: datetime format in 24 hours
:param interview_end_time: datetime format in 24 hours
:return:
"""
for i_email in interviewers_email:
interviewer = self.emp_logic.fetch_emp_obj_from_db ( i_email )
interviewer_schedules = self.emp_logic.fetch_emp_schedule_objs_from_db ( interviewer.id )
for i_schedule in interviewer_schedules:
day = i_schedule.day
if day != interview_day: continue
start_time = self.convert_str_to_datetime ( i_schedule.start_time )
end_time = self.convert_str_to_datetime ( i_schedule.end_time )
if start_time <= interview_start_time and interview_end_time <= end_time:
self.commit_into_db ( i_schedule, add_operation=False )
break
def get_schedule_interviews ( self ):
"""
This function returns this list of candidate and their scheduled appointment with interviewers
:return: The calculated schedules of interviews for next week and http status code
"""
all_schedule = InterviewSchedule.query.all ( )
result = self.interviews_schema.dump ( all_schedule )
return jsonify ( {"scheduled_interviews": result.data} ), status.HTTP_200_OK
def reset_scheduler_dbs_for_next_week ( self ):
"""
This function resets all the dbs and thus prepares the scheduler application for next week.
:return: Json reply and http status code
"""
        # Note: the order of execution below is chosen for the sake of db integrity (just to be thorough);
        # a different order would not affect the application's functionality.
if request.method == 'GET':
return "To reset dbs, send an empty post to the same URI"
from setting import db
db.session.query ( InterviewSchedule ).delete ( )
self.emp_logic.reset_employees_data ( )
self.candidate_logic.reset_candidates_data ( )
self.commit_into_db ( content=None, add_operation=False )
return jsonify ( {
"Status": "Content of previous week is deleted from DBs. Application is now ready for use."} ), status.HTTP_200_OK
|
the-stack_106_14168
|
"""
day 8 of Advent of Code 2018
by Stefan Kruger
"""
class DataProvider:
def __init__(self, data):
self.cursor = 0
self.data = data
@classmethod
def read_file(cls, filename="data/input8.data"):
with open(filename) as f:
return cls([int(item) for item in f.read().split(" ")])
def next(self):
self.cursor += 1
return self.data[self.cursor - 1]
class Node:
def __init__(self, children, metadata):
self.children = children
self.metadata = metadata
@classmethod
def make_node(cls, data):
node_count = data.next()
meta_count = data.next()
children = []
metadata = []
for _ in range(node_count):
children.append(cls.make_node(data))
for _ in range(meta_count):
metadata.append(data.next())
return cls(children, metadata)
def meta_sum(self):
s = sum(self.metadata)
for c in self.children:
s += c.meta_sum()
return s
def value(self):
"""
If a node has no child nodes, its value is the sum of its metadata
entries.
However, if a node does have child nodes, the metadata entries become
indexes which refer to those child nodes. A metadata entry of 1 refers
to the first child node, 2 to the second, 3 to the third, and so on.
The value of this node is the sum of the values of the child nodes
referenced by the metadata entries. If a referenced child node does not
exist, that reference is skipped. A child node can be referenced
        multiple times and counts each time it is referenced. A metadata entry
of 0 does not refer to any child node.
"""
if not self.children:
return sum(self.metadata)
v = 0
for index in self.metadata:
child = index - 1
if child < 0 or child >= len(self.children):
continue
v += self.children[child].value()
return v
if __name__ == "__main__":
data = DataProvider.read_file()
# data = DataProvider([2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2])
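    # Worked example for the commented sample above (the 2018 day 8 puzzle example):
    # the root has children B (metadata 10, 11, 12) and C, whose only child D has
    # metadata 99. meta_sum() = 1+1+2 + 10+11+12 + 2 + 99 = 138, and value() = 66,
    # because the root's metadata (1, 1, 2) references B twice (33 each) and C once,
    # and C's value is 0 since its metadata (2) points to a non-existent second child.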
tree = Node.make_node(data)
print(f'Part1: {tree.meta_sum()}')
print(f'Part2: {tree.value()}')
|
the-stack_106_14169
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class InstantItemRecoveryTarget(Model):
"""Target details for file / folder restore.
:param client_scripts: List of client scripts.
:type client_scripts: list of :class:`ClientScriptForConnect
<azure.mgmt.recoveryservicesbackup.models.ClientScriptForConnect>`
"""
_attribute_map = {
'client_scripts': {'key': 'clientScripts', 'type': '[ClientScriptForConnect]'},
}
def __init__(self, client_scripts=None):
self.client_scripts = client_scripts
|
the-stack_106_14170
|
import random
from logics.classes.propositional import Formula, Inference
from logics.classes.exceptions import FormulaGeneratorError
class BiasedPropositionalGenerator:
"""A biased random propositional generator.
It is biased because it will not return every formula/inference within the space asked with the same probability.
On the other hand, it is *much* faster than its non-biased counterpart.
Examples
--------
This generator takes no parameters, so to instantiate one you can simply do:
>>> from logics.utils.formula_generators.generators_biased import BiasedPropositionalGenerator
>>> random_generator = BiasedPropositionalGenerator()
There is also a predefined instance so there is even no need to instantiate it
>>> from logics.utils.formula_generators.generators_biased import random_formula_generator
"""
# ------------------------------------------------------------------------------------------------------------------
# FORMULAE
def random_formula(self, depth, atomics, language, exact_depth=True, all_atomics=False):
"""Generates a random formula for the language given.
Parameters
----------
depth: int
A positive integer, representing the depth of the formula to obtain
atomics: list of str
The sublist of atomics of the language that the formula will be built of
language: logics.classes.propositional.Language or logics.classes.propositional.InfiniteLanguage
Instance of Language or InfiniteLanguage
exact_depth: bool
If true, the resulting formula will have *exactly* the depth given. Otherwise, will have *up to* that depth.
Defaults to True.
all_atomics: bool
If true, the resulting formula will contain *all* the atomics given. Otherwise, can contain *some* of them.
Defaults to False.
Returns
-------
logics.classes.propositional.Formula
A randomly generated formula of the given depth, containing some or all the atomics
Raises
------
        NotImplementedError
If exact_depth is False and all_atomics is True
ValueError
If exact_depth is True, all_atomics is True and the number of atomics given > the maximum arity constant of
the language ** depth.
For example, it is not possible to build a formula of depth 1 with 3 atomics in a language that has
connectives of at most arity 2.
Examples
--------
Exact depth and not all atomics
>>> from logics.instances.propositional.languages import classical_language
>>> from logics.utils.formula_generators.generators_biased import random_formula_generator
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=True, all_atomics=False)
['→', ['→', ['p'], ['↔', ['r'], ['p']]], ['~', ['r']]]
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=True, all_atomics=False)
['↔', ['∨', ['~', ['r']], ['r']], ['↔', ['p'], ['q']]]
Not exact depth and not all atomics
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=False, all_atomics=False)
['∨', ['r'], ['q']]
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=False, all_atomics=False)
['→', ['r'], ['∨', ['p'], ['r']]]
Exact depth and all atomics
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=True, all_atomics=True)
['~', ['∧', ['∧', ['r'], ['q']], ['p']]]
>>> random_formula_generator.random_formula(depth=3, atomics=['p', 'q', 'r'],
... language=classical_language,
... exact_depth=True, all_atomics=True)
['∨', ['∨', ['∧', ['q'], ['r']], ['→', ['r'], ['q']]], ['p']]
"""
if exact_depth and all_atomics:
return self._exact_depth_all_atomics(depth, atomics, language)
elif exact_depth and not all_atomics:
return self._exact_depth_some_atomics(depth, atomics, language)
elif not exact_depth and not all_atomics:
return self._upto_depth_some_atomics(depth, atomics, language)
else:
            raise NotImplementedError('This method does not yet accept a non-exact depth and all atomics')
def _exact_depth_some_atomics(self, depth, atomics, language):
"""Generates a random formula of some *exact* depth, which includes *some* of the atomics given."""
if depth == 0:
return Formula([random.choice(atomics)])
else:
constant = random.choice(tuple(language.constants()))
arity = language.arity(constant)
formula = Formula([constant])
formula.extend([None] * arity) # By now, formula is something like ['^', None, None]
# Randomly choose an index and put a formula of depth - 1 there
# (to ensure the formula reaches the depth given)
i = random.randint(1, arity) # index 0 is the constant
formula[i] = self._exact_depth_some_atomics(depth-1, atomics, language)
# In the rest of the arguments put a formula of random depth
for x in set(range(1, arity+1)) - {i}:
j = random.randint(0, depth-1)
formula[x] = self._exact_depth_some_atomics(j, atomics, language)
return formula
def _upto_depth_some_atomics(self, depth, atomics, language):
"""Generates a random formula of *up to* some depth, which includes *some* of the atomics given.
Works very similarly to the method above
"""
# Choose a depth and then call the previous function
chosen_depth = random.randint(0, depth)
return self._exact_depth_some_atomics(chosen_depth, atomics, language)
def _exact_depth_all_atomics(self, depth, atomics, language):
"""Generates a random formula of some *exact* depth, which includes *all* of the atomics given."""
# Brute force approach: uses _exact_depth_some_atomics and then checks if all the atomics are present
# Tries up to 100 times, then raises a FormulaGeneratorError exception
# Should be possible to build a better function (see exact_depth_all_atomics2 below).
# Validate that the request is possible
if len(atomics) > max(language.constant_arity_dict.values()) ** depth:
raise ValueError("len(atomics) can be at most the maximum arity of a constant of the language ** depth")
for x in range(100):
acceptable = True
formula = self._exact_depth_some_atomics(depth, atomics, language)
for at in atomics:
if at not in str(formula):
acceptable = False
if acceptable:
return formula
raise FormulaGeneratorError('Could not generate a formula with the desired parameters')
def exact_depth_all_atomics2(self, depth, atomics, language):
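"""Alternative version of _exact_depth_all_atomics: builds a formula with placeholders and then substitutes the atomics in (work in progress, see the todo below)."""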
# todo FIX THIS - for depth 1 and ['p', 'q'] the first part can return ~['$']
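# (for depth 1, _exact_depth_some_atomics may pick a unary connective, leaving a single '$' placeholder;
# that is not enough slots for two atomics, so random.choice below ends up drawing from an empty index list)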
# Validate that the request is possible
if len(atomics) > max(language.constant_arity_dict.values()) ** depth:
raise ValueError("len(atomics) can be at most the maximum arity of a constant of the language ** depth")
# Generate a formula with placeholders where the atomics will be
formula_with_placeholders = self._exact_depth_some_atomics(depth, ['$'], language)
# For each atomic, replace at least one placeholder for it (so that every atomic appears at least once)
formula_string = str(formula_with_placeholders)
placeholder_indexes = [x for x in range(len(formula_string)) if formula_string[x] == '$']
for atomic in atomics:
# Choose an index and remove it from the list
chosen_index = random.choice(placeholder_indexes)
placeholder_indexes.remove(chosen_index)
# Replace the index with the atomic
formula_string = formula_string[:chosen_index] + atomic + formula_string[chosen_index+1:]
# Replace the remaining placeholders with a random choice of atomics
for index in placeholder_indexes:
chosen_atomic = random.choice(atomics)
formula_string = formula_string[:index] + chosen_atomic + formula_string[index+1:]
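# Rebuild the Formula by evaluating the modified string back into a nested list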
formula = Formula(eval(formula_string))
return formula
# ------------------------------------------------------------------------------------------------------------------
# INFERENCE
def random_inference(self, num_premises, num_conclusions, max_depth, atomics, language, level=1,
exact_num_premises=True, exact_num_conclusions=True):
"""Generates a random Inference.
Takes a number of premises and of conclusions (either exact or up to, see the `exact_num_premises` and
`exact_num_conclusions` parameters below), and populates them with formulae of *up to* the given `max_depth`
and *some* of the given `atomics`.
Can also be used to generate metainferences, if given a `level` > 1. In that case, the inferences in the
premises / conclusions will have *up to* that number of premises and conclusions. That is, if asked for an
inference of level 2 with exactly two premises, the premises will themselves be inferences, and they may contain
fewer than two premises.
Parameters
----------
num_premises: int
The number of premises the inference will contain (exact or an upper bound, depending on `exact_num_premises`)
num_conclusions: int
The number of conclusions the inference will contain (exact or an upper bound, depending on `exact_num_conclusions`)
max_depth: int
The maximum depth that the formulae within the inference can have
atomics: list of str
The sublist of atomics of the language that the formulae within the inference will be built of
language: logics.classes.propositional.Language or logics.classes.propositional.InfiniteLanguage
Instance of Language or InfiniteLanguage
level: int, optional
Level of the inference to be generated (1 is regular inference, 2 is metainference, 3 is metameta, ...).
level is 1 by default.
exact_num_premises: bool, optional
If True, the resulting inference will contain *exactly* that number of premises; otherwise it will
contain *up to* that number of premises. Defaults to True.
exact_num_conclusions: bool, optional
If True, the resulting inference will contain *exactly* that number of conclusions; otherwise it will
contain *up to* that number of conclusions. Defaults to True.
Returns
-------
logics.classes.propositional.Inference
A randomly generated inference of the given parameters.
Examples
--------
Random inferences with exactly two premises and one conclusion:
>>> from logics.instances.propositional.languages import classical_language
>>> from logics.utils.parsers import classical_parser
>>> from logics.utils.formula_generators.generators_biased import random_formula_generator
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=1,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=1,
... exact_num_premises=True,
... exact_num_conclusions=True)
>>> classical_parser.unparse(inf)
'(p ∧ p), ((q ∨ q) ∧ (p ∨ p)) / ~q'
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=1,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=1,
... exact_num_premises=True,
... exact_num_conclusions=True)
>>> classical_parser.unparse(inf)
'~(q ↔ q), q / (q ∧ (p → p))'
Random inferences with up to two premises and two conclusions
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=2,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=1,
... exact_num_premises=False,
... exact_num_conclusions=False)
>>> classical_parser.unparse(inf)
'(p ↔ p) / ~q'
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=2,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=1,
... exact_num_premises=False,
... exact_num_conclusions=False)
>>> classical_parser.unparse(inf)
'/'
Random metainferences with up to two premises and two conclusions
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=2,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=2,
... exact_num_premises=False,
... exact_num_conclusions=False)
>>> classical_parser.unparse(inf)
'((q → (q → q)) /), ((p ↔ p), (q ∧ p) / (q ∨ q), p) //'
>>> inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=2,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=2,
... exact_num_premises=False,
... exact_num_conclusions=False)
>>> classical_parser.unparse(inf)
'(/) //'
"""
if level == 0:
raise ValueError('Inferences cannot have level 0')
# Premises
premises = list()
new_num_premises = num_premises
if not exact_num_premises:
new_num_premises = random.randint(0, num_premises)
for _ in range(new_num_premises):
if level == 1:
premises.append(self._upto_depth_some_atomics(max_depth, atomics, language))
else:
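# For level > 1, each premise is itself a randomly generated inference of level - 1, with up to the given sizes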
premises.append(self.random_inference(num_premises, num_conclusions, max_depth, atomics, language,
level=level-1, exact_num_premises=False,
exact_num_conclusions=False))
# Conclusions
conclusions = list()
new_num_conclusions = num_conclusions
if not exact_num_conclusions:
new_num_conclusions = random.randint(0, num_conclusions)
for _ in range(new_num_conclusions):
if level == 1:
conclusions.append(self._upto_depth_some_atomics(max_depth, atomics, language))
else:
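# Likewise, each conclusion at level > 1 is itself a randomly generated inference of level - 1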
conclusions.append(self.random_inference(num_premises, num_conclusions, max_depth, atomics, language,
level=level-1, exact_num_premises=False,
exact_num_conclusions=False))
return Inference(premises=premises, conclusions=conclusions, level=level)
# ------------------------------------------------------------------------------------------------------------------
# THINGS WITH VALIDITY APPARATUSES
def random_tautology(self, depth, atomics, language, validity_apparatus, exact_depth=True, all_atomics=False,
attempts=100):
"""Generates a random tautogy for the validity apparatus given.
Will generate a random formula and then test the inference ([] / [formula]) to see if it is valid. If it is,
returns it; otherwise it generates another. It tries at most the number of `attempts` given.
The signature is very similar to that of random_formula but takes two extra parameters:
* validity_apparatus: any object with an is_valid method (which takes an inference as argument)
* attempts: an `int`, maximum number of formulae it generates and tests (defaults to 100)
Returns
-------
logics.classes.propositional.Formula
A randomly generated tautology of the given depth, containing some or all of the given atomics
Raises
------
NotImplementedError
If exact_depth is False and all_atomics is True
ValueError
If exact_depth is True, all_atomics is True and the number of atomics given is greater than (the maximum
arity among the language's constants) ** depth.
FormulaGeneratorError
If it cannot find a tautology in the given number of `attempts`
Examples
--------
>>> from logics.instances.propositional.languages import classical_language
>>> from logics.instances.propositional.many_valued_semantics import classical_mvl_semantics
>>> from logics.utils.formula_generators.generators_biased import random_formula_generator
>>> random_formula_generator.random_tautology(depth=2, atomics=['p', 'q'],
... language=classical_language,
... validity_apparatus=classical_mvl_semantics)
['→', ['∧', ['q'], ['p']], ['∧', ['q'], ['q']]]
>>> random_formula_generator.random_tautology(depth=2, atomics=['p', 'q'],
... language=classical_language,
... validity_apparatus=classical_mvl_semantics)
['→', ['p'], ['∨', ['p'], ['p']]]
"""
for _ in range(attempts):
if exact_depth and all_atomics:
formula = self._exact_depth_all_atomics(depth, atomics, language)
elif exact_depth and not all_atomics:
formula = self._exact_depth_some_atomics(depth, atomics, language)
elif not exact_depth and not all_atomics:
formula = self._upto_depth_some_atomics(depth, atomics, language)
else:
raise NotImplementedError('Not exact depth and all atomics generator not implemented yet')
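# Test the candidate formula as the conclusion of a premise-less inference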
inf = Inference(premises=[], conclusions=[formula])
if validity_apparatus.is_valid(inf):
return formula
raise FormulaGeneratorError('Could not generate tautology with the parameters given')
def random_valid_inference(self, num_premises, num_conclusions, max_depth, atomics, language, validity_apparatus,
level=1, exact_num_premises=True, exact_num_conclusions=True, attempts=100):
"""Generates a random *valid* inference for the validity apparatus given.
Similar to the random_inference method. The two new parameters work as in the method above.
In particular, the object passed to `validity_apparatus` must have an `is_valid` method.
If `level` > 1 the validity apparatus must be able to handle metainferential validity.
Returns
-------
logics.classes.propositional.Inference
A randomly generated valid inference or metainference with the parameters given
Raises
------
FormulaGeneratorError
If it cannot find a valid inference in the given number of `attempts`
Examples
--------
>>> from logics.instances.propositional.languages import classical_language
>>> from logics.utils.parsers import classical_parser
>>> from logics.instances.propositional.many_valued_semantics import classical_mvl_semantics
>>> from logics.utils.formula_generators.generators_biased import random_formula_generator
>>> inf = random_formula_generator.random_valid_inference(num_premises=2, num_conclusions=1,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=1,
... validity_apparatus=classical_mvl_semantics)
>>> classical_parser.unparse(inf)
'(q ↔ p), p / (p ∨ q)'
>>> inf = random_formula_generator.random_valid_inference(num_premises=2, num_conclusions=1,
... max_depth=2, atomics=['p', 'q'],
... language=classical_language, level=2,
... validity_apparatus=classical_mvl_semantics)
>>> classical_parser.unparse(inf)
'(p / p), (p, ((q → p) ∧ (p ∨ p)) / (p ↔ (q ↔ p))) // (((p ↔ p) ∨ p), ~q / (q ∨ ~p))'
"""
for _ in range(attempts):
inf = self.random_inference(num_premises, num_conclusions, max_depth, atomics, language, level,
exact_num_premises, exact_num_conclusions)
if validity_apparatus.is_valid(inf):
return inf
raise FormulaGeneratorError('Could not generate valid inference with the parameters given')
def random_invalid_inference(self, num_premises, num_conclusions, max_depth, atomics, language, validity_apparatus,
level=1, exact_num_premises=True, exact_num_conclusions=True, attempts=100):
"""Generates a random *invalid* inference for the validity apparatus given.
Identical to the method above, except that it returns an *invalid* inference.
"""
for _ in range(attempts):
inf = self.random_inference(num_premises, num_conclusions, max_depth, atomics, language, level,
exact_num_premises, exact_num_conclusions)
if not validity_apparatus.is_valid(inf):
return inf
raise FormulaGeneratorError('Could not generate invalid inference with the parameters given')
random_formula_generator = BiasedPropositionalGenerator()
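# A minimal usage sketch (it assumes the classical language, parser and semantics instances used in the
# doctests above are importable); running this module directly prints a sample formula and a sample valid inference:
if __name__ == '__main__':
    from logics.instances.propositional.languages import classical_language
    from logics.instances.propositional.many_valued_semantics import classical_mvl_semantics
    from logics.utils.parsers import classical_parser

    # A random formula of depth up to 2 built from 'p' and 'q'
    sample_formula = random_formula_generator.random_formula(depth=2, atomics=['p', 'q'],
                                                             language=classical_language,
                                                             exact_depth=False, all_atomics=False)
    print(sample_formula)

    # A random classically valid inference with exactly two premises and one conclusion
    sample_inference = random_formula_generator.random_valid_inference(
        num_premises=2, num_conclusions=1, max_depth=2, atomics=['p', 'q'],
        language=classical_language, validity_apparatus=classical_mvl_semantics)
    print(classical_parser.unparse(sample_inference))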